From d2839953a096fc0e131871a31e2e5b5255ee55cf Mon Sep 17 00:00:00 2001 From: Rongjun Chen Date: Fri, 23 Nov 2018 13:36:06 +0800 Subject: [PATCH] wifi: add bcmdhd.100.10.315.x driver to support ap6271S [2/3] PD# OTT-766 Problem: AP6271S wifi not support Solution: add AP6271S support Verify: test pass on u221 Change-Id: I79e864b584a6baa955db9b9b152f9b4fbd237970 Signed-off-by: Rongjun Chen --- bcmdhd.100.10.315.x/Kconfig | 61 + bcmdhd.100.10.315.x/Makefile | 193 + bcmdhd.100.10.315.x/aiutils.c | 2097 ++ bcmdhd.100.10.315.x/bcm_app_utils.c | 1032 + bcmdhd.100.10.315.x/bcmbloom.c | 240 + bcmdhd.100.10.315.x/bcmevent.c | 394 + bcmdhd.100.10.315.x/bcmsdh.c | 883 + bcmdhd.100.10.315.x/bcmsdh_linux.c | 506 + bcmdhd.100.10.315.x/bcmsdh_sdmmc.c | 1774 ++ bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c | 377 + bcmdhd.100.10.315.x/bcmsdspi_linux.c | 437 + bcmdhd.100.10.315.x/bcmspibrcm.c | 1799 ++ bcmdhd.100.10.315.x/bcmstdlib_s.c | 306 + bcmdhd.100.10.315.x/bcmutils.c | 4232 +++ bcmdhd.100.10.315.x/bcmwifi_channels.c | 1464 + bcmdhd.100.10.315.x/bcmwifi_channels.h | 803 + bcmdhd.100.10.315.x/bcmwifi_rates.h | 831 + bcmdhd.100.10.315.x/bcmwifi_rspec.h | 212 + bcmdhd.100.10.315.x/bcmxtlv.c | 612 + bcmdhd.100.10.315.x/dbus.c | 2929 ++ bcmdhd.100.10.315.x/dbus_usb.c | 1172 + bcmdhd.100.10.315.x/dbus_usb_linux.c | 3403 +++ bcmdhd.100.10.315.x/dhd.h | 3069 ++ bcmdhd.100.10.315.x/dhd_bus.h | 321 + bcmdhd.100.10.315.x/dhd_buzzz.h | 37 + bcmdhd.100.10.315.x/dhd_cdc.c | 980 + bcmdhd.100.10.315.x/dhd_cfg80211.c | 302 + bcmdhd.100.10.315.x/dhd_cfg80211.h | 54 + bcmdhd.100.10.315.x/dhd_common.c | 6732 +++++ bcmdhd.100.10.315.x/dhd_config.c | 2896 ++ bcmdhd.100.10.315.x/dhd_config.h | 285 + bcmdhd.100.10.315.x/dhd_custom_gpio.c | 278 + bcmdhd.100.10.315.x/dhd_custom_hikey.c | 283 + bcmdhd.100.10.315.x/dhd_custom_memprealloc.c | 559 + bcmdhd.100.10.315.x/dhd_dbg.h | 353 + bcmdhd.100.10.315.x/dhd_dbg_ring.c | 409 + bcmdhd.100.10.315.x/dhd_dbg_ring.h | 137 + bcmdhd.100.10.315.x/dhd_debug.c | 2092 ++ 
bcmdhd.100.10.315.x/dhd_debug.h | 845 + bcmdhd.100.10.315.x/dhd_debug_linux.c | 509 + bcmdhd.100.10.315.x/dhd_flowring.c | 1157 + bcmdhd.100.10.315.x/dhd_flowring.h | 272 + bcmdhd.100.10.315.x/dhd_gpio.c | 386 + bcmdhd.100.10.315.x/dhd_ip.c | 1391 + bcmdhd.100.10.315.x/dhd_ip.h | 85 + bcmdhd.100.10.315.x/dhd_linux.c | 22537 +++++++++++++++ bcmdhd.100.10.315.x/dhd_linux.h | 169 + bcmdhd.100.10.315.x/dhd_linux_platdev.c | 1021 + bcmdhd.100.10.315.x/dhd_linux_sched.c | 51 + bcmdhd.100.10.315.x/dhd_linux_wq.c | 404 + bcmdhd.100.10.315.x/dhd_linux_wq.h | 91 + bcmdhd.100.10.315.x/dhd_mschdbg.c | 778 + bcmdhd.100.10.315.x/dhd_mschdbg.h | 39 + bcmdhd.100.10.315.x/dhd_msgbuf.c | 9934 +++++++ bcmdhd.100.10.315.x/dhd_pcie.c | 10394 +++++++ bcmdhd.100.10.315.x/dhd_pcie.h | 552 + bcmdhd.100.10.315.x/dhd_pcie_linux.c | 2347 ++ bcmdhd.100.10.315.x/dhd_pno.c | 4514 +++ bcmdhd.100.10.315.x/dhd_pno.h | 572 + bcmdhd.100.10.315.x/dhd_proto.h | 218 + bcmdhd.100.10.315.x/dhd_rtt.c | 3014 ++ bcmdhd.100.10.315.x/dhd_rtt.h | 397 + bcmdhd.100.10.315.x/dhd_sdio.c | 10353 +++++++ bcmdhd.100.10.315.x/dhd_static_buf.c | 535 + bcmdhd.100.10.315.x/dhd_wlfc.c | 4578 +++ bcmdhd.100.10.315.x/dhd_wlfc.h | 562 + bcmdhd.100.10.315.x/dngl_stats.h | 386 + bcmdhd.100.10.315.x/dngl_wlhdr.h | 43 + bcmdhd.100.10.315.x/frag.c | 112 + bcmdhd.100.10.315.x/frag.h | 38 + bcmdhd.100.10.315.x/hnd_pktpool.c | 1427 + bcmdhd.100.10.315.x/hnd_pktq.c | 1428 + bcmdhd.100.10.315.x/hndlhl.c | 537 + bcmdhd.100.10.315.x/hndmem.c | 429 + bcmdhd.100.10.315.x/hndpmu.c | 788 + bcmdhd.100.10.315.x/include/802.11.h | 5259 ++++ bcmdhd.100.10.315.x/include/802.11e.h | 139 + bcmdhd.100.10.315.x/include/802.11s.h | 334 + bcmdhd.100.10.315.x/include/802.1d.h | 53 + bcmdhd.100.10.315.x/include/802.3.h | 55 + bcmdhd.100.10.315.x/include/aidmp.h | 429 + bcmdhd.100.10.315.x/include/bcm_cfg.h | 32 + bcmdhd.100.10.315.x/include/bcm_mpool_pub.h | 350 + bcmdhd.100.10.315.x/include/bcm_ring.h | 613 + bcmdhd.100.10.315.x/include/bcmbloom.h | 79 
+ bcmdhd.100.10.315.x/include/bcmcdc.h | 121 + bcmdhd.100.10.315.x/include/bcmdefs.h | 600 + bcmdhd.100.10.315.x/include/bcmdevs.h | 936 + bcmdhd.100.10.315.x/include/bcmdhcp.h | 92 + bcmdhd.100.10.315.x/include/bcmendian.h | 416 + bcmdhd.100.10.315.x/include/bcmeth.h | 115 + bcmdhd.100.10.315.x/include/bcmevent.h | 1188 + bcmdhd.100.10.315.x/include/bcmiov.h | 356 + bcmdhd.100.10.315.x/include/bcmip.h | 250 + bcmdhd.100.10.315.x/include/bcmipv6.h | 161 + bcmdhd.100.10.315.x/include/bcmmsgbuf.h | 1340 + bcmdhd.100.10.315.x/include/bcmnvram.h | 328 + bcmdhd.100.10.315.x/include/bcmpcie.h | 522 + bcmdhd.100.10.315.x/include/bcmpcispi.h | 181 + bcmdhd.100.10.315.x/include/bcmperf.h | 39 + bcmdhd.100.10.315.x/include/bcmsdbus.h | 179 + bcmdhd.100.10.315.x/include/bcmsdh.h | 271 + bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h | 128 + bcmdhd.100.10.315.x/include/bcmsdpcm.h | 304 + bcmdhd.100.10.315.x/include/bcmsdspi.h | 138 + bcmdhd.100.10.315.x/include/bcmsdstd.h | 281 + bcmdhd.100.10.315.x/include/bcmspi.h | 43 + bcmdhd.100.10.315.x/include/bcmspibrcm.h | 167 + bcmdhd.100.10.315.x/include/bcmsrom_fmt.h | 1013 + bcmdhd.100.10.315.x/include/bcmsrom_tbl.h | 1458 + bcmdhd.100.10.315.x/include/bcmstdlib_s.h | 44 + bcmdhd.100.10.315.x/include/bcmtcp.h | 92 + bcmdhd.100.10.315.x/include/bcmtlv.h | 334 + bcmdhd.100.10.315.x/include/bcmudp.h | 60 + bcmdhd.100.10.315.x/include/bcmutils.h | 1329 + bcmdhd.100.10.315.x/include/brcm_nl80211.h | 77 + bcmdhd.100.10.315.x/include/dbus.h | 598 + bcmdhd.100.10.315.x/include/dhd_daemon.h | 62 + bcmdhd.100.10.315.x/include/dhdioctl.h | 242 + bcmdhd.100.10.315.x/include/dnglevent.h | 139 + bcmdhd.100.10.315.x/include/eapol.h | 266 + bcmdhd.100.10.315.x/include/epivers.h | 51 + bcmdhd.100.10.315.x/include/etd.h | 478 + bcmdhd.100.10.315.x/include/ethernet.h | 224 + bcmdhd.100.10.315.x/include/event_log.h | 422 + .../include/event_log_payload.h | 799 + bcmdhd.100.10.315.x/include/event_log_set.h | 111 + 
bcmdhd.100.10.315.x/include/event_log_tag.h | 415 + bcmdhd.100.10.315.x/include/event_trace.h | 123 + bcmdhd.100.10.315.x/include/fils.h | 294 + bcmdhd.100.10.315.x/include/hnd_armtrap.h | 89 + bcmdhd.100.10.315.x/include/hnd_cons.h | 86 + bcmdhd.100.10.315.x/include/hnd_debug.h | 168 + bcmdhd.100.10.315.x/include/hnd_pktpool.h | 243 + bcmdhd.100.10.315.x/include/hnd_pktq.h | 325 + bcmdhd.100.10.315.x/include/hnd_trap.h | 39 + bcmdhd.100.10.315.x/include/hndchipc.h | 53 + bcmdhd.100.10.315.x/include/hndlhl.h | 61 + bcmdhd.100.10.315.x/include/hndmem.h | 80 + bcmdhd.100.10.315.x/include/hndpmu.h | 79 + bcmdhd.100.10.315.x/include/hndsoc.h | 349 + bcmdhd.100.10.315.x/include/linux_osl.h | 588 + bcmdhd.100.10.315.x/include/linux_pkt.h | 233 + bcmdhd.100.10.315.x/include/linuxver.h | 834 + bcmdhd.100.10.315.x/include/lpflags.h | 45 + bcmdhd.100.10.315.x/include/mbo.h | 285 + bcmdhd.100.10.315.x/include/miniopt.h | 79 + bcmdhd.100.10.315.x/include/msf.h | 66 + bcmdhd.100.10.315.x/include/msgtrace.h | 62 + bcmdhd.100.10.315.x/include/nan.h | 1529 + bcmdhd.100.10.315.x/include/osl.h | 361 + bcmdhd.100.10.315.x/include/osl_decl.h | 37 + bcmdhd.100.10.315.x/include/osl_ext.h | 765 + bcmdhd.100.10.315.x/include/p2p.h | 701 + .../include/packed_section_end.h | 59 + .../include/packed_section_start.h | 104 + bcmdhd.100.10.315.x/include/pcicfg.h | 394 + bcmdhd.100.10.315.x/include/pcie_core.h | 1156 + bcmdhd.100.10.315.x/include/rte_ioctl.h | 103 + bcmdhd.100.10.315.x/include/sbchipc.h | 4643 +++ bcmdhd.100.10.315.x/include/sbconfig.h | 285 + bcmdhd.100.10.315.x/include/sbgci.h | 273 + bcmdhd.100.10.315.x/include/sbhnddma.h | 449 + bcmdhd.100.10.315.x/include/sbpcmcia.h | 137 + bcmdhd.100.10.315.x/include/sbsdio.h | 188 + bcmdhd.100.10.315.x/include/sbsdpcmdev.h | 309 + bcmdhd.100.10.315.x/include/sbsocram.h | 204 + bcmdhd.100.10.315.x/include/sbsysmem.h | 180 + bcmdhd.100.10.315.x/include/sdio.h | 625 + bcmdhd.100.10.315.x/include/sdioh.h | 450 + 
bcmdhd.100.10.315.x/include/sdiovar.h | 124 + bcmdhd.100.10.315.x/include/sdspi.h | 78 + bcmdhd.100.10.315.x/include/siutils.h | 801 + bcmdhd.100.10.315.x/include/spid.h | 168 + bcmdhd.100.10.315.x/include/trxhdr.h | 95 + bcmdhd.100.10.315.x/include/typedefs.h | 367 + bcmdhd.100.10.315.x/include/usbrdl.h | 134 + bcmdhd.100.10.315.x/include/vlan.h | 97 + bcmdhd.100.10.315.x/include/wlfc_proto.h | 413 + bcmdhd.100.10.315.x/include/wlioctl.h | 18636 ++++++++++++ bcmdhd.100.10.315.x/include/wlioctl_defs.h | 2320 ++ bcmdhd.100.10.315.x/include/wlioctl_utils.h | 61 + bcmdhd.100.10.315.x/include/wpa.h | 290 + bcmdhd.100.10.315.x/include/wps.h | 385 + bcmdhd.100.10.315.x/linux_osl.c | 1903 ++ bcmdhd.100.10.315.x/linux_osl_priv.h | 179 + bcmdhd.100.10.315.x/linux_pkt.c | 623 + bcmdhd.100.10.315.x/pcie_core.c | 158 + bcmdhd.100.10.315.x/sbutils.c | 1093 + bcmdhd.100.10.315.x/siutils.c | 3764 +++ bcmdhd.100.10.315.x/siutils_priv.h | 354 + bcmdhd.100.10.315.x/wl_android.c | 5724 ++++ bcmdhd.100.10.315.x/wl_android.h | 320 + bcmdhd.100.10.315.x/wl_android_ext.c | 4403 +++ bcmdhd.100.10.315.x/wl_cfg80211.c | 24125 ++++++++++++++++ bcmdhd.100.10.315.x/wl_cfg80211.h | 2151 ++ bcmdhd.100.10.315.x/wl_cfg_btcoex.c | 585 + bcmdhd.100.10.315.x/wl_cfgnan.c | 6014 ++++ bcmdhd.100.10.315.x/wl_cfgnan.h | 800 + bcmdhd.100.10.315.x/wl_cfgp2p.c | 2638 ++ bcmdhd.100.10.315.x/wl_cfgp2p.h | 469 + bcmdhd.100.10.315.x/wl_cfgvendor.c | 7310 +++++ bcmdhd.100.10.315.x/wl_cfgvendor.h | 615 + bcmdhd.100.10.315.x/wl_dbg.h | 380 + bcmdhd.100.10.315.x/wl_escan.c | 1557 + bcmdhd.100.10.315.x/wl_escan.h | 93 + bcmdhd.100.10.315.x/wl_iw.c | 4104 +++ bcmdhd.100.10.315.x/wl_iw.h | 165 + bcmdhd.100.10.315.x/wl_linux_mon.c | 406 + bcmdhd.100.10.315.x/wl_roam.c | 373 + bcmdhd.100.10.315.x/wldev_common.c | 535 + bcmdhd.100.10.315.x/wldev_common.h | 130 + 212 files changed, 263223 insertions(+) create mode 100755 bcmdhd.100.10.315.x/Kconfig create mode 100755 bcmdhd.100.10.315.x/Makefile create mode 100644 
bcmdhd.100.10.315.x/aiutils.c create mode 100644 bcmdhd.100.10.315.x/bcm_app_utils.c create mode 100644 bcmdhd.100.10.315.x/bcmbloom.c create mode 100644 bcmdhd.100.10.315.x/bcmevent.c create mode 100644 bcmdhd.100.10.315.x/bcmsdh.c create mode 100644 bcmdhd.100.10.315.x/bcmsdh_linux.c create mode 100644 bcmdhd.100.10.315.x/bcmsdh_sdmmc.c create mode 100644 bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c create mode 100644 bcmdhd.100.10.315.x/bcmsdspi_linux.c create mode 100644 bcmdhd.100.10.315.x/bcmspibrcm.c create mode 100644 bcmdhd.100.10.315.x/bcmstdlib_s.c create mode 100644 bcmdhd.100.10.315.x/bcmutils.c create mode 100644 bcmdhd.100.10.315.x/bcmwifi_channels.c create mode 100644 bcmdhd.100.10.315.x/bcmwifi_channels.h create mode 100644 bcmdhd.100.10.315.x/bcmwifi_rates.h create mode 100644 bcmdhd.100.10.315.x/bcmwifi_rspec.h create mode 100644 bcmdhd.100.10.315.x/bcmxtlv.c create mode 100644 bcmdhd.100.10.315.x/dbus.c create mode 100644 bcmdhd.100.10.315.x/dbus_usb.c create mode 100644 bcmdhd.100.10.315.x/dbus_usb_linux.c create mode 100644 bcmdhd.100.10.315.x/dhd.h create mode 100644 bcmdhd.100.10.315.x/dhd_bus.h create mode 100644 bcmdhd.100.10.315.x/dhd_buzzz.h create mode 100644 bcmdhd.100.10.315.x/dhd_cdc.c create mode 100644 bcmdhd.100.10.315.x/dhd_cfg80211.c create mode 100644 bcmdhd.100.10.315.x/dhd_cfg80211.h create mode 100644 bcmdhd.100.10.315.x/dhd_common.c create mode 100644 bcmdhd.100.10.315.x/dhd_config.c create mode 100644 bcmdhd.100.10.315.x/dhd_config.h create mode 100644 bcmdhd.100.10.315.x/dhd_custom_gpio.c create mode 100644 bcmdhd.100.10.315.x/dhd_custom_hikey.c create mode 100644 bcmdhd.100.10.315.x/dhd_custom_memprealloc.c create mode 100644 bcmdhd.100.10.315.x/dhd_dbg.h create mode 100644 bcmdhd.100.10.315.x/dhd_dbg_ring.c create mode 100644 bcmdhd.100.10.315.x/dhd_dbg_ring.h create mode 100644 bcmdhd.100.10.315.x/dhd_debug.c create mode 100644 bcmdhd.100.10.315.x/dhd_debug.h create mode 100644 bcmdhd.100.10.315.x/dhd_debug_linux.c create 
mode 100644 bcmdhd.100.10.315.x/dhd_flowring.c create mode 100644 bcmdhd.100.10.315.x/dhd_flowring.h create mode 100644 bcmdhd.100.10.315.x/dhd_gpio.c create mode 100644 bcmdhd.100.10.315.x/dhd_ip.c create mode 100644 bcmdhd.100.10.315.x/dhd_ip.h create mode 100644 bcmdhd.100.10.315.x/dhd_linux.c create mode 100644 bcmdhd.100.10.315.x/dhd_linux.h create mode 100644 bcmdhd.100.10.315.x/dhd_linux_platdev.c create mode 100644 bcmdhd.100.10.315.x/dhd_linux_sched.c create mode 100644 bcmdhd.100.10.315.x/dhd_linux_wq.c create mode 100644 bcmdhd.100.10.315.x/dhd_linux_wq.h create mode 100644 bcmdhd.100.10.315.x/dhd_mschdbg.c create mode 100644 bcmdhd.100.10.315.x/dhd_mschdbg.h create mode 100644 bcmdhd.100.10.315.x/dhd_msgbuf.c create mode 100644 bcmdhd.100.10.315.x/dhd_pcie.c create mode 100644 bcmdhd.100.10.315.x/dhd_pcie.h create mode 100644 bcmdhd.100.10.315.x/dhd_pcie_linux.c create mode 100644 bcmdhd.100.10.315.x/dhd_pno.c create mode 100644 bcmdhd.100.10.315.x/dhd_pno.h create mode 100644 bcmdhd.100.10.315.x/dhd_proto.h create mode 100644 bcmdhd.100.10.315.x/dhd_rtt.c create mode 100644 bcmdhd.100.10.315.x/dhd_rtt.h create mode 100644 bcmdhd.100.10.315.x/dhd_sdio.c create mode 100644 bcmdhd.100.10.315.x/dhd_static_buf.c create mode 100644 bcmdhd.100.10.315.x/dhd_wlfc.c create mode 100644 bcmdhd.100.10.315.x/dhd_wlfc.h create mode 100644 bcmdhd.100.10.315.x/dngl_stats.h create mode 100644 bcmdhd.100.10.315.x/dngl_wlhdr.h create mode 100644 bcmdhd.100.10.315.x/frag.c create mode 100644 bcmdhd.100.10.315.x/frag.h create mode 100644 bcmdhd.100.10.315.x/hnd_pktpool.c create mode 100644 bcmdhd.100.10.315.x/hnd_pktq.c create mode 100644 bcmdhd.100.10.315.x/hndlhl.c create mode 100644 bcmdhd.100.10.315.x/hndmem.c create mode 100644 bcmdhd.100.10.315.x/hndpmu.c create mode 100644 bcmdhd.100.10.315.x/include/802.11.h create mode 100644 bcmdhd.100.10.315.x/include/802.11e.h create mode 100644 bcmdhd.100.10.315.x/include/802.11s.h create mode 100644 
bcmdhd.100.10.315.x/include/802.1d.h create mode 100644 bcmdhd.100.10.315.x/include/802.3.h create mode 100644 bcmdhd.100.10.315.x/include/aidmp.h create mode 100644 bcmdhd.100.10.315.x/include/bcm_cfg.h create mode 100644 bcmdhd.100.10.315.x/include/bcm_mpool_pub.h create mode 100644 bcmdhd.100.10.315.x/include/bcm_ring.h create mode 100644 bcmdhd.100.10.315.x/include/bcmbloom.h create mode 100644 bcmdhd.100.10.315.x/include/bcmcdc.h create mode 100644 bcmdhd.100.10.315.x/include/bcmdefs.h create mode 100644 bcmdhd.100.10.315.x/include/bcmdevs.h create mode 100644 bcmdhd.100.10.315.x/include/bcmdhcp.h create mode 100644 bcmdhd.100.10.315.x/include/bcmendian.h create mode 100644 bcmdhd.100.10.315.x/include/bcmeth.h create mode 100644 bcmdhd.100.10.315.x/include/bcmevent.h create mode 100644 bcmdhd.100.10.315.x/include/bcmiov.h create mode 100644 bcmdhd.100.10.315.x/include/bcmip.h create mode 100644 bcmdhd.100.10.315.x/include/bcmipv6.h create mode 100644 bcmdhd.100.10.315.x/include/bcmmsgbuf.h create mode 100644 bcmdhd.100.10.315.x/include/bcmnvram.h create mode 100644 bcmdhd.100.10.315.x/include/bcmpcie.h create mode 100644 bcmdhd.100.10.315.x/include/bcmpcispi.h create mode 100644 bcmdhd.100.10.315.x/include/bcmperf.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsdbus.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsdh.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsdpcm.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsdspi.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsdstd.h create mode 100644 bcmdhd.100.10.315.x/include/bcmspi.h create mode 100644 bcmdhd.100.10.315.x/include/bcmspibrcm.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsrom_fmt.h create mode 100644 bcmdhd.100.10.315.x/include/bcmsrom_tbl.h create mode 100644 bcmdhd.100.10.315.x/include/bcmstdlib_s.h create mode 100644 bcmdhd.100.10.315.x/include/bcmtcp.h create mode 100644 
bcmdhd.100.10.315.x/include/bcmtlv.h create mode 100644 bcmdhd.100.10.315.x/include/bcmudp.h create mode 100644 bcmdhd.100.10.315.x/include/bcmutils.h create mode 100644 bcmdhd.100.10.315.x/include/brcm_nl80211.h create mode 100644 bcmdhd.100.10.315.x/include/dbus.h create mode 100644 bcmdhd.100.10.315.x/include/dhd_daemon.h create mode 100644 bcmdhd.100.10.315.x/include/dhdioctl.h create mode 100644 bcmdhd.100.10.315.x/include/dnglevent.h create mode 100644 bcmdhd.100.10.315.x/include/eapol.h create mode 100644 bcmdhd.100.10.315.x/include/epivers.h create mode 100644 bcmdhd.100.10.315.x/include/etd.h create mode 100644 bcmdhd.100.10.315.x/include/ethernet.h create mode 100644 bcmdhd.100.10.315.x/include/event_log.h create mode 100644 bcmdhd.100.10.315.x/include/event_log_payload.h create mode 100644 bcmdhd.100.10.315.x/include/event_log_set.h create mode 100644 bcmdhd.100.10.315.x/include/event_log_tag.h create mode 100644 bcmdhd.100.10.315.x/include/event_trace.h create mode 100644 bcmdhd.100.10.315.x/include/fils.h create mode 100644 bcmdhd.100.10.315.x/include/hnd_armtrap.h create mode 100644 bcmdhd.100.10.315.x/include/hnd_cons.h create mode 100644 bcmdhd.100.10.315.x/include/hnd_debug.h create mode 100644 bcmdhd.100.10.315.x/include/hnd_pktpool.h create mode 100644 bcmdhd.100.10.315.x/include/hnd_pktq.h create mode 100644 bcmdhd.100.10.315.x/include/hnd_trap.h create mode 100644 bcmdhd.100.10.315.x/include/hndchipc.h create mode 100644 bcmdhd.100.10.315.x/include/hndlhl.h create mode 100644 bcmdhd.100.10.315.x/include/hndmem.h create mode 100644 bcmdhd.100.10.315.x/include/hndpmu.h create mode 100644 bcmdhd.100.10.315.x/include/hndsoc.h create mode 100644 bcmdhd.100.10.315.x/include/linux_osl.h create mode 100644 bcmdhd.100.10.315.x/include/linux_pkt.h create mode 100644 bcmdhd.100.10.315.x/include/linuxver.h create mode 100644 bcmdhd.100.10.315.x/include/lpflags.h create mode 100644 bcmdhd.100.10.315.x/include/mbo.h create mode 100644 
bcmdhd.100.10.315.x/include/miniopt.h create mode 100644 bcmdhd.100.10.315.x/include/msf.h create mode 100644 bcmdhd.100.10.315.x/include/msgtrace.h create mode 100644 bcmdhd.100.10.315.x/include/nan.h create mode 100644 bcmdhd.100.10.315.x/include/osl.h create mode 100644 bcmdhd.100.10.315.x/include/osl_decl.h create mode 100644 bcmdhd.100.10.315.x/include/osl_ext.h create mode 100644 bcmdhd.100.10.315.x/include/p2p.h create mode 100644 bcmdhd.100.10.315.x/include/packed_section_end.h create mode 100644 bcmdhd.100.10.315.x/include/packed_section_start.h create mode 100644 bcmdhd.100.10.315.x/include/pcicfg.h create mode 100644 bcmdhd.100.10.315.x/include/pcie_core.h create mode 100644 bcmdhd.100.10.315.x/include/rte_ioctl.h create mode 100644 bcmdhd.100.10.315.x/include/sbchipc.h create mode 100644 bcmdhd.100.10.315.x/include/sbconfig.h create mode 100644 bcmdhd.100.10.315.x/include/sbgci.h create mode 100644 bcmdhd.100.10.315.x/include/sbhnddma.h create mode 100644 bcmdhd.100.10.315.x/include/sbpcmcia.h create mode 100644 bcmdhd.100.10.315.x/include/sbsdio.h create mode 100644 bcmdhd.100.10.315.x/include/sbsdpcmdev.h create mode 100644 bcmdhd.100.10.315.x/include/sbsocram.h create mode 100644 bcmdhd.100.10.315.x/include/sbsysmem.h create mode 100644 bcmdhd.100.10.315.x/include/sdio.h create mode 100644 bcmdhd.100.10.315.x/include/sdioh.h create mode 100644 bcmdhd.100.10.315.x/include/sdiovar.h create mode 100644 bcmdhd.100.10.315.x/include/sdspi.h create mode 100644 bcmdhd.100.10.315.x/include/siutils.h create mode 100644 bcmdhd.100.10.315.x/include/spid.h create mode 100644 bcmdhd.100.10.315.x/include/trxhdr.h create mode 100644 bcmdhd.100.10.315.x/include/typedefs.h create mode 100644 bcmdhd.100.10.315.x/include/usbrdl.h create mode 100644 bcmdhd.100.10.315.x/include/vlan.h create mode 100644 bcmdhd.100.10.315.x/include/wlfc_proto.h create mode 100644 bcmdhd.100.10.315.x/include/wlioctl.h create mode 100644 bcmdhd.100.10.315.x/include/wlioctl_defs.h create mode 
100644 bcmdhd.100.10.315.x/include/wlioctl_utils.h create mode 100644 bcmdhd.100.10.315.x/include/wpa.h create mode 100644 bcmdhd.100.10.315.x/include/wps.h create mode 100644 bcmdhd.100.10.315.x/linux_osl.c create mode 100644 bcmdhd.100.10.315.x/linux_osl_priv.h create mode 100644 bcmdhd.100.10.315.x/linux_pkt.c create mode 100644 bcmdhd.100.10.315.x/pcie_core.c create mode 100644 bcmdhd.100.10.315.x/sbutils.c create mode 100644 bcmdhd.100.10.315.x/siutils.c create mode 100644 bcmdhd.100.10.315.x/siutils_priv.h create mode 100644 bcmdhd.100.10.315.x/wl_android.c create mode 100644 bcmdhd.100.10.315.x/wl_android.h create mode 100644 bcmdhd.100.10.315.x/wl_android_ext.c create mode 100644 bcmdhd.100.10.315.x/wl_cfg80211.c create mode 100644 bcmdhd.100.10.315.x/wl_cfg80211.h create mode 100644 bcmdhd.100.10.315.x/wl_cfg_btcoex.c create mode 100644 bcmdhd.100.10.315.x/wl_cfgnan.c create mode 100644 bcmdhd.100.10.315.x/wl_cfgnan.h create mode 100644 bcmdhd.100.10.315.x/wl_cfgp2p.c create mode 100644 bcmdhd.100.10.315.x/wl_cfgp2p.h create mode 100644 bcmdhd.100.10.315.x/wl_cfgvendor.c create mode 100644 bcmdhd.100.10.315.x/wl_cfgvendor.h create mode 100644 bcmdhd.100.10.315.x/wl_dbg.h create mode 100644 bcmdhd.100.10.315.x/wl_escan.c create mode 100644 bcmdhd.100.10.315.x/wl_escan.h create mode 100644 bcmdhd.100.10.315.x/wl_iw.c create mode 100644 bcmdhd.100.10.315.x/wl_iw.h create mode 100644 bcmdhd.100.10.315.x/wl_linux_mon.c create mode 100644 bcmdhd.100.10.315.x/wl_roam.c create mode 100644 bcmdhd.100.10.315.x/wldev_common.c create mode 100644 bcmdhd.100.10.315.x/wldev_common.h diff --git a/bcmdhd.100.10.315.x/Kconfig b/bcmdhd.100.10.315.x/Kconfig new file mode 100755 index 0000000..f49ae76 --- /dev/null +++ b/bcmdhd.100.10.315.x/Kconfig @@ -0,0 +1,61 @@ +config BCMDHD + tristate "Broadcom FullMAC wireless cards support" + ---help--- + This module adds support for wireless adapters based on + Broadcom FullMAC chipset. 
+ +config BCMDHD_FW_PATH + depends on BCMDHD + string "Firmware path" + default "/system/etc/firmware/fw_bcmdhd.bin" + ---help--- + Path to the firmware file. + +config BCMDHD_NVRAM_PATH + depends on BCMDHD + string "NVRAM path" + default "/system/etc/firmware/nvram.txt" + ---help--- + Path to the calibration file. + +config BCMDHD_WEXT + bool "Enable WEXT support" + depends on BCMDHD && CFG80211 = n + select WIRELESS_EXT + select WEXT_PRIV + help + Enables WEXT support + +choice + prompt "Enable Chip Interface" + depends on BCMDHD + ---help--- + Enable Chip Interface. +config BCMDHD_SDIO + bool "SDIO bus interface support" + depends on BCMDHD && MMC +config BCMDHD_PCIE + bool "PCIe bus interface support" + depends on BCMDHD && PCI +config BCMDHD_USB + bool "USB bus interface support" + depends on BCMDHD && USB +endchoice + +choice + depends on BCMDHD && BCMDHD_SDIO + prompt "Interrupt type" + ---help--- + Interrupt type +config BCMDHD_OOB + depends on BCMDHD && BCMDHD_SDIO + bool "Out-of-Band Interrupt" + default y + ---help--- + Interrupt from WL_HOST_WAKE. +config BCMDHD_SDIO_IRQ + depends on BCMDHD && BCMDHD_SDIO + bool "In-Band Interrupt" + ---help--- + Interrupt from SDIO DAT[1] +endchoice diff --git a/bcmdhd.100.10.315.x/Makefile b/bcmdhd.100.10.315.x/Makefile new file mode 100755 index 0000000..1b4fe83 --- /dev/null +++ b/bcmdhd.100.10.315.x/Makefile @@ -0,0 +1,193 @@ +# bcmdhd +# 1. WL_IFACE_COMB_NUM_CHANNELS must be added if Android version is 4.4 with Kernel version 3.0~3.4, +# otherwise please remove it. 
+ +# if not confiure pci mode, we use sdio mode as default +ifeq ($(CONFIG_BCMDHD_PCIE),) +$(info bcm SDIO driver configured) +CONFIG_DHD_USE_STATIC_BUF := y +endif +#CONFIG_BCMDHD_SDIO := y +#CONFIG_BCMDHD_PCIE := y +#CONFIG_BCMDHD_USB := y +CONFIG_BCMDHD_PROPTXSTATUS := y + +CONFIG_MACH_PLATFORM := y +#CONFIG_BCMDHD_DTS := y + +export CONFIG_BCMDHD = m +export CONFIG_BCMDHD_OOB = y +export CONFIG_VTS_SUPPORT = y + +DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \ + -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ + -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DGET_OTP_MAC_ENABLE \ + -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \ + -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHDTCPACK_SUPPRESS \ + -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT \ + -DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP \ + -DWL_EXT_IAPSTA \ + -DENABLE_INSMOD_NO_FW_LOAD \ + -I$(src) -I$(src)/include + +DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \ + dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \ + dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \ + bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \ + dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o dhd_dbg_ring.o \ + hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o \ + dhd_config.o wl_android_ext.o + +#BCMDHD_SDIO +ifneq ($(CONFIG_BCMDHD_SDIO),) +DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR \ + -DSDTEST -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256 \ + -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD +ifeq ($(CONFIG_BCMDHD_OOB),y) + DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB +ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y) + DHDCFLAGS += -DDISABLE_WOWLAN +endif +else + DHDCFLAGS += -DSDIO_ISR_THREAD +endif + +DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \ + dhd_sdio.o dhd_cdc.o dhd_wlfc.o +endif + +#BCMDHD_PCIE +ifneq ($(CONFIG_BCMDHD_PCIE),) +DHDCFLAGS += 
-DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 \ + -DDONGLE_ENABLE_ISOLATION -DDHD_WAKE_STATUS +ifneq ($(CONFIG_PCI_MSI),) + DHDCFLAGS += -DDHD_USE_MSI +endif + +DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \ + dhd_msgbuf.o +endif + +#BCMDHD_USB +ifneq ($(CONFIG_BCMDHD_USB),) +DHDCFLAGS += -DUSBOS_TX_THREAD -DBCMDBUS -DBCMTRXV2 -DDBUS_USB_LOOPBACK \ + -DBDC +DHDCFLAGS += -DBCM_REQUEST_FW -DEXTERNAL_FW_PATH +#DHDCFLAGS :=$(filter-out -DENABLE_INSMOD_NO_FW_LOAD,$(DHDCFLAGS)) +ifneq ($(CONFIG_BCMDHD_CUSB),) + DHDCFLAGS += -DBCMUSBDEV_COMPOSITE + DHDCFLAGS :=$(filter-out -DENABLE_INSMOD_NO_FW_LOAD,$(DHDCFLAGS)) +endif + +DHDOFILES += dbus.o dbus_usb.o dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o +endif + +#PROPTXSTATUS +ifeq ($(CONFIG_BCMDHD_PROPTXSTATUS),y) +ifneq ($(CONFIG_BCMDHD_USB),) + DHDCFLAGS += -DPROP_TXSTATUS +endif +ifneq ($(CONFIG_BCMDHD_SDIO),) + DHDCFLAGS += -DPROP_TXSTATUS +endif +ifneq ($(CONFIG_CFG80211),) + DHDCFLAGS += -DPROP_TXSTATUS_VSDB +endif +endif + +#VTS_SUPPORT +ifeq ($(CONFIG_VTS_SUPPORT),y) +ifneq ($(CONFIG_CFG80211),) +DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DCUSTOM_FORCE_NODFS_FLAG \ + -DLINKSTAT_SUPPORT -DDEBUGABILITY -DDBG_PKT_MON -DPKT_FILTER_SUPPORT \ + -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT \ + -DCUSTOM_COUNTRY_CODE -DDHD_FW_COREDUMP -DEXPLICIT_DISCIF_CLEANUP + +DHDOFILES += dhd_rtt.o bcm_app_utils.o +endif +endif + +# MESH support for kernel 3.10 later +ifeq ($(CONFIG_WL_MESH),y) + DHDCFLAGS += -DWLMESH +ifneq ($(CONFIG_BCMDHD_PCIE),) + DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000 +endif + DHDCFLAGS += -DDHD_UPDATE_INTF_MAC + DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS)) + DHDCFLAGS :=$(filter-out -DSET_RANDOM_MAC_SOFTAP,$(DHDCFLAGS)) +endif + +ifeq ($(CONFIG_BCMDHD_SDIO),y) +obj-$(CONFIG_BCMDHD) += dhd.o +dhd-objs += $(DHDOFILES) +else +obj-$(CONFIG_BCMDHD) += bcmdhd.o +bcmdhd-objs += $(DHDOFILES) +endif + +ifeq ($(CONFIG_MACH_PLATFORM),y) + DHDOFILES += dhd_gpio.o 
+ifeq ($(CONFIG_BCMDHD_DTS),y) + DHDCFLAGS += -DCONFIG_DTS +else + DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT +endif + DHDCFLAGS += -DCUSTOMER_HW_AMLOGIC +# DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI +endif + +ifeq ($(CONFIG_BCMDHD_AG),y) + DHDCFLAGS += -DBAND_AG +endif + +ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y) +# obj-m += dhd_static_buf.o + DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF + DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP -DCONFIG_DHD_USE_STATIC_BUF +endif + +ifneq ($(CONFIG_WIRELESS_EXT),) + DHDOFILES += wl_iw.o wl_escan.o + DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW -DWL_ESCAN +endif +ifneq ($(CONFIG_CFG80211),) + DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o + DHDOFILES += dhd_cfg80211.o + DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT + DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS + DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65 + DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15 + DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000 + DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7 + DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL + DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES + DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT + DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8 + DHDCFLAGS += -DWL_VIRTUAL_APSTA + DHDCFLAGS += -DPNO_SUPPORT +endif +EXTRA_CFLAGS = $(DHDCFLAGS) +ifeq ($(CONFIG_BCMDHD),m) +EXTRA_LDFLAGS += --strip-debug +endif + +ARCH ?= arm64 +CROSS_COMPILE ?=aarch64-linux-gnu- +KDIR ?=../../../../../../common + +all: bcmdhd_sdio bcmdhd_usb + +bcmdhd_sdio: + $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules CONFIG_BCMDHD_SDIO=y + mv dhd.ko dhd_sdio.ko + +bcmdhd_usb: + $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules CONFIG_BCMDHD_USB=y + mv dhd.ko dhd_usb.ko + +clean: + $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) clean + $(RM) Module.markers + $(RM) modules.order diff --git 
a/bcmdhd.100.10.315.x/aiutils.c b/bcmdhd.100.10.315.x/aiutils.c new file mode 100644 index 0000000..6952983 --- /dev/null +++ b/bcmdhd.100.10.315.x/aiutils.c @@ -0,0 +1,2097 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: aiutils.c 769534 2018-06-26 21:19:11Z $ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" +#include + +#define BCM53573_DMP() (0) +#define BCM4707_DMP() (0) +#define PMU_DMP() (0) +#define GCI_DMP() (0) + +#if defined(BCM_BACKPLANE_TIMEOUT) +static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreuinit); +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) +static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai); +#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */ + +/* EROM parsing */ + +static uint32 +get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) +{ + uint32 ent; + uint inv = 0, nom = 0; + uint32 size = 0; + + while (TRUE) { + ent = R_REG(si_osh(sih), *eromptr); + (*eromptr)++; + + if (mask == 0) + break; + + if ((ent & ER_VALID) == 0) { + inv++; + continue; + } + + if (ent == (ER_END | ER_VALID)) + break; + + if ((ent & mask) == match) + break; + + /* escape condition related EROM size if it has invalid values */ + size += sizeof(*eromptr); + if (size >= ER_SZ_MAX) { + SI_ERROR(("Failed to find end of EROM marker\n")); + break; + } + + nom++; + } + + SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent)); + if (inv + nom) { + SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom)); + } + return ent; +} + +static uint32 +get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh, + uint32 *sizel, uint32 *sizeh) +{ + uint32 asd, sz, szd; + + BCM_REFERENCE(ad); + + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); + if (((asd & ER_TAG1) != ER_ADD) || + (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || + ((asd & AD_ST_MASK) != st)) { + /* This is not what we want, "push" it back */ + (*eromptr)--; + return 0; + } + *addrl = asd & AD_ADDR_MASK; + if (asd & AD_AG32) + *addrh = get_erom_ent(sih, 
eromptr, 0, 0); + else + *addrh = 0; + *sizeh = 0; + sz = asd & AD_SZ_MASK; + if (sz == AD_SZ_SZD) { + szd = get_erom_ent(sih, eromptr, 0, 0); + *sizel = szd & SD_SZ_MASK; + if (szd & SD_SG32) + *sizeh = get_erom_ent(sih, eromptr, 0, 0); + } else + *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); + + SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n", + sp, ad, st, *sizeh, *sizel, *addrh, *addrl)); + + return asd; +} + +/* Parse the enumeration rom to identify all cores + * Erom content format can be found in: + * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf + */ +void +ai_scan(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = (chipcregs_t *)regs; + uint32 erombase, *eromptr, *eromlim; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + + BCM_REFERENCE(devid); + + erombase = R_REG(sii->osh, &cc->eromptr); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + break; + + case PCI_BUS: + /* Set wrappers address */ + sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + /* Now point the window at the erom */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase); + eromptr = regs; + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + eromptr = (uint32 *)(uintptr)erombase; + break; +#endif /* BCMSDIO */ + + case PCMCIA_BUS: + default: + SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype)); + ASSERT(0); + return; + } + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + sii->axi_num_wrappers = 0; + + SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", + OSL_OBFUSCATE_BUF(regs), erombase, + OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSATE_BUF(eromlim))); + while (eromptr < eromlim) { + uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; + uint32 mpd, asd, addrl, addrh, sizel, sizeh; + 
uint i, j, idx; + bool br; + + br = FALSE; + + /* Grok a component */ + cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); + if (cia == (ER_END | ER_VALID)) { + SI_VMSG(("Found END of erom after %d cores\n", sii->numcores)); + return; + } + + cib = get_erom_ent(sih, &eromptr, 0, 0); + + if ((cib & ER_TAG) != ER_CI) { + SI_ERROR(("CIA not followed by CIB\n")); + goto error; + } + + cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; + mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; + crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; + nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; + nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + +#ifdef BCMDBG_SI + SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " + "nsw = %d, nmp = %d & nsp = %d\n", + mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp)); +#else + BCM_REFERENCE(crev); +#endif // endif + + if (BCM4347_CHIP(sih->chip)) { + /* 4347 has more entries for ARM core + * This should apply to all chips but crashes on router + * This is a temp fix to be further analyze + */ + if (nsp == 0) + continue; + } else + { + /* Include Default slave wrapper for timeout monitoring */ + if ((nsp == 0) || +#if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) + ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || +#else + ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) && + (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || +#endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */ + FALSE) { + continue; + } + } + + if ((nmw + nsw == 0)) { + /* A component which is not a core */ + if (cid == OOB_ROUTER_CORE_ID) { + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + if (asd != 0) { + sii->oob_router = addrl; + } + } + if (cid != NS_CCB_CORE_ID && + cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID && + cid != HUB_CORE_ID) + continue; + } + + idx = sii->numcores; + 
+ cores_info->cia[idx] = cia; + cores_info->cib[idx] = cib; + cores_info->coreid[idx] = cid; + + for (i = 0; i < nmp; i++) { + mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + if ((mpd & ER_TAG) != ER_MP) { + SI_ERROR(("Not enough MP entries for component 0x%x\n", cid)); + goto error; + } + SI_VMSG((" Master port %d, mp: %d id: %d\n", i, + (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT, + (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT)); + } + + /* First Slave Address Descriptor should be port 0: + * the main register space for the core + */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + do { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + if (asd != 0) + br = TRUE; + else { + if (br == TRUE) { + break; + } + else if ((addrh != 0) || (sizeh != 0) || + (sizel != SI_CORE_SIZE)) { + SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 =" + "0x%x\n", addrh, sizeh, sizel)); + SI_ERROR(("First Slave ASD for" + "core 0x%04x malformed " + "(0x%08x)\n", cid, asd)); + goto error; + } + } + } while (1); + } + cores_info->coresba[idx] = addrl; + cores_info->coresba_size[idx] = sizel; + /* Get any more ASDs in first port */ + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) { + cores_info->coresba2[idx] = addrl; + cores_info->coresba2_size[idx] = sizel; + } + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + /* To get the first base address of second slave port */ + if ((asd != 0) && (i == 1) && (j == 0)) { + cores_info->csp2ba[idx] = addrl; + cores_info->csp2ba_size[idx] = sizel; + } + if (asd == 0) + break; + j++; + } while (1); + if (j == 0) { + SI_ERROR((" SP %d has no address 
descriptors\n", i)); + goto error; + } + } + + /* Now get master wrappers */ + for (i = 0; i < nmw; i++) { + asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for MW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Master wrapper %d is not 4KB\n", i)); + goto error; + } + if (i == 0) { + cores_info->wrapba[idx] = addrl; + } else if (i == 1) { + cores_info->wrapba2[idx] = addrl; + } else if (i == 2) { + cores_info->wrapba3[idx] = addrl; + } + + if (axi_wrapper && + (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) { + axi_wrapper[sii->axi_num_wrappers].mfg = mfg; + axi_wrapper[sii->axi_num_wrappers].cid = cid; + axi_wrapper[sii->axi_num_wrappers].rev = crev; + axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER; + axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl; + sii->axi_num_wrappers++; + SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x," + "rev:%x, addr:%x, size:%x\n", + sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel)); + } + } + + /* And finally slave wrappers */ + for (i = 0; i < nsw; i++) { + uint fwp = (nsp == 1) ? 
0 : 1; + asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, + &sizel, &sizeh); + + /* cache APB bridge wrapper address for set/clear timeout */ + if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) { + ASSERT(sii->num_br < SI_MAXBR); + sii->br_wrapba[sii->num_br++] = addrl; + } + + if (asd == 0) { + SI_ERROR(("Missing descriptor for SW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Slave wrapper %d is not 4KB\n", i)); + goto error; + } + if ((nmw == 0) && (i == 0)) { + cores_info->wrapba[idx] = addrl; + } else if ((nmw == 0) && (i == 1)) { + cores_info->wrapba2[idx] = addrl; + } else if ((nmw == 0) && (i == 2)) { + cores_info->wrapba3[idx] = addrl; + } + + /* Include all slave wrappers to the list to + * enable and monitor watchdog timeouts + */ + + if (axi_wrapper && + (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) { + axi_wrapper[sii->axi_num_wrappers].mfg = mfg; + axi_wrapper[sii->axi_num_wrappers].cid = cid; + axi_wrapper[sii->axi_num_wrappers].rev = crev; + axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER; + + /* Software WAR as discussed with hardware team, to ensure proper + * Slave Wrapper Base address is set for 4364 Chip ID. + * Current address is 0x1810c000, Corrected the same to 0x1810e000. 
+ * This ensures AXI default slave wrapper is registered along with + * other slave wrapper cores and is useful while generating trap info + * when write operation is tried on Invalid Core / Wrapper register + */ + + if ((CHIPID(sih->chip) == BCM4364_CHIP_ID) && + (cid == DEF_AI_COMP)) { + axi_wrapper[sii->axi_num_wrappers].wrapper_addr = + 0x1810e000; + } else { + axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl; + } + + sii->axi_num_wrappers++; + + SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x," + "rev:%x, addr:%x, size:%x\n", + sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel)); + } + } + +#ifndef BCM_BACKPLANE_TIMEOUT + /* Don't record bridges */ + if (br) + continue; +#endif // endif + + /* Done with core */ + sii->numcores++; + } + + SI_ERROR(("Reached end of erom without finding END\n")); + +error: + sii->numcores = 0; + return; +} + +#define AI_SETCOREIDX_MAPSIZE(coreid) \ + (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE) + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. + */ +static volatile void * +_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrapn) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 addr, wrap, wrap2, wrap3; + volatile void *regs; + + if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) + return (NULL); + + addr = cores_info->coresba[coreidx]; + wrap = cores_info->wrapba[coreidx]; + wrap2 = cores_info->wrapba2[coreidx]; + wrap3 = cores_info->wrapba3[coreidx]; + +#ifdef BCM_BACKPLANE_TIMEOUT + /* No need to disable interrupts while entering/exiting APB bridge core */ + if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) && + (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID)) +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. 
+ */ + ASSERT((sii->intrsenabled_fn == NULL) || + !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + } + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(addr, + AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx])); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + sii->curmap = regs = cores_info->regs[coreidx]; + if (!cores_info->wrappers[coreidx] && (wrap != 0)) { + cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers[coreidx])); + } + if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) { + cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers2[coreidx])); + } + if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) { + cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers3[coreidx])); + } + + if (use_wrapn == 2) { + sii->curwrap = cores_info->wrappers3[coreidx]; + } else if (use_wrapn == 1) { + sii->curwrap = cores_info->wrappers2[coreidx]; + } else { + sii->curwrap = cores_info->wrappers[coreidx]; + } + break; + + case PCI_BUS: +#ifdef BCM_BACKPLANE_TIMEOUT + /* No need to set the BAR0 if core is APB Bridge. 
+ * This is to reduce 2 PCI writes while checkng for errlog + */ + if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); + } + + regs = sii->curmap; + /* point bar0 2nd 4KB window to the primary wrapper */ + if (use_wrapn) + wrap = wrap2; + if (PCIE_GEN2(sii)) + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap); + else + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap); + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + sii->curmap = regs = (void *)((uintptr)addr); + if (use_wrapn) + sii->curwrap = (void *)((uintptr)wrap2); + else + sii->curwrap = (void *)((uintptr)wrap); + break; +#endif /* BCMSDIO */ + + case PCMCIA_BUS: + default: + ASSERT(0); + regs = NULL; + break; + } + + sii->curmap = regs; + sii->curidx = coreidx; + + return regs; +} + +volatile void * +ai_setcoreidx(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 0); +} + +volatile void * +ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 1); +} + +volatile void * +ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 2); +} + +void +ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = NULL; + uint32 erombase, *eromptr, *eromlim; + uint i, j, cidx; + uint32 cia, cib, nmp, nsp; + uint32 asd, addrl, addrh, sizel, sizeh; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == CC_CORE_ID) { + cc = (chipcregs_t *)cores_info->regs[i]; + break; + } + } + if (cc == NULL) + goto error; + + erombase = R_REG(sii->osh, &cc->eromptr); + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + + cidx = sii->curidx; + cia = cores_info->cia[cidx]; + cib = cores_info->cib[cidx]; + 
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + + /* scan for cores */ + while (eromptr < eromlim) { + if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) && + (get_erom_ent(sih, &eromptr, 0, 0) == cib)) { + break; + } + } + + /* skip master ports */ + for (i = 0; i < nmp; i++) + get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + + /* Skip ASDs in port 0 */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + } + + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) + break; + + if (!asidx--) { + *addr = addrl; + *size = sizel; + return; + } + j++; + } while (1); + + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + break; + } + } + +error: + *size = 0; + return; +} + +/* Return the number of address spaces in current core */ +int +ai_numaddrspaces(si_t *sih) +{ + + BCM_REFERENCE(sih); + + return 2; +} + +/* Return the address of the nth address space in the current core + * Arguments: + * sih : Pointer to struct si_t + * spidx : slave port index + * baidx : base address index + */ +uint32 +ai_addrspace(si_t *sih, uint spidx, uint baidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + + if (spidx == CORE_SLAVE_PORT_0) { + if (baidx == CORE_BASE_ADDR_0) + return cores_info->coresba[cidx]; + else if (baidx == CORE_BASE_ADDR_1) + return cores_info->coresba2[cidx]; + } + else if (spidx == CORE_SLAVE_PORT_1) { + if (baidx == CORE_BASE_ADDR_0) + return 
cores_info->csp2ba[cidx]; + } + + SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n", + __FUNCTION__, baidx, spidx)); + + return 0; + +} + +/* Return the size of the nth address space in the current core +* Arguments: +* sih : Pointer to struct si_t +* spidx : slave port index +* baidx : base address index +*/ +uint32 +ai_addrspacesize(si_t *sih, uint spidx, uint baidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + if (spidx == CORE_SLAVE_PORT_0) { + if (baidx == CORE_BASE_ADDR_0) + return cores_info->coresba_size[cidx]; + else if (baidx == CORE_BASE_ADDR_1) + return cores_info->coresba2_size[cidx]; + } + else if (spidx == CORE_SLAVE_PORT_1) { + if (baidx == CORE_BASE_ADDR_0) + return cores_info->csp2ba_size[cidx]; + } + + SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n", + __FUNCTION__, baidx, spidx)); + + return 0; +} + +uint +ai_flag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + if (BCM4707_DMP()) { + SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n", + __FUNCTION__)); + return sii->curidx; + } + if (BCM53573_DMP()) { + SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__)); + return sii->curidx; + } + if (PMU_DMP()) { + uint idx, flag; + idx = sii->curidx; + ai_setcoreidx(sih, SI_CC_IDX); + flag = ai_flag_alt(sih); + ai_setcoreidx(sih, idx); + return flag; + } + + ai = sii->curwrap; + ASSERT(ai != NULL); + + return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f); +} + +uint +ai_flag_alt(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + if (BCM4707_DMP()) { + SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n", + __FUNCTION__)); + return sii->curidx; + } + + ai = sii->curwrap; + + return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK); +} + +void +ai_setint(si_t *sih, int 
siflag) +{ + BCM_REFERENCE(sih); + BCM_REFERENCE(siflag); + +} + +uint +ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset); + + if (mask || val) { + uint32 w = R_REG(sii->osh, addr); + w &= ~mask; + w |= val; + W_REG(sii->osh, addr, w); + } + return (R_REG(sii->osh, addr)); +} + +uint +ai_corevendor(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cia; + + cia = cores_info->cia[sii->curidx]; + return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT); +} + +uint +ai_corerev(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cib; + + cib = cores_info->cib[sii->curidx]; + return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT); +} + +uint +ai_corerev_minor(si_t *sih) +{ + return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) & + SISF_MINORREV_D11_MASK; +} + +bool +ai_iscoreup(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + ai = sii->curwrap; + + return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) && + ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. 
+ */ +uint +ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + volatile uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) + + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + /* readback */ + w = R_REG(sii->osh, r); + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. 
+ */ +uint +ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + volatile uint32 *r = NULL; + uint w = 0; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) + + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. + */ +volatile uint32 * +ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + volatile uint32 *r = NULL; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = 
(volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + ASSERT(sii->curidx == coreidx); + r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff); + } + + return (r); +} + +void +ai_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii = SI_INFO(sih); + volatile uint32 dummy; + uint32 status; + aidmp_t *ai; + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* if core is already in reset, just return */ + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) { + return; + } + + /* ensure there are no pending backplane operations */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + /* if pending backplane ops still, try waiting longer */ + if (status != 0) { + /* 300usecs was sufficient to allow backplane ops to clear for big hammer */ + /* during driver load we may need more time */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000); + /* if still pending ops, continue on and try disable anyway */ + /* this is in big hammer path, so don't call wl_reinit in this case... 
*/ + } + + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + dummy = R_REG(sii->osh, &ai->resetctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + W_REG(sii->osh, &ai->ioctrl, bits); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(10); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +static void +_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); +#if defined(UCM_CORRUPTION_WAR) + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; +#endif // endif + aidmp_t *ai; + volatile uint32 dummy; + uint loop_counter = 10; + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + /* put core into reset state */ + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + OSL_DELAY(10); + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + + W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); +#ifdef UCM_CORRUPTION_WAR + if (cores_info->coreid[sii->curidx] == D11_CORE_ID) { + /* Reset FGC */ + OSL_DELAY(1); + W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC))); + } +#endif /* UCM_CORRUPTION_WAR */ + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) { + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + /* take core out of reset */ + W_REG(sii->osh, &ai->resetctrl, 0); + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, 
&ai->resetstatus) != 0), 300); + } + +#ifdef UCM_CORRUPTION_WAR + /* Pulse FGC after lifting Reset */ + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN)); +#else + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); +#endif /* UCM_CORRUPTION_WAR */ + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); +#ifdef UCM_CORRUPTION_WAR + if (cores_info->coreid[sii->curidx] == D11_CORE_ID) { + /* Reset FGC */ + OSL_DELAY(1); + W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC))); + } +#endif /* UCM_CORRUPTION_WAR */ + OSL_DELAY(1); + +} + +void +ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx = sii->curidx; + + if (cores_info->wrapba3[idx] != 0) { + ai_setcoreidx_3rdwrap(sih, idx); + _ai_core_reset(sih, bits, resetbits); + ai_setcoreidx(sih, idx); + } + + if (cores_info->wrapba2[idx] != 0) { + ai_setcoreidx_2ndwrap(sih, idx); + _ai_core_reset(sih, bits, resetbits); + ai_setcoreidx(sih, idx); + } + + _ai_core_reset(sih, bits, resetbits); +} + +void +ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return; + } + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); + return; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } +} + +uint32 +ai_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return 0; + } + + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", 
+ __FUNCTION__)); + return 0; + } + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } + + return R_REG(sii->osh, &ai->ioctrl); +} + +uint32 +ai_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return 0; + } + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); + return 0; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val); + W_REG(sii->osh, &ai->iostatus, w); + } + + return R_REG(sii->osh, &ai->iostatus); +} + +#if defined(BCMDBG_PHYDUMP) +/* print interesting aidmp registers */ +void +ai_dumpregs(si_t *sih, struct bcmstrbuf *b) +{ + si_info_t *sii = SI_INFO(sih); + osl_t *osh; + aidmp_t *ai; + uint i; + uint32 prev_value = 0; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + uint32 cfg_reg = 0; + uint bar0_win_offset = 0; + + osh = sii->osh; + + /* Save and restore wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } else { + cfg_reg = PCI_BAR0_WIN2; + bar0_win_offset = PCI_BAR0_WIN2_OFFSET; + } + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + + if (prev_value == ID32_INVALID) { + SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value)); + return; + } + } + + bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n", + sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor); + + for (i = 0; i < sii->axi_num_wrappers; i++) { + + if 
(BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0 window to bridge wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset); + } else { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid, + axi_wrapper[i].rev, + axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER", + axi_wrapper[i].wrapper_addr); + + /* BCM4707_DMP() */ + if (BCM4707_CHIP(CHIPID(sih->chip)) && + (axi_wrapper[i].cid == NS_CCB_CORE_ID)) { + bcm_bprintf(b, "Skipping chipcommonb in 4707\n"); + continue; + } + + bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x " + "ioctrlwidth 0x%x iostatuswidth 0x%x\n" + "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n" + "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x " + "errlogaddrlo 0x%x errlogaddrhi 0x%x\n" + "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n" + "intstatus 0x%x config 0x%x itcr 0x%x\n\n", + R_REG(osh, &ai->ioctrlset), + R_REG(osh, &ai->ioctrlclear), + R_REG(osh, &ai->ioctrl), + R_REG(osh, &ai->iostatus), + R_REG(osh, &ai->ioctrlwidth), + R_REG(osh, &ai->iostatuswidth), + R_REG(osh, &ai->resetctrl), + R_REG(osh, &ai->resetstatus), + R_REG(osh, &ai->resetreadid), + R_REG(osh, &ai->resetwriteid), + R_REG(osh, &ai->errlogctrl), + R_REG(osh, &ai->errlogdone), + R_REG(osh, &ai->errlogstatus), + R_REG(osh, &ai->errlogaddrlo), + R_REG(osh, &ai->errlogaddrhi), + R_REG(osh, &ai->errlogid), + R_REG(osh, &ai->errloguser), + R_REG(osh, &ai->errlogflags), + R_REG(osh, &ai->intstatus), + R_REG(osh, &ai->config), + R_REG(osh, &ai->itcr)); + } + + /* Restore the initial wrapper space */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (prev_value && cfg_reg) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } + } +} +#endif // endif + +void +ai_update_backplane_timeouts(si_t *sih, bool 
enable, uint32 timeout_exp, uint32 cid) +{ +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 i; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) | + ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK); + +#ifdef BCM_BACKPLANE_TIMEOUT + uint32 prev_value = 0; + osl_t *osh = sii->osh; + uint32 cfg_reg = 0; + uint32 offset = 0; +#endif /* BCM_BACKPLANE_TIMEOUT */ + + if ((sii->axi_num_wrappers == 0) || +#ifdef BCM_BACKPLANE_TIMEOUT + (!PCIE(sii)) || +#endif /* BCM_BACKPLANE_TIMEOUT */ + FALSE) { + SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n", + __FUNCTION__, sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + /* Save and restore the wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN1(sii)) { + cfg_reg = PCI_BAR0_WIN2; + offset = PCI_BAR0_WIN2_OFFSET; + } else if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } + else { + ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2"); + } + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + if (prev_value == ID32_INVALID) { + SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value)); + return; + } + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + + for (i = 0; i < sii->axi_num_wrappers; ++i) { + + if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) { + SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n", + axi_wrapper[i].mfg, + axi_wrapper[i].cid, + axi_wrapper[i].wrapper_addr)); + continue; + } + + /* Update only given core if requested */ + if ((cid != 0) && (axi_wrapper[i].cid != cid)) { + continue; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0_CORE2_WIN2 to bridge wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + /* set AI 
to BAR0 + Offset corresponding to Gen1 or gen2 */ + ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset); + } + else +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + W_REG(sii->osh, &ai->errlogctrl, errlogctrl); + + SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n", + axi_wrapper[i].mfg, + axi_wrapper[i].cid, + axi_wrapper[i].wrapper_addr, + R_REG(sii->osh, &ai->errlogctrl))); + } + +#ifdef BCM_BACKPLANE_TIMEOUT + /* Restore the initial wrapper space */ + if (prev_value) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ +} + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + +/* slave error is ignored, so account for those cases */ +static uint32 si_ignore_errlog_cnt = 0; + +static bool +ai_ignore_errlog(si_info_t *sii, aidmp_t *ai, + uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts) +{ + uint32 axi_id; +#ifdef BCMPCIE_BTLOG + uint32 axi_id2 = BCM4347_UNUSED_AXI_ID; +#endif /* BCMPCIE_BTLOG */ + uint32 ignore_errsts = AIELS_SLAVE_ERR; + uint32 ignore_hi = BT_CC_SPROM_BADREG_HI; + uint32 ignore_lo = BT_CC_SPROM_BADREG_LO; + uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE; + + /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */ + switch (CHIPID(sii->pub.chip)) { + case BCM4350_CHIP_ID: + axi_id = BCM4350_BT_AXI_ID; + break; + case BCM4345_CHIP_ID: + axi_id = BCM4345_BT_AXI_ID; + break; + case BCM4349_CHIP_GRPID: + axi_id = BCM4349_BT_AXI_ID; + break; + case BCM4364_CHIP_ID: + case BCM4373_CHIP_ID: + axi_id = BCM4364_BT_AXI_ID; + break; +#ifdef BCMPCIE_BTLOG + case BCM4347_CHIP_ID: + case BCM4357_CHIP_ID: + axi_id = BCM4347_CC_AXI_ID; + axi_id2 = BCM4347_PCIE_AXI_ID; + ignore_errsts = AIELS_TIMEOUT; + ignore_hi = BCM4347_BT_ADDR_HI; + ignore_lo = BCM4347_BT_ADDR_LO; + ignore_size = BCM4347_BT_SIZE; + break; +#endif /* BCMPCIE_BTLOG */ + + default: 
+ return FALSE; + } + + /* AXI ID check */ + err_axi_id &= AI_ERRLOGID_AXI_ID_MASK; + if (!(err_axi_id == axi_id || +#ifdef BCMPCIE_BTLOG + (axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2))) +#else + FALSE)) +#endif /* BCMPCIE_BTLOG */ + return FALSE; + + /* slave errors */ + if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts) + return FALSE; + + /* address range check */ + if ((hi_addr != ignore_hi) || + (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) + return FALSE; + +#ifdef BCMPCIE_BTLOG + if (ignore_errsts == AIELS_TIMEOUT) { + /* reset AXI timeout */ + ai_reset_axi_to(sii, ai); + } +#endif /* BCMPCIE_BTLOG */ + + return TRUE; +} +#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */ + +#ifdef BCM_BACKPLANE_TIMEOUT + +/* Function to return the APB bridge details corresponding to the core */ +static bool +ai_get_apb_bridge(si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreuinit) +{ + uint i; + uint32 core_base, core_end; + si_info_t *sii = SI_INFO(sih); + static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0; + uint32 tmp_coreunit = 0; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) + return FALSE; + + /* Most of the time apb bridge query will be for d11 core. 
+ * Maintain the last cache and return if found rather than iterating the table + */ + if (coreidx_cached == coreidx) { + *apb_id = apb_id_cached; + *apb_coreuinit = apb_coreunit_cached; + return TRUE; + } + + core_base = cores_info->coresba[coreidx]; + core_end = core_base + cores_info->coresba_size[coreidx]; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == APB_BRIDGE_ID) { + uint32 apb_base; + uint32 apb_end; + + apb_base = cores_info->coresba[i]; + apb_end = apb_base + cores_info->coresba_size[i]; + + if ((core_base >= apb_base) && + (core_end <= apb_end)) { + /* Current core is attached to this APB bridge */ + *apb_id = apb_id_cached = APB_BRIDGE_ID; + *apb_coreuinit = apb_coreunit_cached = tmp_coreunit; + coreidx_cached = coreidx; + return TRUE; + } + /* Increment the coreunit */ + tmp_coreunit++; + } + } + + return FALSE; +} + +uint32 +ai_clear_backplane_to_fast(si_t *sih, void *addr) +{ + si_info_t *sii = SI_INFO(sih); + volatile void *curmap = sii->curmap; + bool core_reg = FALSE; + + /* Use fast path only for core register access */ + if (((uintptr)addr >= (uintptr)curmap) && + ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) { + /* address being accessed is within current core reg map */ + core_reg = TRUE; + } + + if (core_reg) { + uint32 apb_id, apb_coreuinit; + + if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub), + &apb_id, &apb_coreuinit) == TRUE) { + /* Found the APB bridge corresponding to current core, + * Check for bus errors in APB wrapper + */ + return ai_clear_backplane_to_per_core(sih, + apb_id, apb_coreuinit, NULL); + } + } + + /* Default is to poll for errors on all slave wrappers */ + return si_clear_backplane_to(sih); +} +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) +static bool g_disable_backplane_logs = FALSE; + +#if defined(ETD) +static uint32 last_axi_error = AXI_WRAP_STS_NONE; +static uint32 last_axi_error_core = 0; +static uint32 last_axi_error_wrap = 0; 
+#endif /* ETD */ + +/* + * API to clear the back plane timeout per core. + * Caller may passs optional wrapper address. If present this will be used as + * the wrapper base address. If wrapper base address is provided then caller + * must provide the coreid also. + * If both coreid and wrapper is zero, then err status of current bridge + * will be verified. + */ +uint32 +ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap) +{ + int ret = AXI_WRAP_STS_NONE; + aidmp_t *ai = NULL; + uint32 errlog_status = 0; + si_info_t *sii = SI_INFO(sih); + uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0; + uint32 current_coreidx = si_coreidx(sih); + uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit); + +#if defined(BCM_BACKPLANE_TIMEOUT) + si_axi_error_t * axi_error = sih->err_info ? + &sih->err_info->axi_error[sih->err_info->count] : NULL; +#endif /* BCM_BACKPLANE_TIMEOUT */ + bool restore_core = FALSE; + + if ((sii->axi_num_wrappers == 0) || +#ifdef BCM_BACKPLANE_TIMEOUT + (!PCIE(sii)) || +#endif /* BCM_BACKPLANE_TIMEOUT */ + FALSE) { + SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n", + __FUNCTION__, sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return AXI_WRAP_STS_NONE; + } + + if (wrap != NULL) { + ai = (aidmp_t *)wrap; + } else if (coreid && (target_coreidx != current_coreidx)) { + + if (ai_setcoreidx(sih, target_coreidx) == NULL) { + /* Unable to set the core */ + SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n", + coreid, coreunit, target_coreidx)); + errlog_lo = target_coreidx; + ret = AXI_WRAP_STS_SET_CORE_FAIL; + goto end; + } + + restore_core = TRUE; + ai = (aidmp_t *)si_wrapperregs(sih); + } else { + /* Read error status of current wrapper */ + ai = (aidmp_t *)si_wrapperregs(sih); + + /* Update CoreID to current Code ID */ + coreid = si_coreid(sih); + } + + /* read error log status */ + errlog_status = R_REG(sii->osh, 
&ai->errlogstatus); + + if (errlog_status == ID32_INVALID) { + /* Do not try to peek further */ + SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n", + __FUNCTION__, errlog_status, coreid)); + ret = AXI_WRAP_STS_WRAP_RD_ERR; + errlog_lo = (uint32)(uintptr)&ai->errlogstatus; + goto end; + } + + if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) { + uint32 tmp; + uint32 count = 0; + /* set ErrDone to clear the condition */ + W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK); + + /* SPINWAIT on errlogstatus timeout status bits */ + while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) { + + if (tmp == ID32_INVALID) { + SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n", + __FUNCTION__, errlog_status, tmp)); + ret = AXI_WRAP_STS_WRAP_RD_ERR; + errlog_lo = (uint32)(uintptr)&ai->errlogstatus; + goto end; + } + /* + * Clear again, to avoid getting stuck in the loop, if a new error + * is logged after we cleared the first timeout + */ + W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK); + + count++; + OSL_DELAY(10); + if ((10 * count) > AI_REG_READ_TIMEOUT) { + errlog_status = tmp; + break; + } + } + + errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo); + errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi); + errlog_id = R_REG(sii->osh, &ai->errlogid); + errlog_flags = R_REG(sii->osh, &ai->errlogflags); + + /* we are already in the error path, so OK to check for the slave error */ + if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id, + errlog_status)) { + si_ignore_errlog_cnt++; + goto end; + } + + /* only reset APB Bridge on timeout (not slave error, or dec error) */ + switch (errlog_status & AIELS_TIMEOUT_MASK) { + case AIELS_SLAVE_ERR: + SI_PRINT(("AXI slave error\n")); + ret = AXI_WRAP_STS_SLAVE_ERR; + break; + + case AIELS_TIMEOUT: + ai_reset_axi_to(sii, ai); + ret = AXI_WRAP_STS_TIMEOUT; + break; + + case AIELS_DECODE: + SI_PRINT(("AXI decode error\n")); + ret = AXI_WRAP_STS_DECODE_ERR; + break; + default: + ASSERT(0); /* should be 
impossible */ + } + + SI_PRINT(("\tCoreID: %x\n", coreid)); + SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x" + ", status 0x%08x\n", + errlog_lo, errlog_hi, errlog_id, errlog_flags, + errlog_status)); + } + +end: +#if defined(ETD) + if (ret != AXI_WRAP_STS_NONE) { + last_axi_error = ret; + last_axi_error_core = coreid; + last_axi_error_wrap = (uint32)ai; + } +#endif /* ETD */ + +#if defined(BCM_BACKPLANE_TIMEOUT) + if (axi_error && (ret != AXI_WRAP_STS_NONE)) { + axi_error->error = ret; + axi_error->coreid = coreid; + axi_error->errlog_lo = errlog_lo; + axi_error->errlog_hi = errlog_hi; + axi_error->errlog_id = errlog_id; + axi_error->errlog_flags = errlog_flags; + axi_error->errlog_status = errlog_status; + sih->err_info->count++; + + if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) { + sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1; + SI_PRINT(("AXI Error log overflow\n")); + } + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + + if (restore_core) { + if (ai_setcoreidx(sih, current_coreidx) == NULL) { + /* Unable to set the core */ + return ID32_INVALID; + } + } + + return ret; +} + +/* reset AXI timeout */ +static void +ai_reset_axi_to(si_info_t *sii, aidmp_t *ai) +{ + /* reset APB Bridge */ + OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + /* sync write */ + (void)R_REG(sii->osh, &ai->resetctrl); + /* clear Reset bit */ + AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET)); + /* sync write */ + (void)R_REG(sii->osh, &ai->resetctrl); + SI_PRINT(("AXI timeout\n")); + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) { + SI_PRINT(("reset failed on wrapper %p\n", ai)); + g_disable_backplane_logs = TRUE; + } +} +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + +/* + * This API polls all slave wrappers for errors and returns bit map of + * all reported errors. 
+ * return - bit map of + * AXI_WRAP_STS_NONE + * AXI_WRAP_STS_TIMEOUT + * AXI_WRAP_STS_SLAVE_ERR + * AXI_WRAP_STS_DECODE_ERR + * AXI_WRAP_STS_PCI_RD_ERR + * AXI_WRAP_STS_WRAP_RD_ERR + * AXI_WRAP_STS_SET_CORE_FAIL + * On timeout detection, correspondign bridge will be reset to + * unblock the bus. + * Error reported in each wrapper can be retrieved using the API + * si_get_axi_errlog_info() + */ +uint32 +ai_clear_backplane_to(si_t *sih) +{ + uint32 ret = 0; +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 i; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + +#ifdef BCM_BACKPLANE_TIMEOUT + uint32 prev_value = 0; + osl_t *osh = sii->osh; + uint32 cfg_reg = 0; + uint32 offset = 0; + + if ((sii->axi_num_wrappers == 0) || (!PCIE(sii))) +#else + if (sii->axi_num_wrappers == 0) +#endif // endif + { + SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n", + __FUNCTION__, sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return AXI_WRAP_STS_NONE; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + /* Save and restore wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN1(sii)) { + cfg_reg = PCI_BAR0_WIN2; + offset = PCI_BAR0_WIN2_OFFSET; + } else if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } + else { + ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2"); + } + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + + if (prev_value == ID32_INVALID) { + si_axi_error_t * axi_error = + sih->err_info ? 
+ &sih->err_info->axi_error[sih->err_info->count] : + NULL; + + SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value)); + if (axi_error) { + axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR; + axi_error->errlog_lo = cfg_reg; + sih->err_info->count++; + + if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) { + sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1; + SI_PRINT(("AXI Error log overflow\n")); + } + } + + return ret; + } + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + + for (i = 0; i < sii->axi_num_wrappers; ++i) { + uint32 tmp; + + if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) { + continue; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0_CORE2_WIN2 to bridge wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */ + ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset); + } + else +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0, + DISCARD_QUAL(ai, void)); + + ret |= tmp; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + /* Restore the initial wrapper space */ + if (prev_value) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + + return ret; +} + +uint +ai_num_slaveports(si_t *sih, uint coreidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cib; + + cib = cores_info->cib[coreidx]; + return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT); +} + +#ifdef UART_TRAP_DBG +void +ai_dump_APB_Bridge_registers(si_t *sih) +{ +aidmp_t *ai; +si_info_t *sii = SI_INFO(sih); + + ai = (aidmp_t *) sii->br_wrapba[0]; + printf("APB Bridge 0\n"); + printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x", + R_REG(sii->osh, &ai->errlogaddrlo), + R_REG(sii->osh, 
&ai->errlogaddrhi), + R_REG(sii->osh, &ai->errlogid), + R_REG(sii->osh, &ai->errlogflags)); + printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus)); +} +#endif /* UART_TRAP_DBG */ + +void +ai_force_clocks(si_t *sih, uint clock_state) +{ + + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai, *ai_sec = NULL; + volatile uint32 dummy; + uint32 ioctrl; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + if (cores_info->wrapba2[sii->curidx]) + ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE); + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + + if (clock_state == FORCE_CLK_ON) { + ioctrl = R_REG(sii->osh, &ai->ioctrl); + W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC)); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); + if (ai_sec) { + ioctrl = R_REG(sii->osh, &ai_sec->ioctrl); + W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC)); + dummy = R_REG(sii->osh, &ai_sec->ioctrl); + BCM_REFERENCE(dummy); + } + } else { + ioctrl = R_REG(sii->osh, &ai->ioctrl); + W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC))); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); + if (ai_sec) { + ioctrl = R_REG(sii->osh, &ai_sec->ioctrl); + W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC))); + dummy = R_REG(sii->osh, &ai_sec->ioctrl); + BCM_REFERENCE(dummy); + } + } + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); +} diff --git a/bcmdhd.100.10.315.x/bcm_app_utils.c b/bcmdhd.100.10.315.x/bcm_app_utils.c new file mode 100644 index 0000000..e88dcef --- /dev/null +++ b/bcmdhd.100.10.315.x/bcm_app_utils.c @@ -0,0 +1,1032 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. 
+ * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_app_utils.c 667243 2016-10-26 11:37:48Z $ + */ + +#include + +#ifdef BCMDRIVER +#include +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#else /* BCMDRIVER */ +#include +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif // endif +#endif /* BCMDRIVER */ +#include + +#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL)) +#include /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */ +#endif // endif + +#include +#include +#include + +#ifndef BCMDRIVER +/* Take an array of measurments representing a single channel over time and return + a summary. Currently implemented as a simple average but could easily evolve + into more cpomplex alogrithms. 
+*/ +cca_congest_channel_req_t * +cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent) +{ + int sec; + cca_congest_t totals; + + totals.duration = 0; + totals.congest_ibss = 0; + totals.congest_obss = 0; + totals.interference = 0; + avg->num_secs = 0; + + for (sec = 0; sec < input->num_secs; sec++) { + if (input->secs[sec].duration) { + totals.duration += input->secs[sec].duration; + totals.congest_ibss += input->secs[sec].congest_ibss; + totals.congest_obss += input->secs[sec].congest_obss; + totals.interference += input->secs[sec].interference; + avg->num_secs++; + } + } + avg->chanspec = input->chanspec; + + if (!avg->num_secs || !totals.duration) + return (avg); + + if (percent) { + avg->secs[0].duration = totals.duration / avg->num_secs; + avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration; + avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration; + avg->secs[0].interference = totals.interference * 100/totals.duration; + } else { + avg->secs[0].duration = totals.duration / avg->num_secs; + avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs; + avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs; + avg->secs[0].interference = totals.interference / avg->num_secs; + } + + return (avg); +} + +static void +cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos) +{ + int i; + for (*left = 0, i = 0; i < num_bits; i++) { + if (isset(bitmap, i)) { + (*left)++; + *bit_pos = i; + } + } +} + +static uint8 +spec_to_chan(chanspec_t chspec) +{ + uint8 center_ch, edge, primary, sb; + + center_ch = CHSPEC_CHANNEL(chspec); + + if (CHSPEC_BW_LE20(chspec)) { + return center_ch; + } else { + /* the lower edge of the wide channel is half the bw from + * the center channel. 
+ */ + if (CHSPEC_IS40(chspec)) { + edge = center_ch - CH_20MHZ_APART; + } else { + /* must be 80MHz (until we support more) */ + ASSERT(CHSPEC_IS80(chspec)); + edge = center_ch - CH_40MHZ_APART; + } + + /* find the channel number of the lowest 20MHz primary channel */ + primary = edge + CH_10MHZ_APART; + + /* select the actual subband */ + sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT; + primary = primary + sb * CH_20MHZ_APART; + + return primary; + } +} + +/* + Take an array of measumrements representing summaries of different channels. + Return a recomended channel. + Interference is evil, get rid of that first. + Then hunt for lowest Other bss traffic. + Don't forget that channels with low duration times may not have accurate readings. + For the moment, do not overwrite input array. +*/ +int +cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer) +{ + uint8 *bitmap = NULL; /* 38 Max channels needs 5 bytes = 40 */ + int i, left, winner, ret_val = 0; + uint32 min_obss = 1 << 30; + uint bitmap_sz; + + bitmap_sz = CEIL(num_chans, NBBY); + bitmap = (uint8 *)malloc(bitmap_sz); + if (bitmap == NULL) { + printf("unable to allocate memory\n"); + return BCME_NOMEM; + } + + memset(bitmap, 0, bitmap_sz); + /* Initially, all channels are up for consideration */ + for (i = 0; i < num_chans; i++) { + if (input[i]->chanspec) + setbit(bitmap, i); + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_TOO_FEW; + goto f_exit; + } + + /* Filter for 2.4 GHz Band */ + if (flags & CCA_FLAG_2G_ONLY) { + for (i = 0; i < num_chans; i++) { + if (!CHSPEC_IS2G(input[i]->chanspec)) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_BAND; + goto f_exit; + } + + /* Filter for 5 GHz Band */ + if (flags & CCA_FLAG_5G_ONLY) { + for (i = 0; i < num_chans; i++) { + if (!CHSPEC_IS5G(input[i]->chanspec)) + clrbit(bitmap, i); + } + } + cca_info(bitmap, 
num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_BAND; + goto f_exit; + } + + /* Filter for Duration */ + if (!(flags & CCA_FLAG_IGNORE_DURATION)) { + for (i = 0; i < num_chans; i++) { + if (input[i]->secs[0].duration < CCA_THRESH_MILLI) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_DURATION; + goto f_exit; + } + + /* Filter for 1 6 11 on 2.4 Band */ + if (flags & CCA_FLAGS_PREFER_1_6_11) { + int tmp_channel = spec_to_chan(input[i]->chanspec); + int is2g = CHSPEC_IS2G(input[i]->chanspec); + for (i = 0; i < num_chans; i++) { + if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11) + clrbit(bitmap, i); + } + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_PREF_CHAN; + goto f_exit; + } + + /* Toss high interference interference */ + if (!(flags & CCA_FLAG_IGNORE_INTERFER)) { + for (i = 0; i < num_chans; i++) { + if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE) + clrbit(bitmap, i); + } + cca_info(bitmap, num_chans, &left, &i); + if (!left) { + ret_val = CCA_ERRNO_INTERFER; + goto f_exit; + } + } + + /* Now find lowest obss */ + winner = 0; + for (i = 0; i < num_chans; i++) { + if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) { + winner = i; + min_obss = input[i]->secs[0].congest_obss; + } + } + *answer = input[winner]->chanspec; + f_exit: + free(bitmap); /* free the allocated memory for bitmap */ + return ret_val; +} +#endif /* !BCMDRIVER */ + +/* offset of cntmember by sizeof(uint32) from the first cnt variable, txframe. 
*/ +#define IDX_IN_WL_CNT_VER_6_T(cntmember) \ + ((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32)) + +#define IDX_IN_WL_CNT_VER_11_T(cntmember) \ + ((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe)) \ + / sizeof(uint32)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_6_T \ + ((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables. */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \ + ((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude 64 macstat cnt variables. */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \ + ((sizeof(wl_cnt_wlc_t)) / sizeof(uint32)) + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */ +static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = { + IDX_IN_WL_CNT_VER_6_T(txframe), + IDX_IN_WL_CNT_VER_6_T(txbyte), + IDX_IN_WL_CNT_VER_6_T(txretrans), + IDX_IN_WL_CNT_VER_6_T(txerror), + IDX_IN_WL_CNT_VER_6_T(txctl), + IDX_IN_WL_CNT_VER_6_T(txprshort), + IDX_IN_WL_CNT_VER_6_T(txserr), + IDX_IN_WL_CNT_VER_6_T(txnobuf), + IDX_IN_WL_CNT_VER_6_T(txnoassoc), + IDX_IN_WL_CNT_VER_6_T(txrunt), + IDX_IN_WL_CNT_VER_6_T(txchit), + IDX_IN_WL_CNT_VER_6_T(txcmiss), + IDX_IN_WL_CNT_VER_6_T(txuflo), + IDX_IN_WL_CNT_VER_6_T(txphyerr), + IDX_IN_WL_CNT_VER_6_T(txphycrs), + IDX_IN_WL_CNT_VER_6_T(rxframe), + IDX_IN_WL_CNT_VER_6_T(rxbyte), + IDX_IN_WL_CNT_VER_6_T(rxerror), + IDX_IN_WL_CNT_VER_6_T(rxctl), + IDX_IN_WL_CNT_VER_6_T(rxnobuf), + IDX_IN_WL_CNT_VER_6_T(rxnondata), + IDX_IN_WL_CNT_VER_6_T(rxbadds), + IDX_IN_WL_CNT_VER_6_T(rxbadcm), + IDX_IN_WL_CNT_VER_6_T(rxfragerr), + IDX_IN_WL_CNT_VER_6_T(rxrunt), + IDX_IN_WL_CNT_VER_6_T(rxgiant), + IDX_IN_WL_CNT_VER_6_T(rxnoscb), + 
IDX_IN_WL_CNT_VER_6_T(rxbadproto), + IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_6_T(rxbadda), + IDX_IN_WL_CNT_VER_6_T(rxfilter), + IDX_IN_WL_CNT_VER_6_T(rxoflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_6_T(dmade), + IDX_IN_WL_CNT_VER_6_T(dmada), + IDX_IN_WL_CNT_VER_6_T(dmape), + IDX_IN_WL_CNT_VER_6_T(reset), + IDX_IN_WL_CNT_VER_6_T(tbtt), + IDX_IN_WL_CNT_VER_6_T(txdmawar), + IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_6_T(txfrag), + IDX_IN_WL_CNT_VER_6_T(txmulti), + IDX_IN_WL_CNT_VER_6_T(txfail), + IDX_IN_WL_CNT_VER_6_T(txretry), + IDX_IN_WL_CNT_VER_6_T(txretrie), + IDX_IN_WL_CNT_VER_6_T(rxdup), + IDX_IN_WL_CNT_VER_6_T(txrts), + IDX_IN_WL_CNT_VER_6_T(txnocts), + IDX_IN_WL_CNT_VER_6_T(txnoack), + IDX_IN_WL_CNT_VER_6_T(rxfrag), + IDX_IN_WL_CNT_VER_6_T(rxmulti), + IDX_IN_WL_CNT_VER_6_T(rxcrc), + IDX_IN_WL_CNT_VER_6_T(txfrmsnt), + IDX_IN_WL_CNT_VER_6_T(rxundec), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_6_T(tkipreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpundec), + IDX_IN_WL_CNT_VER_6_T(fourwayfail), + IDX_IN_WL_CNT_VER_6_T(wepundec), + IDX_IN_WL_CNT_VER_6_T(wepicverr), + IDX_IN_WL_CNT_VER_6_T(decsuccess), + IDX_IN_WL_CNT_VER_6_T(tkipicverr), + IDX_IN_WL_CNT_VER_6_T(wepexcluded), + IDX_IN_WL_CNT_VER_6_T(txchanrej), + IDX_IN_WL_CNT_VER_6_T(psmwds), + IDX_IN_WL_CNT_VER_6_T(phywatchdog), + IDX_IN_WL_CNT_VER_6_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_6_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_6_T(atim_suppress_count), + 
IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_6_T(rx1mbps), + IDX_IN_WL_CNT_VER_6_T(rx2mbps), + IDX_IN_WL_CNT_VER_6_T(rx5mbps5), + IDX_IN_WL_CNT_VER_6_T(rx6mbps), + IDX_IN_WL_CNT_VER_6_T(rx9mbps), + IDX_IN_WL_CNT_VER_6_T(rx11mbps), + IDX_IN_WL_CNT_VER_6_T(rx12mbps), + IDX_IN_WL_CNT_VER_6_T(rx18mbps), + IDX_IN_WL_CNT_VER_6_T(rx24mbps), + IDX_IN_WL_CNT_VER_6_T(rx36mbps), + IDX_IN_WL_CNT_VER_6_T(rx48mbps), + IDX_IN_WL_CNT_VER_6_T(rx54mbps), + IDX_IN_WL_CNT_VER_6_T(rx108mbps), + IDX_IN_WL_CNT_VER_6_T(rx162mbps), + IDX_IN_WL_CNT_VER_6_T(rx216mbps), + IDX_IN_WL_CNT_VER_6_T(rx270mbps), + IDX_IN_WL_CNT_VER_6_T(rx324mbps), + IDX_IN_WL_CNT_VER_6_T(rx378mbps), + IDX_IN_WL_CNT_VER_6_T(rx432mbps), + IDX_IN_WL_CNT_VER_6_T(rx486mbps), + IDX_IN_WL_CNT_VER_6_T(rx540mbps), + IDX_IN_WL_CNT_VER_6_T(rfdisable), + IDX_IN_WL_CNT_VER_6_T(txexptime), + IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_6_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst) +}; + +#define INVALID_IDX ((uint8)(-1)) + +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */ +static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = { + IDX_IN_WL_CNT_VER_11_T(txframe), + IDX_IN_WL_CNT_VER_11_T(txbyte), + IDX_IN_WL_CNT_VER_11_T(txretrans), + IDX_IN_WL_CNT_VER_11_T(txerror), + 
IDX_IN_WL_CNT_VER_11_T(txctl), + IDX_IN_WL_CNT_VER_11_T(txprshort), + IDX_IN_WL_CNT_VER_11_T(txserr), + IDX_IN_WL_CNT_VER_11_T(txnobuf), + IDX_IN_WL_CNT_VER_11_T(txnoassoc), + IDX_IN_WL_CNT_VER_11_T(txrunt), + IDX_IN_WL_CNT_VER_11_T(txchit), + IDX_IN_WL_CNT_VER_11_T(txcmiss), + IDX_IN_WL_CNT_VER_11_T(txuflo), + IDX_IN_WL_CNT_VER_11_T(txphyerr), + IDX_IN_WL_CNT_VER_11_T(txphycrs), + IDX_IN_WL_CNT_VER_11_T(rxframe), + IDX_IN_WL_CNT_VER_11_T(rxbyte), + IDX_IN_WL_CNT_VER_11_T(rxerror), + IDX_IN_WL_CNT_VER_11_T(rxctl), + IDX_IN_WL_CNT_VER_11_T(rxnobuf), + IDX_IN_WL_CNT_VER_11_T(rxnondata), + IDX_IN_WL_CNT_VER_11_T(rxbadds), + IDX_IN_WL_CNT_VER_11_T(rxbadcm), + IDX_IN_WL_CNT_VER_11_T(rxfragerr), + IDX_IN_WL_CNT_VER_11_T(rxrunt), + IDX_IN_WL_CNT_VER_11_T(rxgiant), + IDX_IN_WL_CNT_VER_11_T(rxnoscb), + IDX_IN_WL_CNT_VER_11_T(rxbadproto), + IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_11_T(rxbadda), + IDX_IN_WL_CNT_VER_11_T(rxfilter), + IDX_IN_WL_CNT_VER_11_T(rxoflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_11_T(dmade), + IDX_IN_WL_CNT_VER_11_T(dmada), + IDX_IN_WL_CNT_VER_11_T(dmape), + IDX_IN_WL_CNT_VER_11_T(reset), + IDX_IN_WL_CNT_VER_11_T(tbtt), + IDX_IN_WL_CNT_VER_11_T(txdmawar), + IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_11_T(txfrag), + IDX_IN_WL_CNT_VER_11_T(txmulti), + IDX_IN_WL_CNT_VER_11_T(txfail), + IDX_IN_WL_CNT_VER_11_T(txretry), + IDX_IN_WL_CNT_VER_11_T(txretrie), + IDX_IN_WL_CNT_VER_11_T(rxdup), + IDX_IN_WL_CNT_VER_11_T(txrts), + IDX_IN_WL_CNT_VER_11_T(txnocts), + IDX_IN_WL_CNT_VER_11_T(txnoack), + IDX_IN_WL_CNT_VER_11_T(rxfrag), + IDX_IN_WL_CNT_VER_11_T(rxmulti), + 
IDX_IN_WL_CNT_VER_11_T(rxcrc), + IDX_IN_WL_CNT_VER_11_T(txfrmsnt), + IDX_IN_WL_CNT_VER_11_T(rxundec), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_11_T(tkipreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpundec), + IDX_IN_WL_CNT_VER_11_T(fourwayfail), + IDX_IN_WL_CNT_VER_11_T(wepundec), + IDX_IN_WL_CNT_VER_11_T(wepicverr), + IDX_IN_WL_CNT_VER_11_T(decsuccess), + IDX_IN_WL_CNT_VER_11_T(tkipicverr), + IDX_IN_WL_CNT_VER_11_T(wepexcluded), + IDX_IN_WL_CNT_VER_11_T(txchanrej), + IDX_IN_WL_CNT_VER_11_T(psmwds), + IDX_IN_WL_CNT_VER_11_T(phywatchdog), + IDX_IN_WL_CNT_VER_11_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_11_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_11_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_11_T(rx1mbps), + IDX_IN_WL_CNT_VER_11_T(rx2mbps), + IDX_IN_WL_CNT_VER_11_T(rx5mbps5), + IDX_IN_WL_CNT_VER_11_T(rx6mbps), + IDX_IN_WL_CNT_VER_11_T(rx9mbps), + IDX_IN_WL_CNT_VER_11_T(rx11mbps), + IDX_IN_WL_CNT_VER_11_T(rx12mbps), + IDX_IN_WL_CNT_VER_11_T(rx18mbps), + IDX_IN_WL_CNT_VER_11_T(rx24mbps), + IDX_IN_WL_CNT_VER_11_T(rx36mbps), + IDX_IN_WL_CNT_VER_11_T(rx48mbps), + IDX_IN_WL_CNT_VER_11_T(rx54mbps), + IDX_IN_WL_CNT_VER_11_T(rx108mbps), + IDX_IN_WL_CNT_VER_11_T(rx162mbps), + IDX_IN_WL_CNT_VER_11_T(rx216mbps), + IDX_IN_WL_CNT_VER_11_T(rx270mbps), + IDX_IN_WL_CNT_VER_11_T(rx324mbps), + IDX_IN_WL_CNT_VER_11_T(rx378mbps), + IDX_IN_WL_CNT_VER_11_T(rx432mbps), + IDX_IN_WL_CNT_VER_11_T(rx486mbps), + IDX_IN_WL_CNT_VER_11_T(rx540mbps), + IDX_IN_WL_CNT_VER_11_T(rfdisable), + IDX_IN_WL_CNT_VER_11_T(txexptime), + IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc), + 
IDX_IN_WL_CNT_VER_11_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_11_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst), + IDX_IN_WL_CNT_VER_11_T(dma_hang), + IDX_IN_WL_CNT_VER_11_T(reinit), + IDX_IN_WL_CNT_VER_11_T(pstatxucast), + IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc), + IDX_IN_WL_CNT_VER_11_T(pstarxucast), + IDX_IN_WL_CNT_VER_11_T(pstarxbcmc), + IDX_IN_WL_CNT_VER_11_T(pstatxbcmc), + IDX_IN_WL_CNT_VER_11_T(cso_passthrough), + IDX_IN_WL_CNT_VER_11_T(cso_normal), + IDX_IN_WL_CNT_VER_11_T(chained), + IDX_IN_WL_CNT_VER_11_T(chainedsz1), + IDX_IN_WL_CNT_VER_11_T(unchained), + IDX_IN_WL_CNT_VER_11_T(maxchainsz), + IDX_IN_WL_CNT_VER_11_T(currchainsz), + IDX_IN_WL_CNT_VER_11_T(pciereset), + IDX_IN_WL_CNT_VER_11_T(cfgrestore), + IDX_IN_WL_CNT_VER_11_T(reinitreason), + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 2, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7, + IDX_IN_WL_CNT_VER_11_T(rxrtry), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu), + IDX_IN_WL_CNT_VER_11_T(txbar), + IDX_IN_WL_CNT_VER_11_T(rxbar), + IDX_IN_WL_CNT_VER_11_T(txpspoll), + IDX_IN_WL_CNT_VER_11_T(rxpspoll), + IDX_IN_WL_CNT_VER_11_T(txnull), + IDX_IN_WL_CNT_VER_11_T(rxnull), + IDX_IN_WL_CNT_VER_11_T(txqosnull), + IDX_IN_WL_CNT_VER_11_T(rxqosnull), + IDX_IN_WL_CNT_VER_11_T(txassocreq), + IDX_IN_WL_CNT_VER_11_T(rxassocreq), + IDX_IN_WL_CNT_VER_11_T(txreassocreq), + 
IDX_IN_WL_CNT_VER_11_T(rxreassocreq), + IDX_IN_WL_CNT_VER_11_T(txdisassoc), + IDX_IN_WL_CNT_VER_11_T(rxdisassoc), + IDX_IN_WL_CNT_VER_11_T(txassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxassocrsp), + IDX_IN_WL_CNT_VER_11_T(txreassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxreassocrsp), + IDX_IN_WL_CNT_VER_11_T(txauth), + IDX_IN_WL_CNT_VER_11_T(rxauth), + IDX_IN_WL_CNT_VER_11_T(txdeauth), + IDX_IN_WL_CNT_VER_11_T(rxdeauth), + IDX_IN_WL_CNT_VER_11_T(txprobereq), + IDX_IN_WL_CNT_VER_11_T(rxprobereq), + IDX_IN_WL_CNT_VER_11_T(txprobersp), + IDX_IN_WL_CNT_VER_11_T(rxprobersp), + IDX_IN_WL_CNT_VER_11_T(txaction), + IDX_IN_WL_CNT_VER_11_T(rxaction), + IDX_IN_WL_CNT_VER_11_T(ampdu_wds), + IDX_IN_WL_CNT_VER_11_T(txlost), + IDX_IN_WL_CNT_VER_11_T(txdatamcast), + IDX_IN_WL_CNT_VER_11_T(txdatabcast), + INVALID_IDX, + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + INVALID_IDX, + IDX_IN_WL_CNT_VER_11_T(txbcast), + IDX_IN_WL_CNT_VER_11_T(txdropped), + IDX_IN_WL_CNT_VER_11_T(rxbcast), + IDX_IN_WL_CNT_VER_11_T(rxdropped) +}; + +/* Index conversion table from wl_cnt_ver_11_t to + * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t + */ +static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + IDX_IN_WL_CNT_VER_11_T(txmpdu), + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + 
IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + IDX_IN_WL_CNT_VER_11_T(rxnodelim), + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + +/* For mcst offsets that were not used. 
(2 Pads) */ +#define INVALID_MCST_IDX ((uint8)(-1)) +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + 
IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_6_T(txallfrm), + IDX_IN_WL_CNT_VER_6_T(txrtsfrm), + IDX_IN_WL_CNT_VER_6_T(txctsfrm), + IDX_IN_WL_CNT_VER_6_T(txackfrm), + IDX_IN_WL_CNT_VER_6_T(txdnlfrm), + IDX_IN_WL_CNT_VER_6_T(txbcnfrm), + IDX_IN_WL_CNT_VER_6_T(txfunfl), + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_6_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(txtplunfl), + IDX_IN_WL_CNT_VER_6_T(txphyerror), + IDX_IN_WL_CNT_VER_6_T(pktengrxducast), + IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_6_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_6_T(rxbadfcs), + IDX_IN_WL_CNT_VER_6_T(rxbadplcp), + IDX_IN_WL_CNT_VER_6_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxstrt), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_6_T(rxrtsucast), + IDX_IN_WL_CNT_VER_6_T(rxctsucast), + IDX_IN_WL_CNT_VER_6_T(rxackucast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmocast), + 
IDX_IN_WL_CNT_VER_6_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxrtsocast), + IDX_IN_WL_CNT_VER_6_T(rxctsocast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_6_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_6_T(rxrsptmout), + IDX_IN_WL_CNT_VER_6_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_6_T(txsfovfl), + IDX_IN_WL_CNT_VER_6_T(pmqovfl), + IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_6_T(txcgprsfail), + IDX_IN_WL_CNT_VER_6_T(txcgprssuc), + IDX_IN_WL_CNT_VER_6_T(prs_timeout), + IDX_IN_WL_CNT_VER_6_T(rxnack), + IDX_IN_WL_CNT_VER_6_T(frmscons), + IDX_IN_WL_CNT_VER_6_T(txnack), + IDX_IN_WL_CNT_VER_6_T(rxback), + IDX_IN_WL_CNT_VER_6_T(txback), + IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxdrop20s), + IDX_IN_WL_CNT_VER_6_T(rxtoolate), + IDX_IN_WL_CNT_VER_6_T(bphy_badplcp) +}; + +/* copy wlc layer counters from old type cntbuf to wl_cnt_wlc_t type. */ +static int +wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx) +{ + uint i; + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + /* Init wlccnt with invalid value. 
Unchanged value will not be printed out */ + for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) { + dst[i] = INVALID_CNT_VAL; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) { + if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver6t_to_wlcntwlct[i]]; + } + } else { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) { + if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) { + if (wlcntver11t_to_wlcntwlct[i] == INVALID_IDX) { + continue; + } + else { + /* src buffer does not have counters from here */ + break; + } + } + dst[i] = src[wlcntver11t_to_wlcntwlct[i]]; + } + } + return BCME_OK; +} + +/* copy macstat counters from old type cntbuf to wl_cnt_v_le10_mcst_t type. */ +static int +wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_6_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]]; + } + } + } else { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_11_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]]; + } + } + } + return BCME_OK; +} + +static int +wl_copy_macstat_ver11(uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]]; + } + return BCME_OK; +} + +/** + * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format. 
+ * Parameters: + * cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW. + * Newly translated xtlv format is written to this pointer. + * buflen: length of the "cntbuf" without any padding. + * corerev: chip core revision of the driver/FW. + */ +int +wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) +{ + wl_cnt_wlc_t *wlccnt = NULL; + uint32 *macstat = NULL; + xtlv_desc_t xtlv_desc[3]; + uint16 mcst_xtlv_id; + int res = BCME_OK; + wl_cnt_info_t *cntinfo = cntbuf; + uint8 *xtlvbuf_p = cntinfo->data; + uint16 ver = cntinfo->version; + uint16 xtlvbuflen = (uint16)buflen; + uint16 src_max_idx; +#ifdef BCMDRIVER + osl_t *osh = ctx; +#else + BCM_REFERENCE(ctx); +#endif // endif + + if (ver >= WL_CNT_VERSION_XTLV) { + /* Already in xtlv format. */ + goto exit; + } + +#ifdef BCMDRIVER + wlccnt = MALLOC(osh, sizeof(*wlccnt)); + macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ); +#else + wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt)); + macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ); +#endif // endif + if (!wlccnt || !macstat) { + printf("%s: malloc fail!\n", __FUNCTION__); + res = BCME_NOMEM; + goto exit; + } + + /* Check if the max idx in the struct exceeds the boundary of uint8 */ + if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) || + NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n"); + res = BCME_ERROR; + goto exit; + } + + /* Exclude version and length fields in either wlc_cnt_ver_6_t or wlc_cnt_ver_11_t */ + src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32); + if (src_max_idx > (uint8)(-1)) { + printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" + " to be of uint16 instead of uint8\n" + "Try updating wl utility to the latest.\n"); + src_max_idx = (uint8)(-1); + } + + /* Copy wlc layer counters to wl_cnt_wlc_t */ + res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 
*)cntinfo->data, (uint8)src_max_idx); + if (res != BCME_OK) { + printf("wl_copy_wlccnt fail!\n"); + goto exit; + } + + /* Copy macstat counters to wl_cnt_wlc_t */ + if (ver == WL_CNT_VERSION_11) { + res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_ver11 fail!\n"); + goto exit; + } + if (corerev >= 40) { + mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1; + } else { + mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1; + } + } else { + res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data); + if (res != BCME_OK) { + printf("wl_copy_macstat_upto_ver10 fail!\n"); + goto exit; + } + mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE; + } + + xtlv_desc[0].type = WL_CNT_XTLV_WLC; + xtlv_desc[0].len = sizeof(*wlccnt); + xtlv_desc[0].ptr = wlccnt; + + xtlv_desc[1].type = mcst_xtlv_id; + xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ; + xtlv_desc[1].ptr = macstat; + + xtlv_desc[2].type = 0; + xtlv_desc[2].len = 0; + xtlv_desc[2].ptr = NULL; + + memset(cntbuf, 0, buflen); + + res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen, + xtlv_desc, BCM_XTLV_OPTION_ALIGN32); + cntinfo->datalen = (buflen - xtlvbuflen); +exit: +#ifdef BCMDRIVER + if (wlccnt) { + MFREE(osh, wlccnt, sizeof(*wlccnt)); + } + if (macstat) { + MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ); + } +#else + if (wlccnt) { + free(wlccnt); + } + if (macstat) { + free(macstat); + } +#endif // endif + return res; +} diff --git a/bcmdhd.100.10.315.x/bcmbloom.c b/bcmdhd.100.10.315.x/bcmbloom.c new file mode 100644 index 0000000..420a09b --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmbloom.c @@ -0,0 +1,240 @@ +/* + * Bloom filter support + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmbloom.c 714397 2017-08-04 08:24:38Z $ + */ + +#include +#include + +#include + +#ifdef BCMDRIVER +#include +#include +#else /* !BCMDRIVER */ +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif // endif +#endif /* !BCMDRIVER */ +#include + +#include + +#define BLOOM_BIT_LEN(_x) ((_x) << 3) + +struct bcm_bloom_filter { + void *cb_ctx; + uint max_hash; + bcm_bloom_hash_t *hash; /* array of hash functions */ + uint filter_size; /* in bytes */ + uint8 *filter; /* can be NULL for validate only */ +}; + +/* public interface */ +/* Allocate a bloom filter with room for max_hash hash callbacks and an optional + * filter_size-byte bit array (filter_size == 0 => validate-only filter). + * On success *bloom receives the new filter; on failure everything allocated + * so far is released via bcm_bloom_destroy(). + */ +int +bcm_bloom_create(bcm_bloom_alloc_t alloc_cb, + bcm_bloom_free_t free_cb, void *cb_ctx, uint max_hash, + uint filter_size, bcm_bloom_filter_t **bloom) +{ + int err = BCME_OK; + bcm_bloom_filter_t *bp = NULL; + + if (!bloom || !alloc_cb || (max_hash == 0)) { + err = BCME_BADARG; + goto done; + } + + bp = (*alloc_cb)(cb_ctx, sizeof(*bp)); + if (!bp) { + err = BCME_NOMEM; + goto done; + } + + memset(bp, 0, sizeof(*bp)); + bp->cb_ctx = cb_ctx; + bp->max_hash = max_hash; + bp->hash = (*alloc_cb)(cb_ctx, sizeof(*bp->hash) * max_hash); + /* check the allocation before touching it; previously the memset ran + * first and dereferenced NULL whenever alloc_cb failed + */ + if (!bp->hash) { + err = BCME_NOMEM; + goto done; + } + memset(bp->hash, 0, sizeof(*bp->hash) * max_hash); + + if (filter_size > 0) { + bp->filter = (*alloc_cb)(cb_ctx, filter_size); + if (!bp->filter) { + err = BCME_NOMEM; + goto done; + } + bp->filter_size = filter_size; + memset(bp->filter, 0, filter_size); + } + + *bloom = bp; + +done: + if (err != BCME_OK) + bcm_bloom_destroy(&bp, free_cb); + + return err; +} + +/* Release a filter created by bcm_bloom_create and NULL out the caller's + * pointer. Tolerates NULL/already-destroyed input; free_cb is required. + */ +int +bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb) +{ + int err = BCME_OK; + bcm_bloom_filter_t *bp; + + if (!bloom || !*bloom || !free_cb) + goto done; + + bp = *bloom; + *bloom = NULL; + + if (bp->filter) + (*free_cb)(bp->cb_ctx, bp->filter, bp->filter_size); + if (bp->hash) + (*free_cb)(bp->cb_ctx, bp->hash, + sizeof(*bp->hash) * bp->max_hash); + (*free_cb)(bp->cb_ctx, bp, sizeof(*bp)); + +done: + return err; +} + +int
+bcm_bloom_add_hash(bcm_bloom_filter_t *bp, bcm_bloom_hash_t hash, uint *idx) +{ + uint i; + + if (!bp || !hash || !idx) + return BCME_BADARG; + + /* take the first unused hash slot */ + for (i = 0; i < bp->max_hash; ++i) { + if (bp->hash[i] == NULL) + break; + } + + if (i >= bp->max_hash) + return BCME_NORESOURCE; + + bp->hash[i] = hash; + *idx = i; + return BCME_OK; +} + +/* Unregister the hash callback at slot idx (slot becomes reusable). */ +int +bcm_bloom_remove_hash(bcm_bloom_filter_t *bp, uint idx) +{ + if (!bp) + return BCME_BADARG; + + if (idx >= bp->max_hash) + return BCME_NOTFOUND; + + bp->hash[idx] = NULL; + return BCME_OK; +} + +/* Test tag against the filter bits in buf (or the internal bit array when + * buf is NULL/empty). + * NOTE(review): declared bool but actually returns BCME_* codes — BCME_OK (0, + * i.e. false as a bool) when the tag matches, BCME_NOTFOUND otherwise; callers + * must compare against BCME_OK rather than use the result as a boolean — + * confirm the prototype in the header. Also unlike its siblings it does not + * NULL-check bp before dereferencing it. + */ +bool +bcm_bloom_is_member(bcm_bloom_filter_t *bp, + const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len) +{ + uint i; + int err = BCME_OK; + + if (!tag || (tag_len == 0)) /* empty tag is always a member */ + goto done; + + /* use internal buffer if none was specified */ + if (!buf || (buf_len == 0)) { + if (!bp->filter) /* every one is a member of empty filter */ + goto done; + + buf = bp->filter; + buf_len = bp->filter_size; + } + + for (i = 0; i < bp->max_hash; ++i) { + uint pos; + if (!bp->hash[i]) + continue; + pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len); + + /* all bits must be set for a match */ + if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) { + err = BCME_NOTFOUND; + break; + } + } + +done: + return err; +} + +/* Set the filter bit chosen by each registered hash for this tag. */ +int +bcm_bloom_add_member(bcm_bloom_filter_t *bp, const uint8 *tag, uint tag_len) +{ + uint i; + + if (!bp || !tag || (tag_len == 0)) + return BCME_BADARG; + + if (!bp->filter) /* validate only */ + return BCME_UNSUPPORTED; + + for (i = 0; i < bp->max_hash; ++i) { + uint pos; + if (!bp->hash[i]) + continue; + pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len); + setbit(bp->filter, pos % BLOOM_BIT_LEN(bp->filter_size)); + } + + return BCME_OK; +} + +/* Copy the raw filter bit array into buf; *buf_len (if given) always receives + * the full size, even when buf_size is too small (BCME_BUFTOOSHORT). + */ +int bcm_bloom_get_filter_data(bcm_bloom_filter_t *bp, + uint buf_size, uint8 *buf, uint *buf_len) +{ + if (!bp) + return BCME_BADARG; + + if (buf_len) + *buf_len = bp->filter_size; + + if (buf_size < bp->filter_size) + return BCME_BUFTOOSHORT; + + if (bp->filter &&
bp->filter_size) + memcpy(buf, bp->filter, bp->filter_size); + + return BCME_OK; +} diff --git a/bcmdhd.100.10.315.x/bcmevent.c b/bcmdhd.100.10.315.x/bcmevent.c new file mode 100644 index 0000000..a9fcb37 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmevent.c @@ -0,0 +1,394 @@ +/* + * bcmevent read-only data shared by kernel or app layers + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmevent.c 755881 2018-04-05 06:32:32Z $ + */ + +#include +#include +#include +#include +#include +#include +#include <802.11.h> + +/* Table of event name strings for UIs and debugging dumps */ +typedef struct { + uint event; + const char *name; +} bcmevent_name_str_t; + +/* Use the actual name for event tracing */ +#define BCMEVENT_NAME(_event) {(_event), #_event} + +static const bcmevent_name_str_t bcmevent_names[] = { + BCMEVENT_NAME(WLC_E_SET_SSID), + BCMEVENT_NAME(WLC_E_JOIN), + BCMEVENT_NAME(WLC_E_START), + BCMEVENT_NAME(WLC_E_AUTH), + BCMEVENT_NAME(WLC_E_AUTH_IND), + BCMEVENT_NAME(WLC_E_DEAUTH), + BCMEVENT_NAME(WLC_E_DEAUTH_IND), + BCMEVENT_NAME(WLC_E_ASSOC), + BCMEVENT_NAME(WLC_E_ASSOC_IND), + BCMEVENT_NAME(WLC_E_REASSOC), + BCMEVENT_NAME(WLC_E_REASSOC_IND), + BCMEVENT_NAME(WLC_E_DISASSOC), + BCMEVENT_NAME(WLC_E_DISASSOC_IND), + BCMEVENT_NAME(WLC_E_QUIET_START), + BCMEVENT_NAME(WLC_E_QUIET_END), + BCMEVENT_NAME(WLC_E_BEACON_RX), + BCMEVENT_NAME(WLC_E_LINK), + BCMEVENT_NAME(WLC_E_MIC_ERROR), + BCMEVENT_NAME(WLC_E_NDIS_LINK), + BCMEVENT_NAME(WLC_E_ROAM), + BCMEVENT_NAME(WLC_E_TXFAIL), + BCMEVENT_NAME(WLC_E_PMKID_CACHE), + BCMEVENT_NAME(WLC_E_RETROGRADE_TSF), + BCMEVENT_NAME(WLC_E_PRUNE), + BCMEVENT_NAME(WLC_E_AUTOAUTH), + BCMEVENT_NAME(WLC_E_EAPOL_MSG), + BCMEVENT_NAME(WLC_E_SCAN_COMPLETE), + BCMEVENT_NAME(WLC_E_ADDTS_IND), + BCMEVENT_NAME(WLC_E_DELTS_IND), + BCMEVENT_NAME(WLC_E_BCNSENT_IND), + BCMEVENT_NAME(WLC_E_BCNRX_MSG), + BCMEVENT_NAME(WLC_E_BCNLOST_MSG), + BCMEVENT_NAME(WLC_E_ROAM_PREP), + BCMEVENT_NAME(WLC_E_PFN_NET_FOUND), + BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), + BCMEVENT_NAME(WLC_E_PFN_NET_LOST), + BCMEVENT_NAME(WLC_E_JOIN_START), + BCMEVENT_NAME(WLC_E_ROAM_START), + BCMEVENT_NAME(WLC_E_ASSOC_START), +#if defined(IBSS_PEER_DISCOVERY_EVENT) + BCMEVENT_NAME(WLC_E_IBSS_ASSOC), +#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */ + BCMEVENT_NAME(WLC_E_RADIO), + BCMEVENT_NAME(WLC_E_PSM_WATCHDOG), + 
BCMEVENT_NAME(WLC_E_PROBREQ_MSG), + BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND), + BCMEVENT_NAME(WLC_E_PSK_SUP), + BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED), + BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME), + BCMEVENT_NAME(WLC_E_ICV_ERROR), + BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR), + BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR), + BCMEVENT_NAME(WLC_E_TRACE), + BCMEVENT_NAME(WLC_E_IF), +#ifdef WLP2P + BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE), +#endif // endif + BCMEVENT_NAME(WLC_E_RSSI), + BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE), + BCMEVENT_NAME(WLC_E_ACTION_FRAME), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE), +#ifdef BCMWAPI_WAI + BCMEVENT_NAME(WLC_E_WAI_STA_EVENT), + BCMEVENT_NAME(WLC_E_WAI_MSG), +#endif /* BCMWAPI_WAI */ + BCMEVENT_NAME(WLC_E_ESCAN_RESULT), + BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE), +#ifdef WLP2P + BCMEVENT_NAME(WLC_E_PROBRESP_MSG), + BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG), +#endif // endif +#ifdef PROP_TXSTATUS + BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP), +#endif // endif + BCMEVENT_NAME(WLC_E_WAKE_EVENT), + BCMEVENT_NAME(WLC_E_DCS_REQUEST), + BCMEVENT_NAME(WLC_E_RM_COMPLETE), + BCMEVENT_NAME(WLC_E_OVERLAY_REQ), + BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND), + BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT), + BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE), + BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), +#ifdef SOFTAP + BCMEVENT_NAME(WLC_E_GTK_PLUMBED), +#endif // endif + BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE), + BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE), + BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX), +#ifdef WLTDLS + BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT), +#endif /* WLTDLS */ + BCMEVENT_NAME(WLC_E_NATIVE), +#ifdef WLPKTDLYSTAT + BCMEVENT_NAME(WLC_E_PKTDELAY_IND), +#endif /* WLPKTDLYSTAT */ + BCMEVENT_NAME(WLC_E_SERVICE_FOUND), + BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX), + BCMEVENT_NAME(WLC_E_GAS_COMPLETE), + BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE), + BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE), +#ifdef WLWNM + BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP), +#endif /* WLWNM */ 
+#if defined(WL_PROXDETECT) + BCMEVENT_NAME(WLC_E_PROXD), +#endif // endif + BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL), + BCMEVENT_NAME(WLC_E_BSSID), +#ifdef PROP_TXSTATUS + BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT), +#endif // endif + BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND), + BCMEVENT_NAME(WLC_E_TXFAIL_THRESH), +#ifdef GSCAN_SUPPORT + BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT), + BCMEVENT_NAME(WLC_E_PFN_SSID_EXT), +#endif /* GSCAN_SUPPORT */ +#ifdef WLBSSLOAD_REPORT + BCMEVENT_NAME(WLC_E_BSS_LOAD), +#endif // endif +#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW) + BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ), +#endif // endif +#ifdef WLFBT + BCMEVENT_NAME(WLC_E_FBT), +#endif /* WLFBT */ + BCMEVENT_NAME(WLC_E_AUTHORIZED), + BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX), + BCMEVENT_NAME(WLC_E_CSA_START_IND), + BCMEVENT_NAME(WLC_E_CSA_DONE_IND), + BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND), + BCMEVENT_NAME(WLC_E_RMC_EVENT), + BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND), + BCMEVENT_NAME(WLC_E_ALLOW_CREDIT_BORROW), + BCMEVENT_NAME(WLC_E_MSCH), + BCMEVENT_NAME(WLC_E_ULP), + BCMEVENT_NAME(WLC_E_NAN), + BCMEVENT_NAME(WLC_E_PKT_FILTER), + BCMEVENT_NAME(WLC_E_DMA_TXFLUSH_COMPLETE), + BCMEVENT_NAME(WLC_E_PSK_AUTH), + BCMEVENT_NAME(WLC_E_SDB_TRANSITION), + BCMEVENT_NAME(WLC_E_PFN_SCAN_BACKOFF), + BCMEVENT_NAME(WLC_E_PFN_BSSID_SCAN_BACKOFF), + BCMEVENT_NAME(WLC_E_AGGR_EVENT), + BCMEVENT_NAME(WLC_E_TVPM_MITIGATION), +#ifdef WL_NAN + BCMEVENT_NAME(WLC_E_NAN_CRITICAL), + BCMEVENT_NAME(WLC_E_NAN_NON_CRITICAL), + BCMEVENT_NAME(WLC_E_NAN), +#endif /* WL_NAN */ + BCMEVENT_NAME(WLC_E_RPSNOA), + BCMEVENT_NAME(WLC_E_PHY_CAL), +}; + +const char *bcmevent_get_name(uint event_type) +{ + /* note: first coded this as a static const but some + * ROMs already have something called event_name so + * changed it so we don't have a variable for the + * 'unknown string + */ + const char *event_name = NULL; + + uint idx; + for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) { + + if (bcmevent_names[idx].event == 
event_type) { + event_name = bcmevent_names[idx].name; + break; + } + } + + /* if we find an event name in the array, return it. + * otherwise return unknown string. + */ + return ((event_name) ? event_name : "Unknown Event"); +} + +void +wl_event_to_host_order(wl_event_msg_t * evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. + */ + evt->event_type = ntoh32(evt->event_type); + evt->flags = ntoh16(evt->flags); + evt->status = ntoh32(evt->status); + evt->reason = ntoh32(evt->reason); + evt->auth_type = ntoh32(evt->auth_type); + evt->datalen = ntoh32(evt->datalen); + evt->version = ntoh16(evt->version); +} + +void +wl_event_to_network_order(wl_event_msg_t * evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. + */ + evt->event_type = hton32(evt->event_type); + evt->flags = hton16(evt->flags); + evt->status = hton32(evt->status); + evt->reason = hton32(evt->reason); + evt->auth_type = hton32(evt->auth_type); + evt->datalen = hton32(evt->datalen); + evt->version = hton16(evt->version); +} + +/* + * Validate if the event is proper and if valid copy event header to event. + * If proper event pointer is passed, to just validate, pass NULL to event. 
 *
 * Return values are
 *	BCME_OK - It is a BRCM event or BRCM dongle event
 *	BCME_NOTFOUND - Not BRCM, not an event, may be okay
 *	BCME_BADLEN - Bad length, should not process, just drop
 */
int
is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
	bcm_event_msg_u_t *out_event)
{
	uint16 evlen = 0;	/* length in bcmeth_hdr */
	uint16 subtype;
	uint16 usr_subtype;
	bcm_event_t *bcm_event;
	uint8 *pktend;		/* one past the last byte of the packet */
	uint8 *evend;		/* one past the last byte covered by bcmeth_hdr */
	int err = BCME_OK;
	uint32 data_len = 0; /* data length in bcm_event */

	pktend = (uint8 *)pktdata + pktlen;
	bcm_event = (bcm_event_t *)pktdata;

	/* only care about 16-bit subtype / length versions */
	if ((uint8 *)&bcm_event->bcm_hdr < pktend) {
		uint8 short_subtype = *(uint8 *)&bcm_event->bcm_hdr;
		/* top bit clear means an 8-bit subtype/length header: not ours */
		if (!(short_subtype & 0x80)) {
			err = BCME_NOTFOUND;
			goto done;
		}
	}

	/* must have both ether_header and bcmeth_hdr */
	if (pktlen < OFFSETOF(bcm_event_t, event)) {
		err = BCME_BADLEN;
		goto done;
	}

	/* check length in bcmeth_hdr */

	/* temporary - header length not always set properly. When the below
	 * !BCMDONGLEHOST is in all branches that use trunk DHD, the code
	 * under BCMDONGLEHOST can be removed.
	 */
	/* NOTE(review): evlen is derived from pktend here, so the
	 * evend != pktend check below can never fail in this build — it is a
	 * placeholder kept for the non-BCMDONGLEHOST variant (see comment
	 * above); confirm before removing.
	 */
	evlen = (uint16)(pktend - (uint8 *)&bcm_event->bcm_hdr.version);
	evend = (uint8 *)&bcm_event->bcm_hdr.version + evlen;
	if (evend != pktend) {
		err = BCME_BADLEN;
		goto done;
	}

	/* match on subtype, oui and usr subtype for BRCM events */
	subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.subtype);
	if (subtype != BCMILCP_SUBTYPE_VENDOR_LONG) {
		err = BCME_NOTFOUND;
		goto done;
	}

	if (bcmp(BRCM_OUI, &bcm_event->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
		err = BCME_NOTFOUND;
		goto done;
	}

	/* if it is a bcm_event or bcm_dngl_event_t, validate it */
	usr_subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.usr_subtype);
	switch (usr_subtype) {
	case BCMILCP_BCM_SUBTYPE_EVENT:
		/* check that header length and pkt length are sufficient */
		if ((pktlen < sizeof(bcm_event_t)) ||
			(evend < ((uint8 *)bcm_event + sizeof(bcm_event_t)))) {
			err = BCME_BADLEN;
			goto done;
		}

		/* ensure data length in event is not beyond the packet. */
		data_len = ntoh32_ua((void *)&bcm_event->event.datalen);
		/* exact-length match: header + payload + pad must equal pktlen */
		if ((sizeof(bcm_event_t) + data_len +
			BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
			err = BCME_BADLEN;
			goto done;
		}

		if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
			err = BCME_NOTFOUND;
			goto done;
		}

		if (out_event) {
			/* ensure BRCM event pkt aligned */
			memcpy(&out_event->event, &bcm_event->event, sizeof(wl_event_msg_t));
		}

		break;

	case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
#if defined(DNGL_EVENT_SUPPORT)
		if ((pktlen < sizeof(bcm_dngl_event_t)) ||
			(evend < ((uint8 *)bcm_event + sizeof(bcm_dngl_event_t)))) {
			err = BCME_BADLEN;
			goto done;
		}

		/* ensure data length in event is not beyond the packet.
*/ + data_len = ntoh16_ua((void *)&((bcm_dngl_event_t *)pktdata)->dngl_event.datalen); + if ((sizeof(bcm_dngl_event_t) + data_len + + BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) { + err = BCME_BADLEN; + goto done; + } + + if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) { + err = BCME_NOTFOUND; + goto done; + } + + if (out_event) { + /* ensure BRCM dngl event pkt aligned */ + memcpy(&out_event->dngl_event, &((bcm_dngl_event_t *)pktdata)->dngl_event, + sizeof(bcm_dngl_event_msg_t)); + } + + break; +#else + err = BCME_UNSUPPORTED; + break; +#endif // endif + + default: + err = BCME_NOTFOUND; + goto done; + } + + BCM_REFERENCE(data_len); +done: + return err; +} diff --git a/bcmdhd.100.10.315.x/bcmsdh.c b/bcmdhd.100.10.315.x/bcmsdh.c new file mode 100644 index 0000000..1f90472 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmsdh.c @@ -0,0 +1,883 @@ +/* + * BCMSDH interface glue + * implement bcmsdh API for SDIOH driver + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh.c 727623 2017-10-21 01:00:32Z $ + */ + +/** + * @file bcmsdh.c + */ + +/* ****************** BCMSDH Interface Functions *************************** */ + +#include +#include +#include +#include +#include +#include +#include + +#include /* BRCM API for SDIO clients (such as wl, dhd) */ +#include /* common SDIO/controller interface */ +#include /* SDIO device core hardware definitions. */ +#include /* SDIO Device and Protocol Specs */ + +#if defined(BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ + +#define SDIOH_API_ACCESS_RETRY_LIMIT 2 +const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; + +/* local copy of bcm sd handler */ +bcmsdh_info_t * l_bcmsdh = NULL; + +#if defined(BT_OVER_SDIO) +struct sdio_func *func_f3 = NULL; +static f3intr_handler processf3intr = NULL; +static dhd_hang_notification process_dhd_hang_notification = NULL; +static dhd_hang_state_t g_dhd_hang_state = NO_HANG_STATE; +#endif /* defined (BT_OVER_SDIO) */ + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN) +extern int +sdioh_enable_hw_oob_intr(void *sdioh, bool enable); + +void +bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable) +{ + sdioh_enable_hw_oob_intr(sdh->sdioh, enable); +} +#endif // endif + +#if defined(BT_OVER_SDIO) +void bcmsdh_btsdio_process_hang_state(dhd_hang_state_t new_state) +{ + bool state_change = false; + + BCMSDH_ERROR(("%s: DHD hang state changed - [%d] -> [%d]\n", + __FUNCTION__, g_dhd_hang_state, new_state)); + + if (g_dhd_hang_state == new_state) + return; + + switch (g_dhd_hang_state) { + case NO_HANG_STATE: + if (HANG_START_STATE == new_state) + state_change = true; + break; + + case HANG_START_STATE: + if (HANG_RECOVERY_STATE == new_state || + NO_HANG_STATE == 
new_state) + state_change = true; + + break; + + case HANG_RECOVERY_STATE: + if (NO_HANG_STATE == new_state) + state_change = true; + break; + + default: + BCMSDH_ERROR(("%s: Unhandled Hang state\n", __FUNCTION__)); + break; + } + + if (!state_change) { + BCMSDH_ERROR(("%s: Hang state cannot be changed\n", __FUNCTION__)); + return; + } + + g_dhd_hang_state = new_state; +} + +void bcmsdh_btsdio_process_f3_intr(void) +{ + if (processf3intr && (g_dhd_hang_state == NO_HANG_STATE)) + processf3intr(func_f3); +} + +void bcmsdh_btsdio_process_dhd_hang_notification(bool wifi_recovery_completed) +{ + bcmsdh_btsdio_process_hang_state(HANG_START_STATE); + + if (process_dhd_hang_notification) + process_dhd_hang_notification(func_f3, wifi_recovery_completed); + + /* WiFi was off, so HANG_RECOVERY_STATE is not needed */ + if (wifi_recovery_completed) + bcmsdh_btsdio_process_hang_state(NO_HANG_STATE); + else { + bcmsdh_btsdio_process_hang_state(HANG_RECOVERY_STATE); + } +} + +void bcmsdh_btsdio_interface_init(struct sdio_func *func, + f3intr_handler f3intr_fun, dhd_hang_notification hang_notification) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)l_bcmsdh; + BCMSDH_INFO(("%s: func %p \n", __FUNCTION__, func)); + func_f3 = func; + processf3intr = f3intr_fun; + sdioh_sdmmc_card_enable_func_f3(bcmsdh->sdioh, func); + process_dhd_hang_notification = hang_notification; + +} EXPORT_SYMBOL(bcmsdh_btsdio_interface_init); +#endif /* defined (BT_OVER_SDIO) */ + +/* Attach BCMSDH layer to SDIO Host Controller Driver + * + * @param osh OSL Handle. + * @param cfghdl Configuration Handle. + * @param regsva Virtual address of controller registers. + * @param irq Interrupt number of SDIO controller. + * + * @return bcmsdh_info_t Handle to BCMSDH context. 
+ */ +bcmsdh_info_t * +bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva) +{ + bcmsdh_info_t *bcmsdh; + + if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) { + BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)bcmsdh, sizeof(bcmsdh_info_t)); + bcmsdh->sdioh = sdioh; + bcmsdh->osh = osh; + bcmsdh->init_success = TRUE; + *regsva = si_enum_base(0); + + bcmsdh_force_sbwad_calc(bcmsdh, FALSE); + + /* Report the BAR, to fix if needed */ + bcmsdh->sbwad = si_enum_base(0); + + /* save the handler locally */ + l_bcmsdh = bcmsdh; + + return bcmsdh; +} + +int +bcmsdh_detach(osl_t *osh, void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bcmsdh != NULL) { + MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t)); + } + + l_bcmsdh = NULL; + + return 0; +} + +int +bcmsdh_iovar_op(void *sdh, const char *name, + void *params, uint plen, void *arg, uint len, bool set) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set); +} + +bool +bcmsdh_intr_query(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + bool on; + + ASSERT(bcmsdh); + status = sdioh_interrupt_query(bcmsdh->sdioh, &on); + if (SDIOH_API_SUCCESS(status)) + return FALSE; + else + return on; +} + +int +bcmsdh_intr_enable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef BCMSPI_ANDROID + uint32 data; +#endif /* BCMSPI_ANDROID */ + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE); +#ifdef BCMSPI_ANDROID + data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL); + data |= 0xE0E70000; + bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL); +#endif /* BCMSPI_ANDROID */ + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + +int +bcmsdh_intr_disable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef BCMSPI_ANDROID + uint32 data; +#endif /* BCMSPI_ANDROID */ + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE); +#ifdef BCMSPI_ANDROID + data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL); + data &= ~0xE0E70000; + bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL); +#endif /* BCMSPI_ANDROID */ + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh); + + status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_dereg(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh); + + status = sdioh_interrupt_deregister(bcmsdh->sdioh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +#if defined(DHD_DEBUG) +bool +bcmsdh_intr_pending(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + ASSERT(sdh); + return sdioh_interrupt_pending(bcmsdh->sdioh); +} +#endif // endif + +int +bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + ASSERT(sdh); + + /* don't support yet */ + return BCME_UNSUPPORTED; +} + +/** + * Read from SDIO Configuration Space + * @param sdh SDIO Host context. + * @param func_num Function number to read from. + * @param addr Address to read from. + * @param err Error return. + * @return value read from SDIO configuration space. 
+ */ +uint8 +bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif // endif + uint8 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif // endif + status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif // endif + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} EXPORT_SYMBOL(bcmsdh_cfg_read); + +void +bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif // endif + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif // endif + status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif // endif + if (err) + *err = SDIOH_API_SUCCESS(status) ? 
0 : BCME_SDIO_ERROR; + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); +} EXPORT_SYMBOL(bcmsdh_cfg_write); + +uint32 +bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num, + addr, data)); +} + +int +bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + uint8 *tmp_buf, *tmp_ptr; + uint8 *ptr; + bool ascii = func & ~0xf; + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cis); + ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT); + + status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length); + + if (ascii) { + /* Move binary bits to tmp and format them into the provided buffer. 
*/ + if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) { + BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__)); + return BCME_NOMEM; + } + bcopy(cis, tmp_buf, length); + for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { + ptr += snprintf((char*)ptr, (cis + length - ptr - 4), + "%.2x ", *tmp_ptr & 0xff); + if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) + ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n"); + } + MFREE(bcmsdh->osh, tmp_buf, length); + } + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint32 offset) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cisd); + + status = sdioh_cisaddr_read(bcmsdh->sdioh, func, cisd, offset); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + + +int +bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set) +{ + int err = 0; + uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK; + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bar0 != bcmsdh->sbwad || force_set) { + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + + if (!err) + bcmsdh->sbwad = bar0; + else + /* invalidate cached window var */ + bcmsdh->sbwad = 0; + + } + + return err; +} + +uint32 +bcmsdh_reg_read(void *sdh, uintptr addr, uint size) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 word = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, (unsigned int)addr)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if 
(bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) { + bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc in dead lock + return 0xFFFFFFFF; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, + SDIOH_READ, SDIO_FUNC_1, addr, &word, size); + + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + BCMSDH_INFO(("uint32data = 0x%x\n", word)); + + /* if ok, return appropriately masked word */ + if (SDIOH_API_SUCCESS(status)) { + switch (size) { + case sizeof(uint8): + return (word & 0xff); + case sizeof(uint16): + return (word & 0xffff); + case sizeof(uint32): + return word; + default: + bcmsdh->regfail = TRUE; + + } + } + + /* otherwise, bad sdio access or invalid size */ + BCMSDH_ERROR(("%s: error reading addr 0x%x size %d\n", + __FUNCTION__, (unsigned int)addr, size)); + return 0xFFFFFFFF; +} + +uint32 +bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + int err = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", + __FUNCTION__, (unsigned int)addr, size*8, data)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc))) { + bcmsdh->regfail = TRUE; // terence 20130621: + return err; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1, + addr, &data, size); + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + if (SDIOH_API_SUCCESS(status)) + return 0; + + BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", + __FUNCTION__, data, (unsigned int)addr, size)); + return 0xFFFFFFFF; +} + +bool +bcmsdh_regfail(void *sdh) +{ + return ((bcmsdh_info_t *)sdh)->regfail; +} + +int +bcmsdh_recv_buf(void 
*sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_READ, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); +} + +int +bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 
4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0); + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC, + (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_abort(void *sdh, uint fn) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_abort(bcmsdh->sdioh, fn); +} + +int +bcmsdh_start(void *sdh, int stage) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_start(bcmsdh->sdioh, stage); +} + +int +bcmsdh_stop(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_stop(bcmsdh->sdioh); +} + +int +bcmsdh_waitlockfree(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_waitlockfree(bcmsdh->sdioh); +} + +int +bcmsdh_query_device(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; + return (bcmsdh->vendevid); +} + +uint +bcmsdh_query_iofnum(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (sdioh_query_iofnum(bcmsdh->sdioh)); +} + +int +bcmsdh_reset(bcmsdh_info_t *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_sdio_reset(bcmsdh->sdioh); +} + +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh) +{ + ASSERT(sdh); + return sdh->sdioh; +} + +/* Function to pass device-status bits to DHD. 
*/ +uint32 +bcmsdh_get_dstatus(void *sdh) +{ +#ifdef BCMSPI + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + return sdioh_get_dstatus(sd); +#else + return 0; +#endif /* BCMSPI */ +} +uint32 +bcmsdh_cur_sbwad(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (bcmsdh->sbwad); +} + +/* example usage: if force is TRUE, forces the bcmsdhsdio_set_sbaddr_window to + * calculate sbwad always instead of caching. + */ +void +bcmsdh_force_sbwad_calc(void *sdh, bool force) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + bcmsdh->force_sbwad_calc = force; +} + +void +bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) +{ +#ifdef BCMSPI + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + sdioh_chipinfo(sd, chip, chiprev); +#else + return; +#endif /* BCMSPI */ +} + +#ifdef BCMSPI +void +bcmsdh_dwordmode(void *sdh, bool set) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + sdioh_dwordmode(sd, set); + return; +} +#endif /* BCMSPI */ + +int +bcmsdh_sleep(void *sdh, bool enab) +{ +#ifdef SDIOH_SLEEP_ENABLED + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_sleep(sd, enab); +#else + return BCME_UNSUPPORTED; +#endif // endif +} + +int +bcmsdh_gpio_init(void *sdh) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpio_init(sd); +} + +bool +bcmsdh_gpioin(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioin(sd, gpio); +} + +int +bcmsdh_gpioouten(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioouten(sd, gpio); +} + +int +bcmsdh_gpioout(void *sdh, uint32 
gpio, bool enab) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioout(sd, gpio, enab); +} + +uint +bcmsdh_set_mode(void *sdh, uint mode) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return (sdioh_set_mode(bcmsdh->sdioh, mode)); +} diff --git a/bcmdhd.100.10.315.x/bcmsdh_linux.c b/bcmdhd.100.10.315.x/bcmsdh_linux.c new file mode 100644 index 0000000..b27716a --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmsdh_linux.c @@ -0,0 +1,506 @@ +/* + * SDIO access interface for drivers - linux specific (pci only) + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdh_linux.c 689948 2017-03-14 05:21:03Z $ + */ + +/** + * @file bcmsdh_linux.c + */ + +#define __UNDEF_NO_VERSION__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +extern void dhdsdio_isr(void * args); +#include +#include +#include +#include + +/* driver info, initialized when bcmsdh_register is called */ +static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL}; + +typedef enum { + DHD_INTR_INVALID = 0, + DHD_INTR_INBAND, + DHD_INTR_HWOOB, + DHD_INTR_SWOOB +} DHD_HOST_INTR_TYPE; + +/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g. + * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather + * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_ctx) to this + * structure. + */ +typedef struct bcmsdh_os_info { + DHD_HOST_INTR_TYPE intr_type; + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + bcmsdh_cb_fn_t oob_irq_handler; + void *oob_irq_handler_context; + void *context; /* context returned from upper layer */ + void *sdioh; /* handle to lower layer (sdioh) */ + void *dev; /* handle to the underlying device */ + bool dev_wake_enabled; +} bcmsdh_os_info_t; + +/* debugging macros */ +#define SDLX_MSG(x) + +/** + * Checks to see if vendor and device IDs match a supported SDIO Host Controller. 
+ */ +bool +bcmsdh_chipmatch(uint16 vendor, uint16 device) +{ + /* Add other vendors and devices as required */ + +#ifdef BCMSDIOH_STD + /* Check for Arasan host controller */ + if (vendor == VENDOR_SI_IMAGE) { + return (TRUE); + } + /* Check for BRCM 27XX Standard host controller */ + if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for BRCM Standard host controller */ + if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for TI PCIxx21 Standard host controller */ + if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) { + return (TRUE); + } + if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) { + return (TRUE); + } + /* Ricoh R5C822 Standard SDIO Host */ + if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) { + return (TRUE); + } + /* JMicron Standard SDIO Host */ + if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) { + return (TRUE); + } + +#endif /* BCMSDIOH_STD */ +#ifdef BCMSDIOH_SPI + /* This is the PciSpiHost. 
*/ + if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) { + printf("Found PCI SPI Host Controller\n"); + return (TRUE); + } + +#endif /* BCMSDIOH_SPI */ + + return (FALSE); +} + +void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type, + uint bus_num, uint slot_num) +{ + ulong regs; + bcmsdh_info_t *bcmsdh; + uint32 vendevid; + bcmsdh_os_info_t *bcmsdh_osinfo = NULL; + + bcmsdh = bcmsdh_attach(osh, sdioh, ®s); + if (bcmsdh == NULL) { + SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } + bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t)); + if (bcmsdh_osinfo == NULL) { + SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__)); + goto err; + } + bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t)); + bcmsdh->os_cxt = bcmsdh_osinfo; + bcmsdh_osinfo->sdioh = sdioh; + bcmsdh_osinfo->dev = dev; + osl_set_bus_handle(osh, bcmsdh); + +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dev && device_init_wakeup(dev, true) == 0) + bcmsdh_osinfo->dev_wake_enabled = TRUE; +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ + +#if defined(OOB_INTR_ONLY) + spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock); + /* Get customer specific OOB IRQ parametres: IRQ number as IRQ type */ + bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info, + &bcmsdh_osinfo->oob_irq_flags); + if (bcmsdh_osinfo->oob_irq_num < 0) { + SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__)); + goto err; + } +#endif /* defined(BCMLXSDMMC) */ + + /* Read the vendor/device ID from the CIS */ + vendevid = bcmsdh_query_device(bcmsdh); + /* try to attach to the target device */ + bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num, + slot_num, 0, bus_type, (void *)regs, osh, bcmsdh); + if (bcmsdh_osinfo->context == NULL) { + SDLX_MSG(("%s: device attach failed\n", __FUNCTION__)); + goto err; + } + 
+ return bcmsdh; + + /* error handling */ +err: + if (bcmsdh != NULL) + bcmsdh_detach(osh, bcmsdh); + if (bcmsdh_osinfo != NULL) + MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t)); + return NULL; +} + +int bcmsdh_remove(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (bcmsdh_osinfo->dev) + device_init_wakeup(bcmsdh_osinfo->dev, false); + bcmsdh_osinfo->dev_wake_enabled = FALSE; +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ + + drvinfo.remove(bcmsdh_osinfo->context); + MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t)); + bcmsdh_detach(bcmsdh->osh, bcmsdh); + + return 0; +} + +#ifdef DHD_WAKE_STATUS +int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh) +{ + return bcmsdh->total_wake_count; +} + +int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + unsigned long flags; + int ret; + + spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); + + ret = bcmsdh->pkt_wake; + bcmsdh->total_wake_count += flag; + bcmsdh->pkt_wake = flag; + + spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); + return ret; +} +#endif /* DHD_WAKE_STATUS */ + +int bcmsdh_suspend(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context)) + return -EBUSY; + return 0; +} + +int bcmsdh_resume(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (drvinfo.resume) + return drvinfo.resume(bcmsdh_osinfo->context); + return 0; +} + +extern int bcmsdh_register_client_driver(void); +extern void bcmsdh_unregister_client_driver(void); +extern int sdio_func_reg_notify(void* semaphore); +extern void sdio_func_unreg_notify(void); + +#if defined(BCMLXSDMMC) +int bcmsdh_reg_sdio_notify(void* semaphore) +{ + return 
sdio_func_reg_notify(semaphore); +} + +void bcmsdh_unreg_sdio_notify(void) +{ + sdio_func_unreg_notify(); +} +#endif /* defined(BCMLXSDMMC) */ + +int +bcmsdh_register(bcmsdh_driver_t *driver) +{ + int error = 0; + + drvinfo = *driver; + SDLX_MSG(("%s: register client driver\n", __FUNCTION__)); + error = bcmsdh_register_client_driver(); + if (error) + SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error)); + + return error; +} + +void +bcmsdh_unregister(void) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (bcmsdh_pci_driver.node.next == NULL) + return; +#endif // endif + + bcmsdh_unregister_client_driver(); +} + +void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_stay_awake(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_relax(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + return bcmsdh_osinfo->dev_wake_enabled; +} + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) +void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable) +{ + unsigned long flags; + bcmsdh_os_info_t *bcmsdh_osinfo; + + if (!bcmsdh) + return; + + bcmsdh_osinfo = bcmsdh->os_cxt; + spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); + if (bcmsdh_osinfo->oob_irq_enabled != enable) { + if (enable) + enable_irq(bcmsdh_osinfo->oob_irq_num); + else + disable_irq_nosync(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = enable; + } + 
spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); +} + +static irqreturn_t wlan_oob_irq(int irq, void *dev_id) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + +#ifndef BCMSPI_ANDROID + bcmsdh_oob_intr_set(bcmsdh, FALSE); +#endif /* !BCMSPI_ANDROID */ + bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context); + + return IRQ_HANDLED; +} + +int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context) +{ + int err = 0; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + if (bcmsdh_osinfo->oob_irq_registered) { + SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__)); + return -EBUSY; + } +#ifdef HW_OOB + printf("%s: HW_OOB irq=%d flags=0x%X\n", __FUNCTION__, + (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags); +#else + printf("%s: SW_OOB irq=%d flags=0x%X\n", __FUNCTION__, + (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags); +#endif + bcmsdh_osinfo->oob_irq_handler = oob_irq_handler; + bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context; + bcmsdh_osinfo->oob_irq_enabled = TRUE; + bcmsdh_osinfo->oob_irq_registered = TRUE; + err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq, + bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); + if (err) { + SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err)); + bcmsdh_osinfo->oob_irq_enabled = FALSE; + bcmsdh_osinfo->oob_irq_registered = FALSE; + return err; + } + +#if defined(DISABLE_WOWLAN) + SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__)); + bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; +#else + err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (err) + SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err)); + else + bcmsdh_osinfo->oob_irq_wake_enabled = TRUE; +#endif + + return 0; +} + +void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh) +{ + int err = 0; + bcmsdh_os_info_t 
*bcmsdh_osinfo = bcmsdh->os_cxt; + + SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + if (!bcmsdh_osinfo->oob_irq_registered) { + SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (bcmsdh_osinfo->oob_irq_wake_enabled) { + err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (!err) + bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; + } + if (bcmsdh_osinfo->oob_irq_enabled) { + disable_irq(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = FALSE; + } + free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh); + bcmsdh_osinfo->oob_irq_registered = FALSE; +} +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + +/* Module parameters specific to each host-controller driver */ + +extern uint sd_msglevel; /* Debug message level */ +module_param(sd_msglevel, uint, 0); + +extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */ +module_param(sd_power, uint, 0); + +extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */ +module_param(sd_clock, uint, 0); + +extern uint sd_divisor; /* Divisor (-1 means external clock) */ +module_param(sd_divisor, uint, 0); + +extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */ +module_param(sd_sdmode, uint, 0); + +extern uint sd_hiok; /* Ok to use hi-speed mode */ +module_param(sd_hiok, uint, 0); + +extern uint sd_f2_blocksize; +module_param(sd_f2_blocksize, int, 0); + +extern uint sd_f1_blocksize; +module_param(sd_f1_blocksize, int, 0); + +#ifdef BCMSDIOH_STD +extern int sd_uhsimode; +module_param(sd_uhsimode, int, 0); +extern uint sd_tuning_period; +module_param(sd_tuning_period, uint, 0); +extern int sd_delay_value; +module_param(sd_delay_value, uint, 0); + +/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */ +extern char dhd_sdiod_uhsi_ds_override[2]; +module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0); + +#endif // endif + +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_attach); +EXPORT_SYMBOL(bcmsdh_detach); 
+EXPORT_SYMBOL(bcmsdh_intr_query); +EXPORT_SYMBOL(bcmsdh_intr_enable); +EXPORT_SYMBOL(bcmsdh_intr_disable); +EXPORT_SYMBOL(bcmsdh_intr_reg); +EXPORT_SYMBOL(bcmsdh_intr_dereg); + +#if defined(DHD_DEBUG) +EXPORT_SYMBOL(bcmsdh_intr_pending); +#endif // endif + +#if defined(BT_OVER_SDIO) +EXPORT_SYMBOL(bcmsdh_btsdio_interface_init); +#endif /* defined (BT_OVER_SDIO) */ + +EXPORT_SYMBOL(bcmsdh_devremove_reg); +EXPORT_SYMBOL(bcmsdh_cfg_read); +EXPORT_SYMBOL(bcmsdh_cfg_write); +EXPORT_SYMBOL(bcmsdh_cis_read); +EXPORT_SYMBOL(bcmsdh_reg_read); +EXPORT_SYMBOL(bcmsdh_reg_write); +EXPORT_SYMBOL(bcmsdh_regfail); +EXPORT_SYMBOL(bcmsdh_send_buf); +EXPORT_SYMBOL(bcmsdh_recv_buf); + +EXPORT_SYMBOL(bcmsdh_rwdata); +EXPORT_SYMBOL(bcmsdh_abort); +EXPORT_SYMBOL(bcmsdh_query_device); +EXPORT_SYMBOL(bcmsdh_query_iofnum); +EXPORT_SYMBOL(bcmsdh_iovar_op); +EXPORT_SYMBOL(bcmsdh_register); +EXPORT_SYMBOL(bcmsdh_unregister); +EXPORT_SYMBOL(bcmsdh_chipmatch); +EXPORT_SYMBOL(bcmsdh_reset); +EXPORT_SYMBOL(bcmsdh_waitlockfree); + +EXPORT_SYMBOL(bcmsdh_get_dstatus); +EXPORT_SYMBOL(bcmsdh_cfg_read_word); +EXPORT_SYMBOL(bcmsdh_cfg_write_word); +EXPORT_SYMBOL(bcmsdh_cur_sbwad); +EXPORT_SYMBOL(bcmsdh_chipinfo); + +#endif /* BCMSDH_MODULE */ diff --git a/bcmdhd.100.10.315.x/bcmsdh_sdmmc.c b/bcmdhd.100.10.315.x/bcmsdh_sdmmc.c new file mode 100644 index 0000000..7c5e919 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmsdh_sdmmc.c @@ -0,0 +1,1774 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdh_sdmmc.c 766268 2018-06-07 06:44:55Z $ + */ +#include + +#include +#include +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* Standard SDIO Host Controller Specification */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ + +#include +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8)) +#include +#else +#include +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) */ +#include +#include +#include + +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +extern volatile bool dhd_mmc_suspend; +#endif // endif +#include "bcmsdh_sdmmc.h" + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(4, 4, 0)) +static inline void +mmc_host_clk_hold(struct mmc_host *host) +{ + BCM_REFERENCE(host); + return; +} + +static inline void +mmc_host_clk_release(struct mmc_host *host) +{ + BCM_REFERENCE(host); + return; +} + +static inline unsigned int +mmc_host_clk_rate(struct mmc_host *host) +{ + return host->ios.clock; +} +#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0) */ + +#ifndef BCMSDH_MODULE +extern int sdio_function_init(void); +extern void sdio_function_cleanup(void); +#endif /* BCMSDH_MODULE */ + +#if !defined(OOB_INTR_ONLY) +static void IRQHandler(struct sdio_func *func); +static void IRQHandlerF2(struct sdio_func *func); +#endif /* !defined(OOB_INTR_ONLY) */ +static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); +#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE) +extern int sdio_reset_comm(struct mmc_card *card); +#else +int sdio_reset_comm(struct mmc_card *card) +{ + return 0; +} +#endif +#ifdef GLOBAL_SDMMC_INSTANCE +extern PBCMSDH_SDMMC_INSTANCE gInstance; +#endif + +#define DEFAULT_SDIO_F2_BLKSIZE 512 +#ifndef CUSTOM_SDIO_F2_BLKSIZE +#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE +#endif // endif + 
+#define DEFAULT_SDIO_F1_BLKSIZE 64 +#ifndef CUSTOM_SDIO_F1_BLKSIZE +#define CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE +#endif // endif + +#define MAX_IO_RW_EXTENDED_BLK 511 + +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; +uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE; + +#if defined(BT_OVER_SDIO) +uint sd_f3_blocksize = 64; +#endif /* defined (BT_OVER_SDIO) */ + +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */ +uint sd_msglevel = SDH_ERROR_VAL; +uint sd_use_dma = TRUE; + +#ifndef CUSTOM_RXCHAIN +#define CUSTOM_RXCHAIN 0 +#endif // endif + +DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); + +#define DMA_ALIGN_MASK 0x03 +#define MMC_SDIO_ABORT_RETRY_LIMIT 5 + +int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); + +#if defined(BT_OVER_SDIO) +extern +void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func) +{ + sd->func[3] = func; + sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3])); +} +#endif /* defined (BT_OVER_SDIO) */ + +void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz); +uint sdmmc_get_clock_rate(sdioh_info_t *sd); +void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div); + +static int +sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) +{ + int err_ret; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's 
function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Enable Function 1 */ + sdio_claim_host(sd->func[1]); + err_ret = sdio_enable_func(sd->func[1]); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret)); + } + + return FALSE; +} + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, struct sdio_func *func) +{ + sdioh_info_t *sd = NULL; + int err_ret; + + sd_trace(("%s\n", __FUNCTION__)); + + if (func == NULL) { + sd_err(("%s: sdio function device is NULL\n", __FUNCTION__)); + return NULL; + } + + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + sd->fake_func0.num = 0; + sd->fake_func0.card = func->card; + sd->func[0] = &sd->fake_func0; +#ifdef GLOBAL_SDMMC_INSTANCE + if (func->num == 2) + sd->func[1] = gInstance->func[1]; +#else + sd->func[1] = func->card->sdio_func[0]; +#endif + sd->func[2] = func->card->sdio_func[1]; +#ifdef GLOBAL_SDMMC_INSTANCE + sd->func[func->num] = func; +#endif + +#if defined(BT_OVER_SDIO) + sd->func[3] = NULL; +#endif /* defined (BT_OVER_SDIO) */ + + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + sd->use_rxchain = CUSTOM_RXCHAIN; + if (sd->func[1] == NULL || sd->func[2] == NULL) { + sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__)); + goto fail; + } + sdio_set_drvdata(sd->func[1], sd); 
+ + sdio_claim_host(sd->func[1]); + sd->client_block_size[1] = sd_f1_blocksize; + err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret)); + goto fail; + } + + sdio_claim_host(sd->func[2]); + sd->client_block_size[2] = sd_f2_blocksize; + printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize); + err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize); + sdio_release_host(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n", + sd_f2_blocksize, err_ret)); + goto fail; + } + + sd->sd_clk_rate = sdmmc_get_clock_rate(sd); + printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate); + sdioh_sdmmc_card_enablefuncs(sd); + + sd_trace(("%s: Done\n", __FUNCTION__)); + return sd; + +fail: + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + + if (sd) { + + /* Disable Function 2 */ + if (sd->func[2]) { + sdio_claim_host(sd->func[2]); + sdio_disable_func(sd->func[2]); + sdio_release_host(sd->func[2]); + } + + /* Disable Function 1 */ + if (sd->func[1]) { + sdio_claim_host(sd->func[1]); + sdio_disable_func(sd->func[1]); + sdio_release_host(sd->func[1]); + } + + sd->func[1] = NULL; + sd->func[2] = NULL; + + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +extern SDIOH_API_RC +sdioh_enable_func_intr(sdioh_info_t *sd) +{ + uint8 reg; + int err; + + if (sd->func[0] == NULL) { + sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sdio_claim_host(sd->func[0]); + reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(sd->func[0]); + return 
SDIOH_API_RC_FAIL; + } + /* Enable F1 and F2 interrupts, clear master enable */ + reg &= ~INTR_CTL_MASTER_EN; + reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); +#if defined(BT_OVER_SDIO) + reg |= (INTR_CTL_FUNC3_EN); +#endif /* defined (BT_OVER_SDIO) */ + sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err); + sdio_release_host(sd->func[0]); + + if (err) { + sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_disable_func_intr(sdioh_info_t *sd) +{ + uint8 reg; + int err; + + if (sd->func[0] == NULL) { + sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sdio_claim_host(sd->func[0]); + reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(sd->func[0]); + return SDIOH_API_RC_FAIL; + } + reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); +#if defined(BT_OVER_SDIO) + reg &= ~INTR_CTL_FUNC3_EN; +#endif // endif + /* Disable master interrupt with the last function interrupt */ + if (!(reg & 0xFE)) + reg = 0; + sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err); + sdio_release_host(sd->func[0]); + + if (err) { + sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + + return SDIOH_API_RC_SUCCESS; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + if (fn == NULL) { + sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; + + /* register and unmask irq */ + 
if (sd->func[2]) { + sdio_claim_host(sd->func[2]); + sdio_claim_irq(sd->func[2], IRQHandlerF2); + sdio_release_host(sd->func[2]); + } + + if (sd->func[1]) { + sdio_claim_host(sd->func[1]); + sdio_claim_irq(sd->func[1], IRQHandler); + sdio_release_host(sd->func[1]); + } +#elif defined(HW_OOB) + sdioh_enable_func_intr(sd); +#endif /* !defined(OOB_INTR_ONLY) */ + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + +#if !defined(OOB_INTR_ONLY) + if (sd->func[1]) { + /* register and unmask irq */ + sdio_claim_host(sd->func[1]); + sdio_release_irq(sd->func[1]); + sdio_release_host(sd->func[1]); + } + + if (sd->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(sd->func[2]); + sdio_release_irq(sd->func[2]); + /* Release host controller F2 */ + sdio_release_host(sd->func[2]); + } + + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#elif defined(HW_OOB) + if (dhd_download_fw_on_driverload) + sdioh_disable_func_intr(sd); +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return (0); +} +#endif // endif + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_RXCHAIN +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", 
IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 }, + {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0 }, + {"sd_rxchain", IOV_RXCHAIN, 0, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + BCM_REFERENCE(bool_val); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + si->client_block_size[func] = blksize; + + if (si->func[func] == NULL) { + sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); + bcmerror = BCME_NORESOURCE; + break; + } + sdio_claim_host(si->func[func]); + bcmerror = sdio_set_block_size(si->func[func], blksize); + if (bcmerror) + sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n", + __FUNCTION__, func, blksize, bcmerror)); + sdio_release_host(si->func[func]); + break; + } + + case IOV_GVAL(IOV_RXCHAIN): + int_val = (int32)si->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + 
bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + /* set the clock to divisor, if value is non-zero & power of 2 */ + if (int_val && !(int_val & (int_val - 1))) { + sd_divisor = int_val; + sdmmc_set_clock_divisor(si, sd_divisor); + } else { + DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n", + __FUNCTION__)); + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)0; + bcopy(&int_val, arg, val_size); + break; + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN) +#ifdef CUSTOMER_HW_AMLOGIC +#include +extern int wifi_irq_trigger_level(void); +#endif +SDIOH_API_RC +sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable) +{ + SDIOH_API_RC status; + uint8 data; + + if (enable) { + if (wifi_irq_trigger_level() == GPIO_IRQ_LOW) + data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; + 
else + data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI; + } + else + data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */ + + status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data); + return status; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +static int +sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr) +{ + /* read 24 bits and return valid 17 bit addr */ + int i; + uint32 scratch, regdata; + uint8 *ptr = (uint8 *)&scratch; + for (i = 0; i < 3; i++) { + if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, ®data)) != SUCCESS) + sd_err(("%s: Can't read!\n", __FUNCTION__)); + + *ptr++ = (uint8) regdata; + regaddr++; + } + + /* Only the lower 17-bits are valid */ + scratch = ltoh32(scratch); + scratch &= 0x0001FFFF; + return (scratch); +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 foo; + uint8 *cis = cisd; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + bzero(cis, length); + sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func)); + return SDIOH_API_RC_FAIL; + } + + sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); + + for (count = 0; count < length; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) { + sd_err(("%s: regread 
failed: Can't read CIS\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + *cis = (uint8)(foo & 0xff); + cis++; + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset) +{ + uint32 foo; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func)); + return SDIOH_API_RC_FAIL; + } + + sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); + + if (sdioh_sdmmc_card_regread (sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + *cisd = (uint8)(foo & 0xff); + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int err_ret = 0; +#if defined(MMC_SDIO_ABORT) + int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT; +#endif // endif + struct timespec now, before; + + if (sd_msglevel & SDH_COST_VAL) + getnstimeofday(&before); + + sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr)); + + DHD_PM_RESUME_WAIT(sdioh_request_byte_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + if(rw) { /* CMD52 Write */ + if (func == 0) { + /* Can only directly write to some F0 registers. Handle F2 enable + * as a special case. 
+ */ + if (regaddr == SDIOD_CCCR_IOEN) { +#if defined(BT_OVER_SDIO) + do { + if (sd->func[3]) { + sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte)); + + if (*byte & SDIO_FUNC_ENABLE_3) { + sdio_claim_host(sd->func[3]); + + /* Set Function 3 Block Size */ + err_ret = sdio_set_block_size(sd->func[3], + sd_f3_blocksize); + if (err_ret) { + sd_err(("F3 blocksize set err%d\n", + err_ret)); + } + + /* Enable Function 3 */ + sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n", + sd->func[3])); + err_ret = sdio_enable_func(sd->func[3]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n", + err_ret)); + } + + sdio_release_host(sd->func[3]); + + break; + } else if (*byte & SDIO_FUNC_DISABLE_3) { + sdio_claim_host(sd->func[3]); + + /* Disable Function 3 */ + sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n", + sd->func[3])); + err_ret = sdio_disable_func(sd->func[3]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n", + err_ret)); + } + sdio_release_host(sd->func[3]); + sd->func[3] = NULL; + + break; + } + } +#endif /* defined (BT_OVER_SDIO) */ + if (sd->func[2]) { + sdio_claim_host(sd->func[2]); + if (*byte & SDIO_FUNC_ENABLE_2) { + /* Enable Function 2 */ + err_ret = sdio_enable_func(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n", + err_ret)); + } + } else { + /* Disable Function 2 */ + err_ret = sdio_disable_func(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n", + err_ret)); + } + } + sdio_release_host(sd->func[2]); + } +#if defined(BT_OVER_SDIO) + } while (0); +#endif /* defined (BT_OVER_SDIO) */ + } +#if defined(MMC_SDIO_ABORT) + /* to allow abort command through F1 */ + else if (regaddr == SDIOD_CCCR_IOABORT) { + while (sdio_abort_retry--) { + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + /* + * this sdio_f0_writeb() can be replaced with + * another api depending upon MMC driver change. 
+ * As of this time, this is temporaray one + */ + sdio_writeb(sd->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(sd->func[func]); + } + if (!err_ret) + break; + } + } +#endif /* MMC_SDIO_ABORT */ + else if (regaddr < 0xF0) { + sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr)); + } else { + /* Claim host controller, perform F0 write, and release */ + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + sdio_f0_writeb(sd->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(sd->func[func]); + } + } + } else { + /* Claim host controller, perform Fn write, and release */ + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + sdio_writeb(sd->func[func], *byte, regaddr, &err_ret); + sdio_release_host(sd->func[func]); + } + } + } else { /* CMD52 Read */ + /* Claim host controller, perform Fn read, and release */ + if (sd->func[func]) { + sdio_claim_host(sd->func[func]); + if (func == 0) { + *byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret); + } else { + *byte = sdio_readb(sd->func[func], regaddr, &err_ret); + } + sdio_release_host(sd->func[func]); + } + } + + if (err_ret) { + if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ) + || (err_ret == -EIO))) { + } else { + sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n", + rw ? "Write" : "Read", func, regaddr, *byte, err_ret)); + } + } + + if (sd_msglevel & SDH_COST_VAL) { + getnstimeofday(&now); + sd_cost(("%s: rw=%d len=1 cost=%lds %luus\n", __FUNCTION__, + rw, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); + } + + return ((err_ret == 0) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +uint +sdioh_set_mode(sdioh_info_t *sd, uint mode) +{ + if (mode == SDPCM_TXGLOM_CPY) + sd->txglom_mode = mode; + else if (mode == SDPCM_TXGLOM_MDESC) + sd->txglom_mode = mode; + + return (sd->txglom_mode); +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int err_ret = SDIOH_API_RC_FAIL; + int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc in dead lock +#if defined(MMC_SDIO_ABORT) + int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT; +#endif // endif + struct timespec now, before; + + if (sd_msglevel & SDH_COST_VAL) + getnstimeofday(&before); + + if (func == 0) { + sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", + __FUNCTION__, cmd_type, rw, func, addr, nbytes)); + + DHD_PM_RESUME_WAIT(sdioh_request_word_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + /* Claim host controller */ + sdio_claim_host(sd->func[func]); + + if(rw) { /* CMD52 Write */ + if (nbytes == 4) { + sdio_writel(sd->func[func], *word, addr, &err_ret); + } else if (nbytes == 2) { + sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret); + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } else { /* CMD52 Read */ + if (nbytes == 4) { + *word = sdio_readl(sd->func[func], addr, &err_ret); + } else if (nbytes == 2) { + *word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF; + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } + + /* Release host controller */ + sdio_release_host(sd->func[func]); + + if (err_ret) { +#if defined(MMC_SDIO_ABORT) + /* Any error on CMD53 transaction should abort that function using function 0. 
*/ + while (sdio_abort_retry--) { + if (sd->func[0]) { + sdio_claim_host(sd->func[0]); + /* + * this sdio_f0_writeb() can be replaced with another api + * depending upon MMC driver change. + * As of this time, this is temporaray one + */ + sdio_writeb(sd->func[0], + func, SDIOD_CCCR_IOABORT, &err_ret2); + sdio_release_host(sd->func[0]); + } + if (!err_ret2) + break; + } + if (err_ret) +#endif /* MMC_SDIO_ABORT */ + { + sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n", + rw ? "Write" : "Read", func, addr, *word, err_ret)); + } + } + + if (sd_msglevel & SDH_COST_VAL) { + getnstimeofday(&now); + sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__, + rw, nbytes, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); + } + + return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +#ifdef BCMSDIOH_TXGLOM +static SDIOH_API_RC +sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func, + uint addr, void *pkt) +{ + bool fifo = (fix_inc == SDIOH_DATA_FIX); + int err_ret = 0; + void *pnext; + uint ttl_len, pkt_offset; + uint blk_num; + uint blk_size; + uint max_blk_count; + uint max_req_size; + struct mmc_request mmc_req; + struct mmc_command mmc_cmd; + struct mmc_data mmc_dat; + uint32 sg_count; + struct sdio_func *sdio_func = sd->func[func]; + struct mmc_host *host = sdio_func->card->host; + uint8 *localbuf = NULL; + uint local_plen = 0; + uint pkt_len = 0; + struct timespec now, before; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + ASSERT(pkt); + DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + + if (sd_msglevel & SDH_COST_VAL) + getnstimeofday(&before); + + blk_size = sd->client_block_size[func]; + max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK); + max_req_size = min(max_blk_count * blk_size, host->max_req_size); + + pkt_offset = 0; + pnext = pkt; + + ttl_len = 0; + sg_count = 0; + 
if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) { + while (pnext != NULL) { + ttl_len = 0; + sg_count = 0; + memset(&mmc_req, 0, sizeof(struct mmc_request)); + memset(&mmc_cmd, 0, sizeof(struct mmc_command)); + memset(&mmc_dat, 0, sizeof(struct mmc_data)); + sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list)); + + /* Set up scatter-gather DMA descriptors. this loop is to find out the max + * data we can transfer with one command 53. blocks per command is limited by + * host max_req_size and 9-bit max block number. when the total length of this + * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED + * commands (each transfer is still block aligned) + */ + while (pnext != NULL && ttl_len < max_req_size) { + int pkt_len; + int sg_data_size; + uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext); + + ASSERT(pdata != NULL); + pkt_len = PKTLEN(sd->osh, pnext); + sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len)); + /* sg_count is unlikely larger than the array size, and this is + * NOT something we can handle here, but in case it happens, PLEASE put + * a restriction on max tx/glom count (based on host->max_segs). 
+ */ + if (sg_count >= ARRAYSIZE(sd->sg_list)) { + sd_err(("%s: sg list entries(%u) exceed limit(%lu)," + " sd blk_size=%u\n", + __FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list), blk_size)); + return (SDIOH_API_RC_FAIL); + } + pdata += pkt_offset; + + sg_data_size = pkt_len - pkt_offset; + if (sg_data_size > max_req_size - ttl_len) + sg_data_size = max_req_size - ttl_len; + /* some platforms put a restriction on the data size of each scatter-gather + * DMA descriptor, use multiple sg buffers when xfer_size is bigger than + * max_seg_size + */ + if (sg_data_size > host->max_seg_size) { + sg_data_size = host->max_seg_size; + } + sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size); + + ttl_len += sg_data_size; + pkt_offset += sg_data_size; + if (pkt_offset == pkt_len) { + pnext = PKTNEXT(sd->osh, pnext); + pkt_offset = 0; + } + } + + if (ttl_len % blk_size != 0) { + sd_err(("%s, data length %d not aligned to block size %d\n", + __FUNCTION__, ttl_len, blk_size)); + return SDIOH_API_RC_FAIL; + } + blk_num = ttl_len / blk_size; + mmc_dat.sg = sd->sg_list; + mmc_dat.sg_len = sg_count; + mmc_dat.blksz = blk_size; + mmc_dat.blocks = blk_num; + mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; + mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */ + mmc_cmd.arg = write ? 1<<31 : 0; + mmc_cmd.arg |= (func & 0x7) << 28; + mmc_cmd.arg |= 1<<27; + mmc_cmd.arg |= fifo ? 0 : 1<<26; + mmc_cmd.arg |= (addr & 0x1FFFF) << 9; + mmc_cmd.arg |= blk_num & 0x1FF; + mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; + mmc_req.cmd = &mmc_cmd; + mmc_req.data = &mmc_dat; + if (!fifo) + addr += ttl_len; + + sdio_claim_host(sdio_func); + mmc_set_data_timeout(&mmc_dat, sdio_func->card); + mmc_wait_for_req(host, &mmc_req); + sdio_release_host(sdio_func); + + err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error; + if (0 != err_ret) { + sd_err(("%s:CMD53 %s failed with code %d\n", + __FUNCTION__, write ? 
"write" : "read", err_ret)); + return SDIOH_API_RC_FAIL; + } + } + } else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) { + for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) { + ttl_len += PKTLEN(sd->osh, pnext); + } + /* Claim host controller */ + sdio_claim_host(sd->func[func]); + for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) { + uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext); + pkt_len = PKTLEN(sd->osh, pnext); + + if (!localbuf) { + localbuf = (uint8 *)MALLOC(sd->osh, ttl_len); + if (localbuf == NULL) { + sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n", + __FUNCTION__, (write) ? "TX" : "RX")); + goto txglomfail; + } + } + + bcopy(buf, (localbuf + local_plen), pkt_len); + local_plen += pkt_len; + if (PKTNEXT(sd->osh, pnext)) + continue; + + buf = localbuf; + pkt_len = local_plen; +txglomfail: + /* Align Patch */ + if (!write || pkt_len < 32) + pkt_len = (pkt_len + 3) & 0xFFFFFFFC; + else if (pkt_len % blk_size) + pkt_len += blk_size - (pkt_len % blk_size); + + if ((write) && (!fifo)) + err_ret = sdio_memcpy_toio( + sd->func[func], + addr, buf, pkt_len); + else if (write) + err_ret = sdio_memcpy_toio( + sd->func[func], + addr, buf, pkt_len); + else if (fifo) + err_ret = sdio_readsb( + sd->func[func], + buf, addr, pkt_len); + else + err_ret = sdio_memcpy_fromio( + sd->func[func], + buf, addr, pkt_len); + + if (err_ret) + sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n", + __FUNCTION__, + (write) ? "TX" : "RX", + pnext, sg_count, addr, pkt_len, err_ret)); + else + sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n", + __FUNCTION__, + (write) ? 
"TX" : "RX", + pnext, sg_count, addr, pkt_len)); + + if (!fifo) + addr += pkt_len; + sg_count ++; + } + sdio_release_host(sd->func[func]); + } else { + sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode)); + return SDIOH_API_RC_FAIL; + } + + if (localbuf) + MFREE(sd->osh, localbuf, ttl_len); + + if (sd_msglevel & SDH_COST_VAL) { + getnstimeofday(&now); + sd_cost(("%s: rw=%d, ttl_len=%d, cost=%lds %luus\n", __FUNCTION__, + write, ttl_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000)); + } + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} +#endif /* BCMSDIOH_TXGLOM */ + +static SDIOH_API_RC +sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func, + uint addr, uint8 *buf, uint len) +{ + bool fifo = (fix_inc == SDIOH_DATA_FIX); + int err_ret = 0; + struct timespec now, before; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + ASSERT(buf); + + if (sd_msglevel & SDH_COST_VAL) + getnstimeofday(&before); + + /* NOTE: + * For all writes, each packet length is aligned to 32 (or 4) + * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length + * is aligned to block boundary. If you want to align each packet to + * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here + * + * For reads, the alignment is doen in sdioh_request_buffer. + * + */ + sdio_claim_host(sd->func[func]); + + if ((write) && (!fifo)) + err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len); + else if (write) + err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len); + else if (fifo) + err_ret = sdio_readsb(sd->func[func], buf, addr, len); + else + err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len); + + sdio_release_host(sd->func[func]); + + if (err_ret) + sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__, + (write) ? "TX" : "RX", buf, addr, len, err_ret)); + else + sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__, + (write) ? 
"TX" : "RX", buf, addr, len)); + + sd_trace(("%s: Exit\n", __FUNCTION__)); + + if (sd_msglevel & SDH_COST_VAL) { + getnstimeofday(&now); + sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__, + write, len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); + } + + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +/* + * This function takes a buffer or packet, and fixes everything up so that in the + * end, a DMA-able packet is created. + * + * A buffer does not have an associated packet pointer, and may or may not be aligned. + * A packet may consist of a single packet, or a packet chain. If it is a packet chain, + * then all the packets in the chain must be properly aligned. If the packet data is not + * aligned, then there may only be one packet, and in this case, it is copied to a new + * aligned packet. + * + */ +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func, + uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt) +{ + SDIOH_API_RC status; + void *tmppkt; + struct timespec now, before; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + + if (sd_msglevel & SDH_COST_VAL) + getnstimeofday(&before); + + if (pkt) { +#ifdef BCMSDIOH_TXGLOM + /* packet chain, only used for tx/rx glom, all packets length + * are aligned, total length is a block multiple + */ + if (PKTNEXT(sd->osh, pkt)) + return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt); +#endif /* BCMSDIOH_TXGLOM */ + /* non-glom mode, ignore the buffer parameter and use the packet pointer + * (this shouldn't happen) + */ + buffer = PKTDATA(sd->osh, pkt); + buf_len = PKTLEN(sd->osh, pkt); + } + + ASSERT(buffer); + + /* buffer and length are aligned, use it directly so we can avoid memory copy */ + if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0) + 
return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len); + + sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n", + __FUNCTION__, write, buffer, buf_len)); + + /* otherwise, a memory copy is needed as the input buffer is not aligned */ + tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE); + if (tmppkt == NULL) { + sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len)); + return SDIOH_API_RC_FAIL; + } + + if (write) + bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len); + + status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, + PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1))); + + if (!write) + bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len); + + PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE); + + if (sd_msglevel & SDH_COST_VAL) { + getnstimeofday(&now); + sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__, + buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000)); + } + + return status; +} + +/* this function performs "abort" for both of host & device */ +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ +#if defined(MMC_SDIO_ABORT) + char t_func = (char) func; +#endif /* defined(MMC_SDIO_ABORT) */ + sd_trace(("%s: Enter\n", __FUNCTION__)); + +#if defined(MMC_SDIO_ABORT) + /* issue abort cmd52 command through F1 */ + sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func); +#endif /* defined(MMC_SDIO_ABORT) */ + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Reset and re-initialize the device */ +int sdioh_sdio_reset(sdioh_info_t *si) +{ + sd_trace(("%s: Enter\n", __FUNCTION__)); + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Disable device interrupt */ +void +sdioh_sdmmc_devintr_off(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask &= ~CLIENT_INTR; +} + +/* Enable device interrupt */ +void 
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask |= CLIENT_INTR; +} + +/* Read client card reg */ +int +sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp = 0; + + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + *data = temp; + *data &= 0xff; + sd_data(("%s: byte read data=0x%02x\n", + __FUNCTION__, *data)); + } else { + if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) { + return BCME_SDIO_ERROR; + } + if (regsize == 2) + *data &= 0xffff; + + sd_data(("%s: word read data=0x%08x\n", + __FUNCTION__, *data)); + } + + return SUCCESS; +} + +#if !defined(OOB_INTR_ONLY) +/* bcmsdh_sdmmc interrupt handler */ +static void IRQHandler(struct sdio_func *func) +{ + sdioh_info_t *sd; + + sd = sdio_get_drvdata(func); + + ASSERT(sd != NULL); + sdio_release_host(sd->func[0]); + + if (sd->use_client_ints) { + sd->intrcount++; + ASSERT(sd->intr_handler); + ASSERT(sd->intr_handler_arg); + (sd->intr_handler)(sd->intr_handler_arg); + } else { + sd_err(("bcmsdh_sdmmc: ***IRQHandler\n")); + + sd_err(("%s: Not ready for intr: enabled %d, handler %p\n", + __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); + } + + sdio_claim_host(sd->func[0]); +} + +/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */ +static void IRQHandlerF2(struct sdio_func *func) +{ + sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n")); +} +#endif /* !defined(OOB_INTR_ONLY) */ + +#ifdef NOTUSED +/* Write client card reg */ +static int +sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp; + + temp = data & 0xff; + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + sd_data(("%s: byte write data=0x%02x\n", + __FUNCTION__, data)); + } else { + if (regsize == 2) + data &= 0xffff; + + 
sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize); + + sd_data(("%s: word write data=0x%08x\n", + __FUNCTION__, data)); + } + + return SUCCESS; +} +#endif /* NOTUSED */ + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + int ret; + + if (!sd) { + sd_err(("%s Failed, sd is NULL\n", __FUNCTION__)); + return (0); + } + + /* Need to do this stages as we can't enable the interrupt till + downloading of the firmware is complete, other wise polling + sdio access will come in way + */ + if (sd->func[0]) { + if (stage == 0) { + /* Since the power to the chip is killed, we will have + re enumerate the device again. Set the block size + and enable the fucntion 1 for in preparation for + downloading the code + */ + /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux + 2.6.27. The implementation prior to that is buggy, and needs broadcom's + patch for it + */ + if ((ret = sdio_reset_comm(sd->func[0]->card))) { + sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret)); + return ret; + } + else { + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + + if (sd->func[1]) { + /* Claim host controller */ + sdio_claim_host(sd->func[1]); + + sd->client_block_size[1] = 64; + ret = sdio_set_block_size(sd->func[1], 64); + if (ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 " + "blocksize(%d)\n", ret)); + } + + /* Release host controller F1 */ + sdio_release_host(sd->func[1]); + } + + if (sd->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(sd->func[2]); + + sd->client_block_size[2] = sd_f2_blocksize; + printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize); + ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize); + if (ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 " + "blocksize to %d(%d)\n", sd_f2_blocksize, ret)); + } + + /* Release host controller F2 */ + sdio_release_host(sd->func[2]); + } + + sdioh_sdmmc_card_enablefuncs(sd); + } + } else { +#if 
!defined(OOB_INTR_ONLY) + sdio_claim_host(sd->func[0]); + if (sd->func[2]) + sdio_claim_irq(sd->func[2], IRQHandlerF2); + if (sd->func[1]) + sdio_claim_irq(sd->func[1], IRQHandler); + sdio_release_host(sd->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_enable_func_intr(sd); +#endif // endif + bcmsdh_oob_intr_set(sd->bcmsdh, TRUE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + + return (0); +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + /* MSM7201A Android sdio stack has bug with interrupt + So internaly within SDIO stack they are polling + which cause issue when device is turned off. So + unregister interrupt with SDIO stack to stop the + polling + */ + if (sd->func[0]) { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(sd->func[0]); + if (sd->func[1]) + sdio_release_irq(sd->func[1]); + if (sd->func[2]) + sdio_release_irq(sd->func[2]); + sdio_release_host(sd->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_disable_func_intr(sd); +#endif // endif + bcmsdh_oob_intr_set(sd->bcmsdh, FALSE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + return (0); +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return (1); +} + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} + +uint +sdmmc_get_clock_rate(sdioh_info_t *sd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) + return 0; +#else + struct sdio_func *sdio_func = sd->func[0]; + struct mmc_host *host = sdio_func->card->host; + return mmc_host_clk_rate(host); +#endif +} + +void +sdmmc_set_clock_rate(sdioh_info_t *sd, 
uint hz) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) + return; +#else + struct sdio_func *sdio_func = sd->func[0]; + struct mmc_host *host = sdio_func->card->host; + struct mmc_ios *ios = &host->ios; + + mmc_host_clk_hold(host); + DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock)); + if (hz < host->f_min) { + DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__)); + hz = host->f_min; + } + + if (hz > host->f_max) { + DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__)); + hz = host->f_max; + } + ios->clock = hz; + host->ops->set_ios(host, ios); + DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock)); + mmc_host_clk_release(host); +#endif +} + +void +sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div) +{ + uint hz; + uint old_div = sdmmc_get_clock_rate(sd); + if (old_div == sd_div) { + return; + } + + hz = sd->sd_clk_rate / sd_div; + sdmmc_set_clock_rate(sd, hz); +} diff --git a/bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c b/bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c new file mode 100644 index 0000000..4e407f4 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmsdh_sdmmc_linux.c @@ -0,0 +1,377 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdh_sdmmc_linux.c 753315 2018-03-21 04:10:12Z $ + */ + +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include /* request_irq() */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(SDIO_VENDOR_ID_BROADCOM) +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */ + +#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000 + +#if !defined(SDIO_DEVICE_ID_BROADCOM_4362) +#define SDIO_DEVICE_ID_BROADCOM_4362 0x4362 +#endif // endif + +extern void wl_cfg80211_set_parent_dev(void *dev); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type, + uint bus_num, uint slot_num); +extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh); + +int sdio_function_init(void); +void sdio_function_cleanup(void); + +#define DESCRIPTION "bcmsdh_sdmmc Driver" +#define AUTHOR "Broadcom Corporation" + +/* module param defaults */ +static int clockoverride = 0; + +module_param(clockoverride, int, 0644); +MODULE_PARM_DESC(clockoverride, "SDIO card clock override"); + +#ifdef GLOBAL_SDMMC_INSTANCE +PBCMSDH_SDMMC_INSTANCE gInstance; +#endif + +/* Maximum number of bcmsdh_sdmmc devices supported by driver */ +#define BCMSDH_SDMMC_MAX_DEVICES 1 + +extern volatile bool dhd_mmc_suspend; + +static int sdioh_probe(struct sdio_func *func) +{ + int host_idx = func->card->host->index; + uint32 rca = func->card->rca; + wifi_adapter_info_t *adapter; + osl_t *osh = NULL; + sdioh_info_t *sdioh = NULL; + + sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca)); + adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca); + if (adapter != NULL) { + sd_err(("found adapter info '%s'\n", adapter->name)); +#ifdef BUS_POWER_RESTORE + 
adapter->sdio_func = func; +#endif + } else + sd_err(("can't find adapter info for this chip\n")); + +#ifdef WL_CFG80211 + wl_cfg80211_set_parent_dev(&func->dev); +#endif // endif + + /* allocate SDIO Host Controller state info */ + osh = osl_attach(&func->dev, SDIO_BUS, TRUE); + if (osh == NULL) { + sd_err(("%s: osl_attach failed\n", __FUNCTION__)); + goto fail; + } + osl_static_mem_init(osh, adapter); + sdioh = sdioh_attach(osh, func); + if (sdioh == NULL) { + sd_err(("%s: sdioh_attach failed\n", __FUNCTION__)); + goto fail; + } + sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca); + if (sdioh->bcmsdh == NULL) { + sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__)); + goto fail; + } + + sdio_set_drvdata(func, sdioh); + return 0; + +fail: + if (sdioh != NULL) + sdioh_detach(osh, sdioh); + if (osh != NULL) + osl_detach(osh); + return -ENOMEM; +} + +static void sdioh_remove(struct sdio_func *func) +{ + sdioh_info_t *sdioh; + osl_t *osh; + + sdioh = sdio_get_drvdata(func); + if (sdioh == NULL) { + sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__)); + return; + } + sd_err(("%s: Enter\n", __FUNCTION__)); + + osh = sdioh->osh; + bcmsdh_remove(sdioh->bcmsdh); + sdioh_detach(osh, sdioh); + osl_detach(osh); +} + +static int bcmsdh_sdmmc_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + + if (func == NULL) + return -EINVAL; + + sd_err(("%s: Enter num=%d\n", __FUNCTION__, func->num)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + +#ifdef GLOBAL_SDMMC_INSTANCE + gInstance->func[func->num] = func; +#endif + + /* 4318 doesn't have function 2 */ + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + ret = sdioh_probe(func); + + return ret; +} + +static void bcmsdh_sdmmc_remove(struct sdio_func *func) +{ + if (func == 
NULL) { + sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__)); + return; + } + + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + sdioh_remove(func); +} + +/* devices we support, null terminated */ +static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4362) }, + { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, + { 0, 0, 0, 0 /* end: all zeroes */ + }, +}; + +MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) +static int bcmsdh_sdmmc_suspend(struct device *pdev) +{ + int err; + sdioh_info_t *sdioh; + struct sdio_func *func = dev_to_sdio_func(pdev); + mmc_pm_flag_t sdio_flags; + + printf("%s Enter func->num=%d\n", __FUNCTION__, func->num); + if (func->num != 2) + return 0; + + dhd_mmc_suspend = TRUE; + sdioh = sdio_get_drvdata(func); + err = bcmsdh_suspend(sdioh->bcmsdh); + if (err) { + printf("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err); + dhd_mmc_suspend = FALSE; + return err; + } + + sdio_flags = sdio_get_host_pm_caps(func); + if (!(sdio_flags & MMC_PM_KEEP_POWER)) { + sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__)); + dhd_mmc_suspend = FALSE; + return -EINVAL; + } + + /* keep power while host suspended */ + err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (err) { + sd_err(("%s: error while trying to keep power\n", __FUNCTION__)); + dhd_mmc_suspend = FALSE; + return err; + } + smp_mb(); + + printf("%s Exit\n", __FUNCTION__); + return 0; +} + +static int bcmsdh_sdmmc_resume(struct device *pdev) +{ + sdioh_info_t *sdioh; 
+ struct sdio_func *func = dev_to_sdio_func(pdev); + + printf("%s Enter func->num=%d\n", __FUNCTION__, func->num); + if (func->num != 2) + return 0; + + dhd_mmc_suspend = FALSE; + sdioh = sdio_get_drvdata(func); + bcmsdh_resume(sdioh->bcmsdh); + + smp_mb(); + printf("%s Exit\n", __FUNCTION__); + return 0; +} + +static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = { + .suspend = bcmsdh_sdmmc_suspend, + .resume = bcmsdh_sdmmc_resume, +}; +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + +#if defined(BCMLXSDMMC) +static struct semaphore *notify_semaphore = NULL; + +static int dummy_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + if (func && (func->num != 2)) { + return 0; + } + + if (notify_semaphore) + up(notify_semaphore); + return 0; +} + +static void dummy_remove(struct sdio_func *func) +{ +} + +static struct sdio_driver dummy_sdmmc_driver = { + .probe = dummy_probe, + .remove = dummy_remove, + .name = "dummy_sdmmc", + .id_table = bcmsdh_sdmmc_ids, + }; + +int sdio_func_reg_notify(void* semaphore) +{ + notify_semaphore = semaphore; + return sdio_register_driver(&dummy_sdmmc_driver); +} + +void sdio_func_unreg_notify(void) +{ + OSL_SLEEP(15); + sdio_unregister_driver(&dummy_sdmmc_driver); +} + +#endif /* defined(BCMLXSDMMC) */ + +static struct sdio_driver bcmsdh_sdmmc_driver = { + .probe = bcmsdh_sdmmc_probe, + .remove = bcmsdh_sdmmc_remove, + .name = "bcmsdh_sdmmc", + .id_table = bcmsdh_sdmmc_ids, +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) + .drv = { + .pm = &bcmsdh_sdmmc_pm_ops, + }, +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + }; + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +}; + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + if (!sd) + return BCME_BADARG; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + return SDIOH_API_RC_SUCCESS; +} + +#ifdef BCMSDH_MODULE +static int __init +bcmsdh_module_init(void) +{ + int error = 0; + error = sdio_function_init(); + return error; +} + +static void __exit +bcmsdh_module_cleanup(void) +{ + sdio_function_cleanup(); +} + +module_init(bcmsdh_module_init); +module_exit(bcmsdh_module_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DESCRIPTION); +MODULE_AUTHOR(AUTHOR); + +#endif /* BCMSDH_MODULE */ +/* + * module init +*/ +int bcmsdh_register_client_driver(void) +{ + return sdio_register_driver(&bcmsdh_sdmmc_driver); +} + +/* + * module cleanup +*/ +void bcmsdh_unregister_client_driver(void) +{ + sdio_unregister_driver(&bcmsdh_sdmmc_driver); +} diff --git a/bcmdhd.100.10.315.x/bcmsdspi_linux.c b/bcmdhd.100.10.315.x/bcmsdspi_linux.c new file mode 100644 index 0000000..d34dec5 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmsdspi_linux.c @@ -0,0 +1,437 @@ +/* + * Broadcom SPI Host Controller Driver - Linux Per-port + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdspi_linux.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#ifdef BCMSPI_ANDROID +#include +#include +#include +#else +#include +#include /* SDIO Device and Protocol Specs */ +#include /* request_irq(), free_irq() */ +#include +#include +#endif /* BCMSPI_ANDROID */ + +#ifndef BCMSPI_ANDROID +extern uint sd_crc; +module_param(sd_crc, uint, 0); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define KERNEL26 +#endif // endif +#endif /* !BCMSPI_ANDROID */ + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +#ifndef BCMSPI_ANDROID + wait_queue_head_t intr_wait_queue; +#endif /* !BCMSPI_ANDROID */ +}; + +#ifndef BCMSPI_ANDROID +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif // endif + +/* Interrupt handler */ +static irqreturn_t +sdspi_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif // endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + sd = (sdioh_info_t *)dev_id; + sd->local_intrcount++; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + ours = spi_check_client_intr(sd, NULL); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sdos = (struct sdos_info *)sd->sdos_info; + wake_up_interruptible(&sdos->intr_wait_queue); + } + + return IRQ_RETVAL(ours); + } +} +#endif /* !BCMSPI_ANDROID */ + +#ifdef BCMSPI_ANDROID +static struct spi_device *gBCMSPI = NULL; + 
+extern int bcmsdh_probe(struct device *dev); +extern int bcmsdh_remove(struct device *dev); + +static int bcmsdh_spi_probe(struct spi_device *spi_dev) +{ + int ret = 0; + + gBCMSPI = spi_dev; + +#ifdef SPI_PIO_32BIT_RW + spi_dev->bits_per_word = 32; +#else + spi_dev->bits_per_word = 8; +#endif /* SPI_PIO_32BIT_RW */ + ret = spi_setup(spi_dev); + + if (ret) { + sd_err(("bcmsdh_spi_probe: spi_setup fail with %d\n", ret)); + } + sd_err(("bcmsdh_spi_probe: spi_setup with %d, bits_per_word=%d\n", + ret, spi_dev->bits_per_word)); + ret = bcmsdh_probe(&spi_dev->dev); + + return ret; +} + +static int bcmsdh_spi_remove(struct spi_device *spi_dev) +{ + int ret = 0; + + ret = bcmsdh_remove(&spi_dev->dev); + gBCMSPI = NULL; + + return ret; +} + +static struct spi_driver bcmsdh_spi_driver = { + .probe = bcmsdh_spi_probe, + .remove = bcmsdh_spi_remove, + .driver = { + .name = "wlan_spi", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, +}; + +/* + * module init +*/ +int bcmsdh_register_client_driver(void) +{ + int error = 0; + sd_trace(("bcmsdh_gspi: %s Enter\n", __FUNCTION__)); + + error = spi_register_driver(&bcmsdh_spi_driver); + + return error; +} + +/* + * module cleanup +*/ +void bcmsdh_unregister_client_driver(void) +{ + sd_trace(("%s Enter\n", __FUNCTION__)); + spi_unregister_driver(&bcmsdh_spi_driver); +} +#endif /* BCMSPI_ANDROID */ + +/* Register with Linux for interrupts */ +int +spi_register_irq(sdioh_info_t *sd, uint irq) +{ +#ifndef BCMSPI_ANDROID + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } +#endif /* !BCMSPI_ANDROID */ + return SUCCESS; +} + +/* Free Linux irq */ +void +spi_free_irq(uint irq, sdioh_info_t *sd) +{ +#ifndef BCMSPI_ANDROID + free_irq(irq, sd); +#endif /* !BCMSPI_ANDROID */ +} + +/* Map Host controller registers */ +#ifndef BCMSPI_ANDROID +uint32 * +spi_reg_map(osl_t *osh, uintptr 
addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +spi_reg_unmap(osl_t *osh, uintptr addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} +#endif /* !BCMSPI_ANDROID */ + +int +spi_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); +#ifndef BCMSPI_ANDROID + init_waitqueue_head(&sdos->intr_wait_queue); +#endif /* !BCMSPI_ANDROID */ + return BCME_OK; +} + +void +spi_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + +#ifndef BCMSPI_ANDROID + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#endif /* !BCMSPI_ANDROID */ + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; +#ifndef BCMSPI_ANDROID + if (enable && !sd->lockcount) + spi_devintr_on(sd); + else + spi_devintr_off(sd); +#endif /* !BCMSPI_ANDROID */ + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +spi_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + 
ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + if (sd->lockcount) { + sd_err(("%s: Already locked!\n", __FUNCTION__)); + ASSERT(sd->lockcount == 0); + } +#ifdef BCMSPI_ANDROID + if (sd->client_intr_enabled) + bcmsdh_oob_intr_set(0); +#else + spi_devintr_off(sd); +#endif /* BCMSPI_ANDROID */ + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); +} + +/* Enable client interrupt */ +void +spi_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { +#ifdef BCMSPI_ANDROID + bcmsdh_oob_intr_set(1); +#else + spi_devintr_on(sd); +#endif /* BCMSPI_ANDROID */ + } + spin_unlock_irqrestore(&sdos->lock, flags); +} + +#ifndef BCMSPI_ANDROID +void spi_waitbits(sdioh_info_t *sd, bool yield) +{ +#ifndef BCMSDYIELD + ASSERT(!yield); +#endif // endif + sd_trace(("%s: yield %d canblock %d\n", + __FUNCTION__, yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + +#ifdef BCMSDYIELD + if (yield && BLOCKABLE()) { + struct sdos_info *sdos; + sdos = (struct sdos_info *)sd->sdos_info; + /* Wait for the indication, the interrupt will be masked when the ISR fires. 
*/ + wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); + } else +#endif /* BCMSDYIELD */ + { + spi_spinbits(sd); + } + +} +#else /* !BCMSPI_ANDROID */ +int bcmgspi_dump = 0; /* Set to dump complete trace of all SPI bus transactions */ + +static void +hexdump(char *pfx, unsigned char *msg, int msglen) +{ + int i, col; + char buf[80]; + + ASSERT(strlen(pfx) + 49 <= sizeof(buf)); + + col = 0; + + for (i = 0; i < msglen; i++, col++) { + if (col % 16 == 0) + strcpy(buf, pfx); + sprintf(buf + strlen(buf), "%02x", msg[i]); + if ((col + 1) % 16 == 0) + printf("%s\n", buf); + else + sprintf(buf + strlen(buf), " "); + } + + if (col % 16 != 0) + printf("%s\n", buf); +} + +/* Send/Receive an SPI Packet */ +void +spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen) +{ + int write = 0; + int tx_len = 0; + struct spi_message msg; + struct spi_transfer t[2]; + + spi_message_init(&msg); + memset(t, 0, 2*sizeof(struct spi_transfer)); + + if (sd->wordlen == 2) +#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) + write = msg_out[2] & 0x80; +#else + write = msg_out[1] & 0x80; +#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */ + if (sd->wordlen == 4) +#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) + write = msg_out[0] & 0x80; +#else + write = msg_out[3] & 0x80; +#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */ + + if (bcmgspi_dump) { + hexdump(" OUT: ", msg_out, msglen); + } + + tx_len = write ? msglen-4 : 4; + + sd_trace(("spi_sendrecv: %s, wordlen %d, cmd : 0x%02x 0x%02x 0x%02x 0x%02x\n", + write ? 
"WR" : "RD", sd->wordlen, + msg_out[0], msg_out[1], msg_out[2], msg_out[3])); + + t[0].tx_buf = (char *)&msg_out[0]; + t[0].rx_buf = 0; + t[0].len = tx_len; + + spi_message_add_tail(&t[0], &msg); + + t[1].rx_buf = (char *)&msg_in[tx_len]; + t[1].tx_buf = 0; + t[1].len = msglen-tx_len; + + spi_message_add_tail(&t[1], &msg); + spi_sync(gBCMSPI, &msg); + + if (bcmgspi_dump) { + hexdump(" IN : ", msg_in, msglen); + } +} +#endif /* !BCMSPI_ANDROID */ diff --git a/bcmdhd.100.10.315.x/bcmspibrcm.c b/bcmdhd.100.10.315.x/bcmspibrcm.c new file mode 100644 index 0000000..2c720cc --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmspibrcm.c @@ -0,0 +1,1799 @@ +/* + * Broadcom BCMSDH to gSPI Protocol Conversion Layer + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmspibrcm.c 700323 2017-05-18 16:12:11Z $ + */ + +#define HSMODE + +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* SDIO device core hardware definitions. */ +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ +#include /* SDIO Device and Protocol Specs */ + +#include + +#include +#ifdef BCMSPI_ANDROID +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +#else +#include +#endif /* BCMSPI_ANDROID */ + +/* these are for the older cores... for newer cores we have control for each of them */ +#define F0_RESPONSE_DELAY 16 +#define F1_RESPONSE_DELAY 16 +#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY + +#define GSPI_F0_RESP_DELAY 0 +#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY +#define GSPI_F2_RESP_DELAY 0 +#define GSPI_F3_RESP_DELAY 0 + +#define CMDLEN 4 + +/* Globals */ +#if defined(DHD_DEBUG) +uint sd_msglevel = SDH_ERROR_VAL; +#else +uint sd_msglevel = 0; +#endif // endif + +uint sd_hiok = FALSE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ + +uint sd_divisor = 2; +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ + +uint8 spi_outbuf[SPI_MAX_PKT_LEN]; +uint8 spi_inbuf[SPI_MAX_PKT_LEN]; + +/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits + * assuming we will not exceed F0 response delay > 100 bytes at 48MHz. 
+ */ +#define BUF2_PKT_LEN 128 +uint8 spi_outbuf2[BUF2_PKT_LEN]; +uint8 spi_inbuf2[BUF2_PKT_LEN]; +#ifdef BCMSPI_ANDROID +uint *dhd_spi_lockcount = NULL; +#endif /* BCMSPI_ANDROID */ + +#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) +#define SPISWAP_WD4(x) bcmswap32(x); +#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \ + (bcmswap16((x & 0xffff0000) >> 16) << 16); +#else +#define SPISWAP_WD4(x) x; +#define SPISWAP_WD2(x) bcmswap32by16(x); +#endif // endif + +/* Prototypes */ +static bool bcmspi_test_card(sdioh_info_t *sd); +static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd); +static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode); +static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen); +static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 *data); +static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, + int regsize, uint32 data); +static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, + uint8 *data); +static int bcmspi_driver_init(sdioh_info_t *sd); +static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data); +static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, + uint32 *data); +static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer); +static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg); + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + + sd_trace(("%s\n", __FUNCTION__)); + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + if (spi_osinit(sd) != 0) { + sd_err(("%s: spi_osinit() 
 failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + +#ifndef BCMSPI_ANDROID + sd->bar0 = bar0; +#endif /* !BCMSPI_ANDROID */ + sd->irq = irq; +#ifndef BCMSPI_ANDROID + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; + sd->intr_handler_valid = FALSE; +#endif /* !BCMSPI_ANDROID */ + + /* Set defaults */ + sd->use_client_ints = TRUE; + sd->sd_use_dma = FALSE; /* DMA Not supported */ + + /* Spi device default is 16bit mode, change to 4 when device is changed to 32bit + * mode + */ + sd->wordlen = 2; + +#ifdef BCMSPI_ANDROID + dhd_spi_lockcount = &sd->lockcount; +#endif /* BCMSPI_ANDROID */ + +#ifndef BCMSPI_ANDROID + if (!spi_hw_attach(sd)) { + sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__)); + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } +#endif /* !BCMSPI_ANDROID */ + + if (bcmspi_driver_init(sd) != SUCCESS) { + sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__)); +#ifndef BCMSPI_ANDROID + spi_hw_detach(sd); +#endif /* !BCMSPI_ANDROID */ + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + if (spi_register_irq(sd, irq) != SUCCESS) { + sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); +#ifndef BCMSPI_ANDROID + spi_hw_detach(sd); +#endif /* !BCMSPI_ANDROID */ + spi_osfree(sd); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return (NULL); + } + + sd_trace(("%s: Done\n", __FUNCTION__)); + + return sd; +} + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if (sd) { + sd_err(("%s: detaching from hardware\n", __FUNCTION__)); + spi_free_irq(sd->irq, sd); +#ifndef BCMSPI_ANDROID + spi_hw_detach(sd); +#endif /* !BCMSPI_ANDROID */ + spi_osfree(sd); +#ifdef BCMSPI_ANDROID + dhd_spi_lockcount = NULL; +#endif /* BCMSPI_ANDROID */ + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +/* Configure callback to client when we receive 
client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); +#if !defined(OOB_INTR_ONLY) + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ +#ifndef BCMSPI_ANDROID + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; +#endif /* !BCMSPI_ANDROID */ + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return 0; +} +#endif // endif + +/* Provide dstatus bits of spi-transaction for dhd layers. 
*/ +extern uint32 +sdioh_get_dstatus(sdioh_info_t *sd) +{ + return sd->card_dstatus; +} + +extern void +sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev) +{ + sd->chip = chip; + sd->chiprev = chiprev; +} + +extern void +sdioh_dwordmode(sdioh_info_t *sd, bool set) +{ + uint8 reg = 0; + int status; + + if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } + + if (set) { + reg |= DWORD_PKT_LEN_EN; + sd->dwordmode = TRUE; + sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */ + } else { + reg &= ~DWORD_PKT_LEN_EN; + sd->dwordmode = FALSE; + sd->client_block_size[SPI_FUNC_2] = 2048; + } + + if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != + SUCCESS) { + sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); + return; + } +} + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_SPIERRSTATS, + IOV_RESP_DELAY_ALL +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", 
IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, + {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) }, + {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; +/* + sdioh_regs_t *regs; +*/ + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + +#ifndef BCMSPI_ANDROID + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!spi_start_clock(si, (uint16)sd_divisor)) { + sd_err(("%s: set clock failed\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + } + break; +#endif /* !BCMSPI_ANDROID */ + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + + if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) { + sd_err(("%s: Failed changing highspeed mode to %d.\n", + __FUNCTION__, sd_hiok)); + bcmerror = BCME_ERROR; + return ERROR; + } + break; + + case 
IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + case IOV_GVAL(IOV_SPIERRSTATS): + { + bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t)); + break; + } + + case IOV_SVAL(IOV_SPIERRSTATS): + { + bzero(&si->spierrstats, sizeof(struct spierrstats_t)); + break; + } + + case IOV_GVAL(IOV_RESP_DELAY_ALL): + int_val = (int32)si->resp_delay_all; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RESP_DELAY_ALL): + si->resp_delay_all = (bool)int_val; + int_val = STATUS_ENABLE|INTR_WITH_STATUS; + if (si->resp_delay_all) + int_val |= RESP_DELAY_ALL; + else { + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1, + F1_RESPONSE_DELAY) != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + } + + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val) + != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, 
SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + + if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) { + uint8 dummy_data; + status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data); + if (status) { + sd_err(("sdioh_cfg_read() failed.\n")); + return status; + } + } + + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 cis_byte; + uint16 *cis = (uint16 *)cisd; + uint bar0 = SI_ENUM_BASE(sd->sih); + int status; + uint8 data; + + sd_trace(("%s: Func %d\n", __FUNCTION__, func)); + + spi_lock(sd); + + /* Set sb window address to 0x18000000 */ + data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data); + if (status == SUCCESS) { + data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + if (status == SUCCESS) { + data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + offset = CC_SROM_OTP; /* OTP offset in chipcommon. 
*/ + for (count = 0; count < length/2; count++) { + if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + *cis = (uint16)cis_byte; + cis++; + offset += 2; + } + + spi_unlock(sd); + + return (BCME_OK); +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + spi_lock(sd); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + if (rw == SDIOH_READ) { + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr)); + } else { + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + } + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) { + spi_unlock(sd); + return status; + } + + if (rw == SDIOH_READ) { + *byte = (uint8)data; + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte)); + } + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + spi_lock(sd); + + if (rw == SDIOH_READ) + status = bcmspi_card_regread(sd, func, addr, nbytes, word); + else + status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word); + + spi_unlock(sd); + return (status == SUCCESS ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + + spi_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks. */ + while (buflen > 0) { + len = MIN(sd->client_block_size[func], buflen); + if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + sd_err(("%s: bcmspi_card_buf %s failed\n", + __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write")); + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +/* This function allows write to gspi bus when another rd/wr function is deep down the call stack. + * Its main aim is to have simpler spi writes rather than recursive writes. + * e.g. When there is a need to program response delay on the fly after detecting the SPI-func + * this call will allow to program the response delay. + */ +static int +bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte) +{ + uint32 cmd_arg; + uint32 datalen = 1; + uint32 hostlen; + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen); + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + + /* Set up and issue the SPI command. MSByte goes out on bus first. 
Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + } else { + sd_err(("%s: Host is %d bit spid, could not create SPI command.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (datalen != 0) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte); + } + } + + /* +4 for cmd, +4 for dstatus */ + hostlen = datalen + 8; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen); + + /* Last 4bytes are dstatus. Device is configured to return status bits. 
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +/* Program the response delay corresponding to the spi function */ +static int +bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay) +{ + if (sd->resp_delay_all == FALSE) + return (BCME_OK); + + if (sd->prev_fun == func) + return (BCME_OK); + + if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY) + return (BCME_OK); + + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay); + + /* Remember function for which to avoid reprogramming resp-delay in next iteration */ + sd->prev_fun = func; + + return (BCME_OK); + +} + +#define GSPI_RESYNC_PATTERN 0x0 + +/* A resync pattern is a 32bit MOSI line with all zeros. Its a special command in gSPI. + * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is + * synchronised and all queued resuests are cancelled. + */ +static int +bcmspi_resync_f1(sdioh_info_t *sd) +{ + uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0; + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
+ */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + *(uint32 *)spi_outbuf2 = cmd_arg; + + /* for Write, put the data into the output buffer */ + *(uint32 *)&spi_outbuf2[CMDLEN] = data; + + /* +4 for cmd, +4 for dstatus */ + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8); + + /* Last 4bytes are dstatus. Device is configured to return status bits. */ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +uint32 dstatus_count = 0; + +static int +bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg) +{ + uint32 dstatus = sd->card_dstatus; + struct spierrstats_t *spierrstats = &sd->spierrstats; + int err = SUCCESS; + + sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus)); + + /* Store dstatus of last few gSPI transactions */ + spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus; + spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg; + dstatus_count++; + + if (sd->card_init_done == FALSE) + return err; + + if (dstatus & STATUS_DATA_NOT_AVAILABLE) { + spierrstats->dna++; + sd_trace(("Read data not available on F1 addr = 0x%x\n", + GFIELD(cmd_arg, SPI_REG_ADDR))); + /* Clear dna bit */ + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE); + } + + if (dstatus & STATUS_UNDERFLOW) { + spierrstats->rdunderflow++; + sd_err(("FIFO underflow happened due to current F2 read command.\n")); + } + + if (dstatus & STATUS_OVERFLOW) { + 
spierrstats->wroverflow++; + sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n")); + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW); + bcmspi_resync_f1(sd); + sd_err(("Recovering from F1 FIFO overflow.\n")); + } + + if (dstatus & STATUS_F2_INTR) { + spierrstats->f2interrupt++; + sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_F3_INTR) { + spierrstats->f3interrupt++; + sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_HOST_CMD_DATA_ERR) { + spierrstats->hostcmddataerr++; + sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n")); + } + + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + spierrstats->f2pktavailable++; + sd_trace(("Packet is available/ready in F2 TX FIFO\n")); + sd_trace(("Packet length = %d\n", sd->dwordmode ? + ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) : + ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT))); + } + + if (dstatus & STATUS_F3_PKT_AVAILABLE) { + spierrstats->f3pktavailable++; + sd_err(("Packet is available/ready in F3 TX FIFO\n")); + sd_err(("Packet length = %d\n", + (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT)); + } + + return err; +} + +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ + return 0; +} + +int +sdioh_start(sdioh_info_t *sd, int stage) +{ + return SUCCESS; +} + +int +sdioh_stop(sdioh_info_t *sd) +{ + return SUCCESS; +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return SUCCESS; +} + +/* + * Private/Static work routines + */ +static int +bcmspi_host_init(sdioh_info_t *sd) +{ + + /* Default power on mode */ + sd->sd_mode = SDIOH_MODE_SPI; + sd->polled_mode = TRUE; + sd->host_init_done = TRUE; + sd->card_init_done = FALSE; + sd->adapter_slot = 1; + + return (SUCCESS); +} + +static int +get_client_blocksize(sdioh_info_t *sd) +{ + uint32 regdata[2]; + int status; + + /* Find F1/F2/F3 
max packet size */ + if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG, + 8, regdata)) != SUCCESS) { + return status; + } + + sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n", + regdata[0], regdata[1])); + + sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2; + sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1])); + ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1); + + sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2; + sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2])); + ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2); + + sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2; + sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3])); + ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3); + + return 0; +} + +static int +bcmspi_client_init(sdioh_info_t *sd) +{ + uint32 status_en_reg = 0; + sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); + +#ifndef BCMSPI_ANDROID +#ifdef HSMODE + if (!spi_start_clock(sd, (uint16)sd_divisor)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#else + /* Start at ~400KHz clock rate for initialization */ + if (!spi_start_clock(sd, 128)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* HSMODE */ +#endif /* !BCMSPI_ANDROID */ + + if (!bcmspi_host_device_init_adapt(sd)) { + sd_err(("bcmspi_host_device_init_adapt failed\n")); + return ERROR; + } + + if (!bcmspi_test_card(sd)) { + sd_err(("bcmspi_test_card failed\n")); + return ERROR; + } + + sd->num_funcs = SPI_MAX_IOFUNCS; + + get_client_blocksize(sd); + + /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */ + bcmspi_resync_f1(sd); + + sd->dwordmode = FALSE; + + bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg); + + sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__)); + status_en_reg |= INTR_WITH_STATUS; + + if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, + 
status_en_reg & 0xff) != SUCCESS) { + sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__)); + return ERROR; + } + +#ifndef HSMODE +#ifndef BCMSPI_ANDROID + /* After configuring for High-Speed mode, set the desired clock rate. */ + if (!spi_start_clock(sd, 4)) { + sd_err(("spi_start_clock failed\n")); + return ERROR; + } +#endif /* !BCMSPI_ANDROID */ +#endif /* HSMODE */ + + /* check to see if the response delay needs to be programmed properly */ + { + uint32 f1_respdelay = 0; + bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay); + if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) { + /* older sdiodevice core and has no separte resp delay for each of */ + sd_err(("older corerev < 4 so use the same resp delay for all funcs\n")); + sd->resp_delay_new = FALSE; + } + else { + /* older sdiodevice core and has no separte resp delay for each of */ + int ret_val; + sd->resp_delay_new = TRUE; + sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n")); + sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n", + GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY, + GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY)); + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1, + GSPI_F0_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1, + GSPI_F1_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1, + GSPI_F2_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__)); + return ERROR; + } + ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1, + GSPI_F3_RESP_DELAY); + if (ret_val != SUCCESS) { + sd_err(("%s: Unable to set response delay for F2\n", 
__FUNCTION__)); + return ERROR; + } + } + } + + sd->card_init_done = TRUE; + + /* get the device rev to program the prop respdelays */ + + return SUCCESS; +} + +static int +bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG, + 4, ®data)) != SUCCESS) + return status; + + sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata)); + + if (hsmode == TRUE) { + sd_trace(("Attempting to enable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + sd_trace(("Device is already in High-Speed mode.\n")); + return status; + } else { + regdata |= HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) { + return status; + } + } + } else { + sd_trace(("Attempting to disable High-Speed mode.\n")); + + if (regdata & HIGH_SPEED_MODE) { + regdata &= ~HIGH_SPEED_MODE; + sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); + if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, + 4, regdata)) != SUCCESS) + return status; + } + else { + sd_trace(("Device is already in Low-Speed mode.\n")); + return status; + } + } +#ifndef BCMSPI_ANDROID + spi_controller_highspeed_mode(sd, hsmode); +#endif /* !BCMSPI_ANDROID */ + + return TRUE; +} + +#define bcmspi_find_curr_mode(sd) { \ + sd->wordlen = 2; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd->wordlen = 4; \ + status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ + regdata &= 0xff; \ + if ((regdata == 0xad) || (regdata == 0x5b) || \ + (regdata == 0x5d) || (regdata == 0x5a)) \ + break; \ + sd_trace(("Silicon testability issue: regdata = 0x%x." 
\ + " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \ + OSL_DELAY(100000); \ +} + +#define INIT_ADAPT_LOOP 100 + +/* Adapt clock-phase-speed-bitwidth between host and device */ +static bool +bcmspi_host_device_init_adapt(sdioh_info_t *sd) +{ + uint32 wrregdata, regdata = 0; + int status; + int i; + + /* Due to a silicon testability issue, the first command from the Host + * to the device will get corrupted (first bit will be lost). So the + * Host should poll the device with a safe read request. ie: The Host + * should try to read F0 addr 0x14 using the Fixed address mode + * (This will prevent a unintended write command to be detected by device) + */ + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + /* If device was not power-cycled it will stay in 32bit mode with + * response-delay-all bit set. Alternate the iteration so that + * read either with or without response-delay for F0 to succeed. + */ + bcmspi_find_curr_mode(sd); + sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = TRUE; + + bcmspi_find_curr_mode(sd); + sd->dwordmode = FALSE; + } + + /* Bail out, device not detected */ + if (i == INIT_ADAPT_LOOP) + return FALSE; + + /* Softreset the spid logic */ + if ((sd->dwordmode) || (sd->wordlen == 4)) { + bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI); + bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, ®data); + sd_trace(("reset reg read = 0x%x\n", regdata)); + sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode, + sd->wordlen, sd->resp_delay_all)); + /* Restore default state after softreset */ + sd->wordlen = 2; + sd->dwordmode = FALSE; + } + + if (sd->wordlen == 4) { + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != + SUCCESS) + return FALSE; + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Spid is already in 32bit LE mode. 
Value read = 0x%x\n", + regdata)); + sd_trace(("Spid power was left on.\n")); + } else { + sd_err(("Spid power was left on but signature read failed." + " Value read = 0x%x\n", regdata)); + return FALSE; + } + } else { + sd->wordlen = 2; + +#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */ + + wrregdata = (CTRL_REG_DEFAULT); + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata)); + +#ifndef HSMODE + wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY); + wrregdata &= ~HIGH_SPEED_MODE; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); +#endif /* HSMODE */ + + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) { + sd_trace(("0xfeedbead was leftshifted by 1-bit.\n")); + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, + ®data)) != SUCCESS) + return FALSE; + } + OSL_DELAY(1000); + } + +#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH) + /* Change to host controller intr-polarity of active-high */ + wrregdata |= INTR_POLARITY; +#else + /* Change to host controller intr-polarity of active-low */ + wrregdata &= ~INTR_POLARITY; +#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */ + + sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n", + wrregdata)); + /* Change to 32bit mode */ + wrregdata |= WORD_LENGTH_32; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); + + /* Change command/data packaging in 32bit LE mode */ + sd->wordlen = 4; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Read spid passed. Value read = 0x%x\n", regdata)); + sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n")); + } else { + sd_err(("Stale spid reg values read as it was kept powered. 
Value read =" + "0x%x\n", regdata)); + return FALSE; + } + } + + return TRUE; +} + +static bool +bcmspi_test_card(sdioh_info_t *sd) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == (TEST_RO_DATA_32BIT_LE)) + sd_trace(("32bit LE regdata = 0x%x\n", regdata)); + else { + sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata)); + return FALSE; + } + +#define RW_PATTERN1 0xA0A1A2A3 +#define RW_PATTERN2 0x4B5B6B7B + + regdata = RW_PATTERN1; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN1) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN1, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + regdata = RW_PATTERN2; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN2) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN2, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. 
Value read = 0x%x\n", regdata)); + + return TRUE; +} + +static int +bcmspi_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if ((bcmspi_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (bcmspi_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +/* Read device reg */ +static int +bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +static int +bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + int status; + uint32 cmd_arg; + uint32 dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, 
cmd_arg, data, regsize)) != SUCCESS) + return status; + + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data)); + + bcmspi_cmd_getdstatus(sd, &dstatus); + sd_trace(("dstatus =0x%x\n", dstatus)); + return SUCCESS; +} + +/* write a device register */ +static int +bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + return SUCCESS; +} + +/* write a device register - 1 byte */ +static int +bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +void +bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 
*dstatus_buffer) +{ + *dstatus_buffer = sd->card_dstatus; +} + +/* 'data' is of type uint32 whereas other buffers are of type uint8 */ +static int +bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen) +{ + uint32 i, j; + uint8 resp_delay = 0; + int err = SUCCESS; + uint32 hostlen; + uint32 spilen = 0; + uint32 dstatus_idx = 0; + uint16 templen, buslen, len, *ptr = NULL; + + sd_trace(("spi cmd = 0x%x\n", cmd_arg)); + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. + */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + if (datalen < 4) + datalen = ROUNDUP(datalen, 4); + } else { + sd_err(("Host is %d bit spid, could not create SPI command.\n", + 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { + /* We send len field of hw-header always a mod16 size, both from host and dongle */ + if (datalen != 0) { + for (i = 0; i < datalen/4; i++) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD4(data[i]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD2(data[i]); + } + } + } + } + + /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */ + if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) { + int func = GFIELD(cmd_arg, SPI_FUNCTION); + switch (func) { + case 0: + if (sd->resp_delay_new) + resp_delay = GSPI_F0_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? 
F0_RESPONSE_DELAY : 0; + break; + case 1: + if (sd->resp_delay_new) + resp_delay = GSPI_F1_RESP_DELAY; + else + resp_delay = F1_RESPONSE_DELAY; + break; + case 2: + if (sd->resp_delay_new) + resp_delay = GSPI_F2_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0; + break; + default: + ASSERT(0); + break; + } + /* Program response delay */ + if (sd->resp_delay_new == FALSE) + bcmspi_prog_resp_delay(sd, func, resp_delay); + } + + /* +4 for cmd and +4 for dstatus */ + hostlen = datalen + 8 + resp_delay; + hostlen += dstatus_idx; +#ifdef BCMSPI_ANDROID + if (hostlen%4) { + sd_err(("Unaligned data len %d, hostlen %d\n", + datalen, hostlen)); +#endif /* BCMSPI_ANDROID */ + hostlen += (4 - (hostlen & 0x3)); +#ifdef BCMSPI_ANDROID + } +#endif /* BCMSPI_ANDROID */ + spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen); + + /* for Read, get the data into the input buffer */ + if (datalen != 0) { + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */ + for (j = 0; j < datalen/4; j++) { + if (sd->wordlen == 4) { /* 32bit spid */ + data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } + } + } + } + + dstatus_idx += (datalen + CMDLEN + resp_delay); + /* Last 4bytes are dstatus. Device is configured to return status bits. 
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else { + sd_err(("Host is %d bit machine, could not read SPI dstatus.\n", + 8 * sd->wordlen)); + return ERROR; + } + if (sd->card_dstatus == 0xffffffff) { + sd_err(("looks like not a GSPI device or device is not powered.\n")); + } + + err = bcmspi_update_stats(sd, cmd_arg); + + return err; + +} + +static int +bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + bool write = rw == SDIOH_READ ? 0 : 1; + uint retries = 0; + + bool enable; + uint32 spilen; + + cmd_arg = 0; + + ASSERT(nbytes); + ASSERT(nbytes <= sd->client_block_size[func]); + + if (write) sd->t_cnt++; else sd->r_cnt++; + + if (func == 2) { + /* Frame len check limited by gSPI. */ + if ((nbytes > 2000) && write) { + sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes)); + } + /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */ + /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */ + if (write) { + uint32 dstatus; + /* check F2 ready with cached one */ + bcmspi_cmd_getdstatus(sd, &dstatus); + if ((dstatus & STATUS_F2_RX_READY) == 0) { + retries = WAIT_F2RXFIFORDY; + enable = 0; + while (retries-- && !enable) { + OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000); + bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4, + &dstatus); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + if (!enable) { + struct spierrstats_t *spierrstats = &sd->spierrstats; + spierrstats->f2rxnotready++; + sd_err(("F2 FIFO is not ready to receive data.\n")); + return ERROR; + } + sd_trace(("No of retries on F2 ready %d\n", + (WAIT_F2RXFIFORDY - retries))); + } + } + } + + /* F2 transfers happen on 0 addr */ + addr = (func == 2) ? 
0 : addr; + + /* In pio mode buffer is read using fixed address fifo in func 1 */ + if ((func == 1) && (fifo)) + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); + else + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); + + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write); + spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes); + if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + /* convert len to mod4 size */ + spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } else + cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen); + + if ((func == 2) && (fifo == 1)) { + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + } + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wd" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, + (write ? 
"write" : "read"))); + return status; + } + + /* gSPI expects that hw-header-len is equal to spi-command-len */ + if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) { + ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff)); + ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16))); + } + + if ((nbytes > 2000) && !write) { + sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes)); + } + + return SUCCESS; +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + si->card_init_done = FALSE; + return bcmspi_client_init(si); +} + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} diff --git a/bcmdhd.100.10.315.x/bcmstdlib_s.c b/bcmdhd.100.10.315.x/bcmstdlib_s.c new file mode 100644 index 0000000..46ab0b9 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmstdlib_s.c @@ -0,0 +1,306 @@ +/* + * Broadcom Secure Standard Library. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id $ + */ + +#include +#include +#include +#ifdef BCMDRIVER +#include +#else /* BCMDRIVER */ +#include +#include +#endif /* else BCMDRIVER */ + +#include +#include + +/* + * __SIZE_MAX__ value is depending on platform: + * Firmware Dongle: RAMSIZE (Dongle Specific Limit). + * LINUX NIC/Windows/MACOSX/Application: OS Native or + * 0xFFFFFFFFu if not defined. + */ +#ifndef SIZE_MAX +#ifndef __SIZE_MAX__ +#define __SIZE_MAX__ 0xFFFFFFFFu +#endif /* __SIZE_MAX__ */ +#define SIZE_MAX __SIZE_MAX__ +#endif /* SIZE_MAX */ +#define RSIZE_MAX (SIZE_MAX >> 1u) + +#if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \ + defined(__STDC_WANT_LIB_EXT1__)) +/* + * memmove_s - secure memmove + * dest : pointer to the object to copy to + * destsz : size of the destination buffer + * src : pointer to the object to copy from + * n : number of bytes to copy + * Return Value : zero on success and non-zero on error + * Also on error, if dest is not a null pointer and destsz not greater + * than RSIZE_MAX, writes destsz zero bytes into the dest object. 
+ */ +int +memmove_s(void *dest, size_t destsz, const void *src, size_t n) +{ + int err = BCME_OK; + + if ((!dest) || (((char *)dest + destsz) < (char *)dest)) { + err = BCME_BADARG; + goto exit; + } + + if (destsz > RSIZE_MAX) { + err = BCME_BADLEN; + goto exit; + } + + if (destsz < n) { + memset(dest, 0, destsz); + err = BCME_BADLEN; + goto exit; + } + + if ((!src) || (((const char *)src + n) < (const char *)src)) { + memset(dest, 0, destsz); + err = BCME_BADARG; + goto exit; + } + + memmove(dest, src, n); +exit: + return err; +} + +/* + * memcpy_s - secure memcpy + * dest : pointer to the object to copy to + * destsz : size of the destination buffer + * src : pointer to the object to copy from + * n : number of bytes to copy + * Return Value : zero on success and non-zero on error + * Also on error, if dest is not a null pointer and destsz not greater + * than RSIZE_MAX, writes destsz zero bytes into the dest object. + */ +int +memcpy_s(void *dest, size_t destsz, const void *src, size_t n) +{ + int err = BCME_OK; + char *d = dest; + const char *s = src; + + if ((!d) || ((d + destsz) < d)) { + err = BCME_BADARG; + goto exit; + } + + if (destsz > RSIZE_MAX) { + err = BCME_BADLEN; + goto exit; + } + + if (destsz < n) { + memset(dest, 0, destsz); + err = BCME_BADLEN; + goto exit; + } + + if ((!s) || ((s + n) < s)) { + memset(dest, 0, destsz); + err = BCME_BADARG; + goto exit; + } + + /* overlap checking between dest and src */ + if (!(((d + destsz) <= s) || (d >= (s + n)))) { + memset(dest, 0, destsz); + err = BCME_BADARG; + goto exit; + } + + (void)memcpy(dest, src, n); +exit: + return err; +} + +/* + * memset_s - secure memset + * dest : pointer to the object to be set + * destsz : size of the destination buffer + * c : byte value + * n : number of bytes to be set + * Return Value : zero on success and non-zero on error + * Also on error, if dest is not a null pointer and destsz not greater + * than RSIZE_MAX, writes destsz bytes with value c into the dest object. 
+ */ +int +memset_s(void *dest, size_t destsz, int c, size_t n) +{ + int err = BCME_OK; + if ((!dest) || (((char *)dest + destsz) < (char *)dest)) { + err = BCME_BADARG; + goto exit; + } + + if (destsz > RSIZE_MAX) { + err = BCME_BADLEN; + goto exit; + } + + if (destsz < n) { + (void)memset(dest, c, destsz); + err = BCME_BADLEN; + goto exit; + } + + (void)memset(dest, c, n); +exit: + return err; +} +#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */ + +#if !defined(FREEBSD) && !defined(BCM_USE_PLATFORM_STRLCPY) +/** + * strlcpy - Copy a %NUL terminated string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @size: size of destination buffer 0 if input parameters are NOK + * return: string leng of src (assume src is NUL terminated) + * + * Compatible with *BSD: the result is always a valid + * NUL-terminated string that fits in the buffer (unless, + * of course, the buffer size is zero). It does not pad + * out the result like strncpy() does. + */ +size_t strlcpy(char *dest, const char *src, size_t size) +{ + const char *s = src; + size_t n; + + if (dest == NULL) { + return 0; + } + + /* terminate dest if src is NULL and return 0 as only NULL was added */ + if (s == NULL) { + *dest = '\0'; + return 0; + } + + /* allows us to handle size 0 */ + if (size == 0) { + n = 0; + } else { + n = size - 1u; + } + + /* perform copy */ + while (*s && n != 0) { + *dest++ = *s++; + n--; + } + + *dest = '\0'; + + /* count to end of s or compensate for NULL */ + if (n == 0) { + while (*s++) + ; + } else { + s++; + } + + /* return bytes copied not accounting NUL */ + return (s - src - 1u); +} +#endif // endif + +/** + * strlcat_s - Concatenate a %NUL terminated string with a sized buffer + * @dest: Where to concatenate the string to + * @src: Where to copy the string from + * @size: size of destination buffer + * return: string length of created string (i.e. 
/* strlcat_s: BSD-style bounded string concatenation.
 *
 * Appends src to the NUL-terminated string in dest without ever touching
 * more than 'size' bytes of dest, and always leaves dest NUL-terminated
 * (dest[size - 1] at worst; dest[0] when size == 0). Truncates when the
 * combined length would not fit. A non-terminated dest or src is handled
 * by scanning at most 'size' characters of each.
 *
 * Returns the length of the string it tried to create, capped by 'size':
 * min(size, strlen(dest)) plus the number of src bytes actually appended,
 * not counting the trailing NUL.
 */
size_t
strlcat_s(char *dest, const char *src, size_t size)
{
	size_t dst_len = 0;	/* chars already in dest (capped at size) */
	size_t appended = 0;	/* src bytes actually copied */
	size_t room;		/* bytes of dest left after the existing text */
	char *tail;		/* write cursor: end of the dest string */

	if (dest == NULL) {
		return 0;
	}

	/* Locate the end of dest, scanning no more than 'size' bytes. */
	tail = dest;
	while (dst_len < size && *tail != '\0') {
		tail++;
		dst_len++;
	}
	room = size - dst_len;

	if (src != NULL) {
		size_t src_len = 0;

		/* Measure src, also bounded by 'size' in case it is not
		 * NUL-terminated.
		 */
		while (src_len < size && src[src_len] != '\0') {
			src_len++;
		}

		if (room != 0) {
			/* Leave one byte for the terminator. */
			appended = (src_len < room - 1) ? src_len : room - 1;
			(void)memcpy(tail, src, appended);
			tail += appended;
		}
	}

	/* dest already fills the buffer with no NUL: step back to make one. */
	if (room == 0 && dst_len != 0) {
		tail--;
	}
	*tail = '\0';

	return dst_len + appended;
}
+ * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmutils.c 760883 2018-05-03 22:57:07Z $ + */ + +#include +#include +#include +#include +#ifdef BCMDRIVER +#include +#include + +#else /* !BCMDRIVER */ + +#include +#include +#include +#include + +#if defined(BCMEXTSUP) +#include +#endif // endif + +#ifndef ASSERT +#define ASSERT(exp) +#endif // endif + +#endif /* !BCMDRIVER */ + +#ifdef WL_UNITTEST +#ifdef ASSERT +#undef ASSERT +#endif /* ASSERT */ +#define ASSERT(exp) +#endif /* WL_UNITTEST */ + +#include +#include +#include +#include +#include +#include +#include <802.1d.h> +#include <802.11.h> +#include +#include +#include + +#ifdef BCMDRIVER + +/* return total length of buffer chain */ +uint BCMFASTPATH +pkttotlen(osl_t *osh, void *p) +{ + uint total; + int len; + + total = 0; + for (; p; p = PKTNEXT(osh, p)) { + len = PKTLEN(osh, p); + total += len; +#ifdef BCMLFRAG + if (BCMLFRAG_ENAB()) { + if (PKTISFRAG(osh, p)) { + total += PKTFRAGTOTLEN(osh, p); + } + } +#endif // endif + } + + return (total); +} + +/* return the last buffer of chained pkt */ +void * +pktlast(osl_t *osh, void *p) +{ + for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) + ; + + return (p); +} + +/* count segments of a chained packet */ +uint BCMFASTPATH +pktsegcnt(osl_t *osh, void *p) +{ + uint cnt; + + for (cnt = 0; p; p = PKTNEXT(osh, p)) { + cnt++; +#ifdef BCMLFRAG + if (BCMLFRAG_ENAB()) { + if (PKTISFRAG(osh, p)) { + cnt += PKTFRAGTOTNUM(osh, p); + } + } +#endif // endif + } + + return cnt; +} + +/* copy a pkt buffer chain into a buffer */ +uint +pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + if (len < 0) + len = 4096; /* "infinite" */ + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(PKTDATA(osh, p) + offset, buf, n); + 
buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + +/* copy a buffer into a pkt buffer chain */ +uint +pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(buf, PKTDATA(osh, p) + offset, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + +uint8 * BCMFASTPATH +pktdataoffset(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint pkt_off = 0, len = 0; + uint8 *pdata = (uint8 *) PKTDATA(osh, p); + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + pdata = (uint8 *) PKTDATA(osh, p); + pkt_off = offset - len; + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return (uint8*) (pdata+pkt_off); +} + +/* given a offset in pdata, find the pkt seg hdr */ +void * +pktoffset(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint len = 0; + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return p; +} + +void +bcm_mdelay(uint ms) +{ + uint i; + + for (i = 0; i < ms; i++) { + OSL_DELAY(1000); + } +} + +#if defined(DHD_DEBUG) +/* pretty hex print a pkt buffer chain */ +void +prpkt(const char *msg, osl_t *osh, void *p0) +{ + void *p; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + for (p = p0; p; p = PKTNEXT(osh, p)) + prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); +} +#endif // endif + +/* Takes an Ethernet frame and sets out-of-bound PKTPRIO. + * Also updates the inplace vlan tag if requested. + * For debugging, it returns an indication of what it did. 
 */
uint BCMFASTPATH
pktsetprio(void *pkt, bool update_vtag)
{
	struct ether_header *eh;
	struct ethervlan_header *evh;
	uint8 *pktdata;
	int priority = 0;
	int rc = 0;	/* PKTPRIO_* flags OR'ed into the return value */

	pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));

	eh = (struct ether_header *) pktdata;

	if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
		uint16 vlan_tag;
		int vlan_prio, dscp_prio = 0;

		evh = (struct ethervlan_header *)eh;

		vlan_tag = ntoh16(evh->vlan_tag);
		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;

		if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
			(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
			uint8 tos_tc = IP_TOS46(ip_body);
			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
		}

		/* DSCP priority gets precedence over 802.1P (vlan tag) */
		if (dscp_prio != 0) {
			priority = dscp_prio;
			rc |= PKTPRIO_VDSCP;
		} else {
			priority = vlan_prio;
			rc |= PKTPRIO_VLAN;
		}
		/*
		 * If the DSCP priority is not the same as the VLAN priority,
		 * then overwrite the priority field in the vlan tag, with the
		 * DSCP priority value. This is required for Linux APs because
		 * the VLAN driver on Linux, overwrites the skb->priority field
		 * with the priority value in the vlan tag
		 */
		if (update_vtag && (priority != vlan_prio)) {
			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
			evh->vlan_tag = hton16(vlan_tag);
			rc |= PKTPRIO_UPD;
		}
#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
	/* NOTE: this arm splices an extra 'else if' into the chain only when
	 * one of the two macros is defined; braces stay balanced either way.
	 */
	} else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
		priority = PRIO_8021D_NC;
		/* plain '=' (not '|=') is equivalent here: rc is still 0 */
		rc = PKTPRIO_DSCP;
#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
	} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
		(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
		uint8 *ip_body = pktdata + sizeof(struct ether_header);
		uint8 tos_tc = IP_TOS46(ip_body);
		uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
		/* map well-known DSCP code points onto 802.1D priorities */
		switch (dscp) {
		case DSCP_EF:
			priority = PRIO_8021D_VO;
			break;
		case DSCP_AF31:
		case DSCP_AF32:
		case DSCP_AF33:
			priority = PRIO_8021D_CL;
			break;
		case DSCP_AF21:
		case DSCP_AF22:
		case DSCP_AF23:
		case DSCP_AF11:
		case DSCP_AF12:
		case DSCP_AF13:
			priority = PRIO_8021D_EE;
			break;
		default:
			priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
			break;
		}

		rc |= PKTPRIO_DSCP;
	}

	ASSERT(priority >= 0 && priority <= MAXPRIO);
	PKTSETPRIO(pkt, priority);
	return (rc | priority);
}

/* lookup user priority for specified DSCP */
static uint8
dscp2up(uint8 *up_table, uint8 dscp)
{
	uint8 user_priority = 255;

	/* lookup up from table if parameters valid */
	if (up_table != NULL && dscp < UP_TABLE_MAX) {
		user_priority = up_table[dscp];
	}

	/* 255 is unused value so return up from dscp */
	if (user_priority == 255) {
		user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT);
	}

	return user_priority;
}

/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX.
 * Falls back to pktsetprio() when no table has been provided.
 */
uint BCMFASTPATH
pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag)
{
	if (up_table) {
		uint8 *pktdata;
		uint pktlen;
		uint8 dscp;
		uint user_priority = 0;
		uint rc = 0;

		pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
		pktlen = PKTLEN(OSH_NULL, pkt);

		if (pktgetdscp(pktdata, pktlen, &dscp)) {
			rc = PKTPRIO_DSCP;
			user_priority = dscp2up(up_table, dscp);
			PKTSETPRIO(pkt, user_priority);
		}

		return (rc | user_priority);
	} else {
		return pktsetprio(pkt, update_vtag);
	}
}

/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
 * Handles both plain Ethernet and single-tagged (802.1Q) frames.
 */
bool BCMFASTPATH
pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp)
{
	struct ether_header *eh;
	struct ethervlan_header *evh;
	uint8 *ip_body;
	bool rc = FALSE;

	/* minimum length is ether header and IP header */
	if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)
		return FALSE;

	eh = (struct ether_header *) pktdata;

	if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
		ip_body = pktdata + sizeof(struct ether_header);
		*dscp = IP_DSCP46(ip_body);
		rc = TRUE;
	}
	else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
		evh = (struct ethervlan_header *)eh;

		/* minimum length is ethervlan header and IP header */
		if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
			evh->ether_type == HTON16(ETHER_TYPE_IP)) {
			ip_body = pktdata + sizeof(struct ethervlan_header);
			*dscp = IP_DSCP46(ip_body);
			rc = TRUE;
		}
	}

	return rc;
}

/* usr_prio range from low to high with usr_prio value.
 * Returns FALSE (and writes nothing) on any out-of-range argument.
 */
static bool
up_table_set(uint8 *up_table, uint8 usr_prio, uint8 low, uint8 high)
{
	int i;

	if (usr_prio > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) {
		return FALSE;
	}

	for (i = low; i <= high; i++) {
		up_table[i] = usr_prio;
	}

	return TRUE;
}

/* set user priority table from an 802.11 QoS Map Set IE.
 * On any parse failure the table is left all-0xff (i.e. "unset").
 */
int BCMFASTPATH
wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie)
{
	uint8 len;

	if (up_table == NULL || qos_map_ie == NULL) {
		return BCME_ERROR;
	}

	/* clear table to check table was set or not */
	memset(up_table, 0xff, UP_TABLE_MAX);

	/* length of QoS Map IE must be 16+n*2, n is number of exceptions.
	 * (the qos_map_ie != NULL re-check below is redundant but harmless)
	 */
	if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID &&
			(len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH &&
			(len % 2) == 0) {
		uint8 *except_ptr = (uint8 *)qos_map_ie->data;
		uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
		uint8 *range_ptr = except_ptr + except_len;
		int i;

		/* fill in ranges */
		for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
			uint8 low = range_ptr[i];
			uint8 high = range_ptr[i + 1];
			if (low == 255 && high == 255) {
				continue;
			}

			if (!up_table_set(up_table, i / 2, low, high)) {
				/* clear the table on failure */
				memset(up_table, 0xff, UP_TABLE_MAX);
				return BCME_ERROR;
			}
		}

		/* update exceptions */
		for (i = 0; i < except_len; i += 2) {
			uint8 dscp = except_ptr[i];
			uint8 usr_prio = except_ptr[i+1];

			/* exceptions with invalid dscp/usr_prio are ignored */
			up_table_set(up_table, usr_prio, dscp, dscp);
		}
	}

	return BCME_OK;
}

/* The 0.5KB string table is not removed by compiler even though it's unused */

static char bcm_undeferrstr[32];
static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;

/* Convert the error codes into related error strings.
 * NOTE: unknown codes are formatted into a single static buffer, so the
 * returned pointer is only valid until the next call with an unknown code.
 */
const char *
BCMRAMFN(bcmerrorstr)(int bcmerror)
{
	/* check if someone added a bcmerror code but forgot to add errorstring */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));

	if (bcmerror > 0 || bcmerror < BCME_LAST) {
		snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
		return bcm_undeferrstr;
	}

	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);

	return bcmerrorstrtable[-bcmerror];
}

/* iovar table lookup */
/* could mandate sorted tables and do a binary search */
const bcm_iovar_t*
bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
{
	const bcm_iovar_t *vi;
	const char *lookup_name;

	/* skip any ':' delimited option prefixes */
	lookup_name = strrchr(name, ':');
	if (lookup_name != NULL)
		lookup_name++;
	else
		lookup_name = name;

	ASSERT(table != NULL);

	for (vi = table; vi->name; vi++) {
		if (!strcmp(vi->name, lookup_name))
			return vi;
	}
	/* ran to end of table */

	return NULL; /* var name not found */
}

/* Validate the buffer length supplied for an iovar of type vi->type.
 * 'set' distinguishes set- from get-operations (IOVT_VOID is set-only).
 */
int
bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
{
	int bcmerror = 0;
	BCM_REFERENCE(arg);

	/* length check on io buf */
	switch (vi->type) {
	case IOVT_BOOL:
	case IOVT_INT8:
	case IOVT_INT16:
	case IOVT_INT32:
	case IOVT_UINT8:
	case IOVT_UINT16:
	case IOVT_UINT32:
		/* all integers are int32 sized args at the ioctl interface */
		if (len < (int)sizeof(int)) {
			bcmerror = BCME_BUFTOOSHORT;
		}
		break;

	case IOVT_BUFFER:
		/* buffer must meet minimum length requirement */
		if (len < vi->minlen) {
			bcmerror = BCME_BUFTOOSHORT;
		}
		break;

	case IOVT_VOID:
		if (!set) {
			/* Cannot return nil... */
			bcmerror = BCME_UNSUPPORTED;
		}
		break;

	default:
		/* unknown type for length check in iovar info */
		ASSERT(0);
		bcmerror = BCME_UNSUPPORTED;
	}

	return bcmerror;
}

#if !defined(_CFEZ_)
/*
 * Hierarchical Multiword bitmap based small id allocator.
 *
 * Multilevel hierarchy bitmap. (maximum 2 levels)
 * First hierarchy uses a multiword bitmap to identify 32bit words in the
 * second hierarchy that have at least a single bit set. Each bit in a word of
 * the second hierarchy represents a unique ID that may be allocated.
 *
 * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
 * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word
 * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
 * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap words identifying first
 * non-zero bitmap word carrying at least one free ID.
 * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations.
 * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
 *
 * Design Notes:
 * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. A runtime count of how many
 * bits are computed each time on allocation and deallocation, requiring 4
 * array indexed access and 3 arithmetic operations. When not defined, a runtime
 * count of set bits state is maintained. Upto 32 Bytes per 1024 IDs is needed.
 * In a 4K max ID allocator, up to 128Bytes are hence used per instantiation.
 * In a memory limited system e.g. dongle builds, a CPU for memory tradeoff may
 * be used by defining BCM_MWBMAP_USE_CNTSETBITS.
 *
 * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array
 * size is fixed. No intention to support larger than 4K indice allocation. ID
 * allocators for ranges smaller than 4K will have a wastage of only 12Bytes
 * with savings in not having to use an indirect access, had it been dynamically
 * allocated.
 */
/* NOTE(review): value is 64K while the comment says "May increase to 64K";
 * comment appears retained from an earlier 4K configuration.
 */
#define BCM_MWBMAP_ITEMS_MAX    (64 * 1024)  /* May increase to 64K */

#define BCM_MWBMAP_BITS_WORD    (NBITS(uint32))
#define BCM_MWBMAP_WORDS_MAX    (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
#define BCM_MWBMAP_WDMAP_MAX    (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
#define BCM_MWBMAP_SHIFT_OP     (5)
#define BCM_MWBMAP_MODOP(ix)    ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
#define BCM_MWBMAP_DIVOP(ix)    ((ix) >> BCM_MWBMAP_SHIFT_OP)
#define BCM_MWBMAP_MULOP(ix)    ((ix) << BCM_MWBMAP_SHIFT_OP)

/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
#define BCM_MWBMAP_PTR(hdl)		((struct bcm_mwbmap *)(hdl))
#define BCM_MWBMAP_HDL(ptr)		((void *)(ptr))

#if defined(BCM_MWBMAP_DEBUG)
#define BCM_MWBMAP_AUDIT(mwb) \
	do { \
		ASSERT((mwb != NULL) && \
		       (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
		bcm_mwbmap_audit(mwb); \
	} while (0)
#define MWBMAP_ASSERT(exp)		ASSERT(exp)
#define MWBMAP_DBG(x)           printf x
#else   /* !BCM_MWBMAP_DEBUG */
#define BCM_MWBMAP_AUDIT(mwb)   do {} while (0)
#define MWBMAP_ASSERT(exp)		do {} while (0)
#define MWBMAP_DBG(x)
#endif  /* !BCM_MWBMAP_DEBUG */

typedef struct bcm_mwbmap {     /* Hierarchical multiword bitmap allocator */
	uint16 wmaps;               /* Total number of words in free wd bitmap */
	uint16 imaps;               /* Total number of words in free id bitmap */
	int32  ifree;               /* Count of free indices. Used only in audits */
	uint16 total;               /* Total indices managed by multiword bitmap */

	void * magic;               /* Audit handle parameter from user */

	uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of */
#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
	int8   wd_count[BCM_MWBMAP_WORDS_MAX];  /* free id running count, 1st lvl */
#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */

	uint32 id_bitmap[0];        /* Second level bitmap */
} bcm_mwbmap_t;

/* Incarnate a hierarchical multiword bitmap based small index allocator.
 * items_max must not exceed BCM_MWBMAP_ITEMS_MAX. Returns
 * BCM_MWBMAP_INVALID_HDL on allocation failure.
 */
struct bcm_mwbmap *
bcm_mwbmap_init(osl_t *osh, uint32 items_max)
{
	struct bcm_mwbmap * mwbmap_p;
	uint32 wordix, size, words, extra;

	/* Implementation Constraint: Uses 32bit word bitmap */
	MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
	MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
	MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
	MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);

	ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);

	/* Determine the number of words needed in the multiword bitmap */
	extra = BCM_MWBMAP_MODOP(items_max);
	words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);

	/* Allocate runtime state of multiword bitmap */
	/* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
	size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
	mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
	if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
		ASSERT(0);
		goto error1;
	}
	memset(mwbmap_p, 0, size);

	/* Initialize runtime multiword bitmap state */
	mwbmap_p->imaps = (uint16)words;
	mwbmap_p->ifree = (int32)items_max;
	mwbmap_p->total = (uint16)items_max;

	/* Setup magic, for use in audit of handle */
	mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);

	/* Setup the second level bitmap of free indices */
	/* Mark all indices as available */
	for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
		mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
		mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
	}

	/* Ensure that extra indices are tagged as un-available */
	if (extra) { /* fixup the free ids in last bitmap and wd_count */
		uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
		mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
	}

	/* Setup the first level bitmap hierarchy */
	extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
	words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);

	mwbmap_p->wmaps = (uint16)words;

	for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
		mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
	if (extra) {
		uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
	}

	return mwbmap_p;

error1:
	return BCM_MWBMAP_INVALID_HDL;
}

/* Release resources used by multiword bitmap based small index allocator.
 * The freed size mirrors the MALLOC in bcm_mwbmap_init (imaps id words).
 */
void
bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
{
	bcm_mwbmap_t * mwbmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
		+ (sizeof(uint32) * mwbmap_p->imaps));
	return;
}

/* Allocate a unique small index using a multiword bitmap index allocator.
 * Returns BCM_MWBMAP_INVALID_IDX when no free index remains.
 */
uint32 BCMFASTPATH
bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 wordix, bitmap;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	/* Start with the first hierarchy */
	for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {

		bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */

		if (bitmap != 0U) {

			uint32 count, bitix, *bitmap_p;

			bitmap_p = &mwbmap_p->wd_bitmap[wordix];

			/* clear all except trailing 1 */
			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
			              bcm_count_leading_zeros(bitmap));
			bitix    = (BCM_MWBMAP_BITS_WORD - 1)
			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
			wordix   = BCM_MWBMAP_MULOP(wordix) + bitix;

			/* Clear bit if wd count is 0, without conditional branch */
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
			count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
			mwbmap_p->wd_count[wordix]--;
			count = mwbmap_p->wd_count[wordix];
			MWBMAP_ASSERT(count ==
			              (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
			MWBMAP_ASSERT(count >= 0);

			/* clear wd_bitmap bit if id_map count is 0 */
			bitmap = (count == 0) << bitix;

			MWBMAP_DBG((
			    "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));

			*bitmap_p ^= bitmap;

			/* Use bitix in the second hierarchy */
			bitmap_p = &mwbmap_p->id_bitmap[wordix];

			bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
			MWBMAP_ASSERT(bitmap != 0U);

			/* clear all except trailing 1 */
			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
			              bcm_count_leading_zeros(bitmap));
			bitix    = BCM_MWBMAP_MULOP(wordix)
			         + (BCM_MWBMAP_BITS_WORD - 1)
			         - bcm_count_leading_zeros(bitmap); /* use asm clz */

			mwbmap_p->ifree--; /* decrement system wide free count */
			MWBMAP_ASSERT(mwbmap_p->ifree >= 0);

			MWBMAP_DBG((
			    "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
			    mwbmap_p->ifree));

			*bitmap_p ^= bitmap; /* mark as allocated = 1b0 */

			return bitix;
		}
	}

	ASSERT(mwbmap_p->ifree == 0);

	return BCM_MWBMAP_INVALID_IDX;
}

/* Force an index at a specified position to be in use */
void
bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 count, wordix, bitmap, *bitmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	ASSERT(bitix < mwbmap_p->total);

	/* Start with second hierarchy */
	wordix   = BCM_MWBMAP_DIVOP(bitix);
	bitmap   = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
	bitmap_p = &mwbmap_p->id_bitmap[wordix];

	ASSERT((*bitmap_p & bitmap) == bitmap);

	mwbmap_p->ifree--; /* update free count */
	ASSERT(mwbmap_p->ifree >= 0);

	MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
	            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
	            mwbmap_p->ifree));

	*bitmap_p ^= bitmap; /* mark as in use */

	/* Update first hierarchy */
	bitix    = wordix;

	wordix   = BCM_MWBMAP_DIVOP(bitix);
	bitmap_p = &mwbmap_p->wd_bitmap[wordix];

#if defined(BCM_MWBMAP_USE_CNTSETBITS)
	count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
	mwbmap_p->wd_count[bitix]--;
	count = mwbmap_p->wd_count[bitix];
	MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
	/* NOTE(review): count is uint32, so this assert is vacuously true */
	MWBMAP_ASSERT(count >= 0);

	bitmap   = (count == 0) << BCM_MWBMAP_MODOP(bitix);

	MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
	            BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
	            (*bitmap_p) ^ bitmap, count));

	*bitmap_p ^= bitmap; /* mark as in use */

	return;
}

/* Free a previously allocated index back into the multiword bitmap allocator */
void BCMFASTPATH
bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 wordix, bitmap, *bitmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	ASSERT(bitix < mwbmap_p->total);

	/* Start with second level hierarchy */
	wordix   = BCM_MWBMAP_DIVOP(bitix);
	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
	bitmap_p = &mwbmap_p->id_bitmap[wordix];

	ASSERT((*bitmap_p & bitmap) == 0U);	/* ASSERT not a double free */

	mwbmap_p->ifree++; /* update free count */
	ASSERT(mwbmap_p->ifree <= mwbmap_p->total);

	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
	            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
	            mwbmap_p->ifree));

	*bitmap_p |= bitmap; /* mark as available */

	/* Now update first level hierarchy */

	bitix    = wordix;

	wordix   = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
	bitmap_p = &mwbmap_p->wd_bitmap[wordix];

#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
	mwbmap_p->wd_count[bitix]++;
#endif // endif

#if defined(BCM_MWBMAP_DEBUG)
	{
		uint32 count;
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
#else  /*  ! BCM_MWBMAP_USE_CNTSETBITS */
		count = mwbmap_p->wd_count[bitix];
		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */

		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);

		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
		            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
	}
#endif /* BCM_MWBMAP_DEBUG */

	*bitmap_p |= bitmap;

	return;
}

/* Fetch the toal number of free indices in the multiword bitmap allocator */
uint32
bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
{
	bcm_mwbmap_t * mwbmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	ASSERT(mwbmap_p->ifree >= 0);

	return mwbmap_p->ifree;
}

/* Determine whether an index is inuse or free */
bool
bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 wordix, bitmap;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	ASSERT(bitix < mwbmap_p->total);

	wordix   = BCM_MWBMAP_DIVOP(bitix);
	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));

	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
}

/* Debug dump a multiword bitmap allocator */
void
bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
{
	uint32 ix, count;
	bcm_mwbmap_t * mwbmap_p;

	BCM_MWBMAP_AUDIT(mwbmap_hdl);
	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n",
		OSL_OBFUSCATE_BUF((void *)mwbmap_p),
		mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
		printf("\n");
	}
	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
		count = mwbmap_p->wd_count[ix];
		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
		printf("\n");
	}

	return;
}

/* Audit a hierarchical multiword bitmap: every set word-level bit must have
 * a non-zero id-level word, and the total of free ids must equal ifree.
 */
void
bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
{
	bcm_mwbmap_t * mwbmap_p;
	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;

	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);

	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {

		bitmap_p = &mwbmap_p->wd_bitmap[wordix];

		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
			if ((*bitmap_p) & (1 << bitix)) {
				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
#if defined(BCM_MWBMAP_USE_CNTSETBITS)
				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
				count = mwbmap_p->wd_count[idmap_ix];
				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
				ASSERT(count != 0U);
				free_cnt += count;
			}
		}
	}

	ASSERT((int)free_cnt == mwbmap_p->ifree);
}
/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */

/* Simple 16bit Id allocator using a stack implementation.
*/ +typedef struct id16_map { + uint32 failures; /* count of failures */ + void *dbg; /* debug placeholder */ + uint16 total; /* total number of ids managed by allocator */ + uint16 start; /* start value of 16bit ids to be managed */ + int stack_idx; /* index into stack of available ids */ + uint16 stack[0]; /* stack of 16 bit ids */ +} id16_map_t; + +#define ID16_MAP_SZ(items) (sizeof(id16_map_t) + \ + (sizeof(uint16) * (items))) + +#if defined(BCM_DBG) + +/* Uncomment BCM_DBG_ID16 to debug double free */ +/* #define BCM_DBG_ID16 */ + +typedef struct id16_map_dbg { + uint16 total; + bool avail[0]; +} id16_map_dbg_t; +#define ID16_MAP_DBG_SZ(items) (sizeof(id16_map_dbg_t) + \ + (sizeof(bool) * (items))) +#define ID16_MAP_MSG(x) print x +#else +#define ID16_MAP_MSG(x) +#endif /* BCM_DBG */ + +void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */ +id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16) +{ + uint16 idx, val16; + id16_map_t * id16_map; + + ASSERT(total_ids > 0); + + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. + */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); + + id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids)); + if (id16_map == NULL) { + return NULL; + } + + id16_map->total = total_ids; + id16_map->start = start_val16; + id16_map->failures = 0; + id16_map->dbg = NULL; + + /* + * Populate stack with 16bit id values, commencing with start_val16. + * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map. 
+ */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids)); + + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return (void *)id16_map; +} + +void * /* Destruct an id16 allocator instance */ +id16_map_fini(osl_t *osh, void * id16_map_hndl) +{ + uint16 total_ids; + id16_map_t * id16_map; + + if (id16_map_hndl == NULL) + return NULL; + + id16_map = (id16_map_t *)id16_map_hndl; + + total_ids = id16_map->total; + ASSERT(total_ids > 0); + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids)); + id16_map->dbg = NULL; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + id16_map->total = 0; + MFREE(osh, id16_map, ID16_MAP_SZ(total_ids)); + + return NULL; +} + +void +id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16) +{ + uint16 idx, val16; + id16_map_t * id16_map; + + ASSERT(total_ids > 0); + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. 
+ */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); + + id16_map = (id16_map_t *)id16_map_hndl; + if (id16_map == NULL) { + return; + } + + id16_map->total = total_ids; + id16_map->start = start_val16; + id16_map->failures = 0; + + /* Populate stack with 16bit id values, commencing with start_val16 */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ +} + +uint16 BCMFASTPATH /* Allocate a unique 16bit id */ +id16_map_alloc(void * id16_map_hndl) +{ + uint16 val16; + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + + ASSERT(id16_map->total > 0); + + if (id16_map->stack_idx < 0) { + id16_map->failures++; + return ID16_INVALID; + } + + val16 = id16_map->stack[id16_map->stack_idx]; + id16_map->stack_idx--; + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + ASSERT((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); + + if (id16_map->dbg) { /* Validate val16 */ + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE); + id16_map_dbg->avail[val16 - id16_map->start] = FALSE; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return val16; +} + +void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */ +id16_map_free(void * id16_map_hndl, uint16 val16) +{ + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t 
*)id16_map_hndl; + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + ASSERT((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); + + if (id16_map->dbg) { /* Validate val16 */ + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE); + id16_map_dbg->avail[val16 - id16_map->start] = TRUE; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + id16_map->stack_idx++; + id16_map->stack[id16_map->stack_idx] = val16; +} + +uint32 /* Returns number of failures to allocate an unique id16 */ +id16_map_failures(void * id16_map_hndl) +{ + ASSERT(id16_map_hndl != NULL); + return ((id16_map_t *)id16_map_hndl)->failures; +} + +bool +id16_map_audit(void * id16_map_hndl) +{ + int idx; + int insane = 0; + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + + ASSERT(id16_map->stack_idx >= -1); + ASSERT(id16_map->stack_idx < (int)id16_map->total); + + if (id16_map->start == ID16_UNDEFINED) + goto done; + + for (idx = 0; idx <= id16_map->stack_idx; idx++) { + ASSERT(id16_map->stack[idx] >= id16_map->start); + ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total)); + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + uint16 val16 = id16_map->stack[idx]; + if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16] != TRUE) { + insane |= 1; + ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n", + OSL_OBFUSATE_BUF(id16_map_hndl), idx, val16)); + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + uint16 avail = 0; /* Audit available ids counts */ + for (idx = 0; idx < id16_map_dbg->total; idx++) { + if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx16] == TRUE) + avail++; + } + if (avail && (avail != (id16_map->stack_idx + 1))) { + insane |= 1; + ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n", + 
OSL_OBFUSCATE_BUF(id16_map_hndl), + avail, id16_map->stack_idx)); + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + +done: + /* invoke any other system audits */ + return (!!insane); +} +/* END: Simple id16 allocator */ + +void +dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size) +{ + uint32 mem_size; + mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); + if (pool) + MFREE(osh, pool, mem_size); +} +dll_pool_t * +dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size) +{ + uint32 mem_size, i; + dll_pool_t * dll_pool_p; + dll_t * elem_p; + + ASSERT(elem_size > sizeof(dll_t)); + + mem_size = sizeof(dll_pool_t) + (elems_max * elem_size); + + if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) { + printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n", + elems_max, elem_size); + ASSERT(0); + return dll_pool_p; + } + + dll_init(&dll_pool_p->free_list); + dll_pool_p->elems_max = elems_max; + dll_pool_p->elem_size = elem_size; + + elem_p = dll_pool_p->elements; + for (i = 0; i < elems_max; i++) { + dll_append(&dll_pool_p->free_list, elem_p); + elem_p = (dll_t *)((uintptr)elem_p + elem_size); + } + + dll_pool_p->free_count = elems_max; + + return dll_pool_p; +} + +void * +dll_pool_alloc(dll_pool_t * dll_pool_p) +{ + dll_t * elem_p; + + if (dll_pool_p->free_count == 0) { + ASSERT(dll_empty(&dll_pool_p->free_list)); + return NULL; + } + + elem_p = dll_head_p(&dll_pool_p->free_list); + dll_delete(elem_p); + dll_pool_p->free_count -= 1; + + return (void *)elem_p; +} + +void +dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p) +{ + dll_t * node_p = (dll_t *)elem_p; + dll_prepend(&dll_pool_p->free_list, node_p); + dll_pool_p->free_count += 1; +} + +void +dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p) +{ + dll_t * node_p = (dll_t *)elem_p; + dll_append(&dll_pool_p->free_list, node_p); + dll_pool_p->free_count += 1; +} + +#endif // endif + +#endif /* BCMDRIVER */ + +#if defined(BCMDRIVER) || 
defined(WL_UNITTEST) + +/* triggers bcm_bprintf to print to kernel log */ +bool bcm_bprintf_bypass = FALSE; + +/* Initialization of bcmstrbuf structure */ +void +bcm_binit(struct bcmstrbuf *b, char *buf, uint size) +{ + b->origsize = b->size = size; + b->origbuf = b->buf = buf; + if (size > 0) { + buf[0] = '\0'; + } +} + +/* Buffer sprintf wrapper to guard against buffer overflow */ +int +bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) +{ + va_list ap; + int r; + + va_start(ap, fmt); + + r = vsnprintf(b->buf, b->size, fmt, ap); + if (bcm_bprintf_bypass == TRUE) { + printf("%s", b->buf); + goto exit; + } + + /* Non Ansi C99 compliant returns -1, + * Ansi compliant return r >= b->size, + * bcmstdlib returns 0, handle all + */ + /* r == 0 is also the case when strlen(fmt) is zero. + * typically the case when "" is passed as argument. + */ + if ((r == -1) || (r >= (int)b->size)) { + b->size = 0; + } else { + b->size -= r; + b->buf += r; + } + +exit: + va_end(ap); + + return r; +} + +void +bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len) +{ + int i; + + if (msg != NULL && msg[0] != '\0') + bcm_bprintf(b, "%s", msg); + for (i = 0; i < len; i ++) + bcm_bprintf(b, "%02X", buf[i]); + if (newline) + bcm_bprintf(b, "\n"); +} + +void +bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) +{ + int i; + + for (i = 0; i < num_bytes; i++) { + num[i] += amount; + if (num[i] >= amount) + break; + amount = 1; + } +} + +int +bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes) +{ + int i; + + for (i = nbytes - 1; i >= 0; i--) { + if (arg1[i] != arg2[i]) + return (arg1[i] - arg2[i]); + } + return 0; +} + +void +bcm_print_bytes(const char *name, const uchar *data, int len) +{ + int i; + int per_line = 0; + + printf("%s: %d \n", name ? 
name : "", len); + for (i = 0; i < len; i++) { + printf("%02x ", *data++); + per_line++; + if (per_line == 16) { + per_line = 0; + printf("\n"); + } + } + printf("\n"); +} + +/* Look for vendor-specific IE with specified OUI and optional type */ +bcm_tlv_t * +bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui, uint8 *type, uint type_len) +{ + const bcm_tlv_t *ie; + uint8 ie_len; + + ie = (const bcm_tlv_t*)tlvs; + + /* make sure we are looking at a valid IE */ + if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) { + return NULL; + } + + /* Walk through the IEs looking for an OUI match */ + do { + ie_len = ie->len; + if ((ie->id == DOT11_MNG_VS_ID) && + (ie_len >= (DOT11_OUI_LEN + type_len)) && + !bcmp(ie->data, voui, DOT11_OUI_LEN)) + { + /* compare optional type */ + if (type_len == 0 || + !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) { + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + return (bcm_tlv_t *)(ie); /* a match */ + GCC_DIAGNOSTIC_POP(); + } + } + } while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL); + + return NULL; +} + +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) + +int +bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) +{ + uint i, c; + char *p = buf; + char *endp = buf + SSID_FMT_BUF_LEN; + + if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN; + + for (i = 0; i < ssid_len; i++) { + c = (uint)ssid[i]; + if (c == '\\') { + *p++ = '\\'; + *p++ = '\\'; + } else if (bcm_isprint((uchar)c)) { + *p++ = (char)c; + } else { + p += snprintf(p, (endp - p), "\\x%02X", c); + } + } + *p = '\0'; + ASSERT(p < endp); + + return (int)(p - buf); +} +#endif // endif + +#endif /* BCMDRIVER || WL_UNITTEST */ + +char * +bcm_ether_ntoa(const struct ether_addr *ea, char *buf) +{ + static const char hex[] = + { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' + }; + const 
uint8 *octet = ea->octet; + char *p = buf; + int i; + + for (i = 0; i < 6; i++, octet++) { + *p++ = hex[(*octet >> 4) & 0xf]; + *p++ = hex[*octet & 0xf]; + *p++ = ':'; + } + + *(p-1) = '\0'; + + return (buf); +} + +/* Find the position of first bit set + * in the given number. + */ +int +bcm_find_fsb(uint32 num) +{ + uint8 pos = 0; + if (!num) + return pos; + while (!(num & 1)) { + num >>= 1; + pos++; + } + return (pos+1); +} + +char * +bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) +{ + snprintf(buf, 16, "%d.%d.%d.%d", + ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); + return (buf); +} + +char * +bcm_ipv6_ntoa(void *ipv6, char *buf) +{ + /* Implementing RFC 5952 Sections 4 + 5 */ + /* Not thoroughly tested */ + uint16 tmp[8]; + uint16 *a = &tmp[0]; + char *p = buf; + int i, i_max = -1, cnt = 0, cnt_max = 1; + uint8 *a4 = NULL; + memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN); + + for (i = 0; i < IPV6_ADDR_LEN/2; i++) { + if (a[i]) { + if (cnt > cnt_max) { + cnt_max = cnt; + i_max = i - cnt; + } + cnt = 0; + } else + cnt++; + } + if (cnt > cnt_max) { + cnt_max = cnt; + i_max = i - cnt; + } + if (i_max == 0 && + /* IPv4-translated: ::ffff:0:a.b.c.d */ + ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) || + /* IPv4-mapped: ::ffff:a.b.c.d */ + (cnt_max == 5 && a[5] == 0xffff))) + a4 = (uint8*) (a + 6); + + for (i = 0; i < IPV6_ADDR_LEN/2; i++) { + if ((uint8*) (a + i) == a4) { + snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]); + break; + } else if (i == i_max) { + *p++ = ':'; + i += cnt_max - 1; + p[0] = ':'; + p[1] = '\0'; + } else { + if (i) + *p++ = ':'; + p += snprintf(p, 8, "%x", ntoh16(a[i])); + } + } + + return buf; +} + +#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS) +const unsigned char bcm_ctype[] = { + + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ + _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, + _BCM_C, /* 8-15 */ + 
_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ + _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ + _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ + _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ + _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ + _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, + _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ + _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, + _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ + _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ + _BCM_L, _BCM_L, 
_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ +}; + +uint64 +bcm_strtoull(const char *cp, char **endp, uint base) +{ + uint64 result, last_result = 0, value; + bool minus; + + minus = FALSE; + + while (bcm_isspace(*cp)) + cp++; + + if (cp[0] == '+') + cp++; + else if (cp[0] == '-') { + minus = TRUE; + cp++; + } + + if (base == 0) { + if (cp[0] == '0') { + if ((cp[1] == 'x') || (cp[1] == 'X')) { + base = 16; + cp = &cp[2]; + } else { + base = 8; + cp = &cp[1]; + } + } else + base = 10; + } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { + cp = &cp[2]; + } + + result = 0; + + while (bcm_isxdigit(*cp) && + (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { + result = result*base + value; + /* Detected overflow */ + if (result < last_result && !minus) { + if (endp) { + /* Go to the end of current number */ + while (bcm_isxdigit(*cp)) { + cp++; + } + *endp = DISCARD_QUAL(cp, char); + } + return (ulong)-1; + } + last_result = result; + cp++; + } + + if (minus) + result = (ulong)(-(long)result); + + if (endp) + *endp = DISCARD_QUAL(cp, char); + + return (result); +} + +ulong +bcm_strtoul(const char *cp, char **endp, uint base) +{ + return (ulong) bcm_strtoull(cp, endp, base); +} + +int +bcm_atoi(const char *s) +{ + return (int)bcm_strtoul(s, NULL, 10); +} + +/* return pointer to location of substring 'needle' in 'haystack' */ +char * +bcmstrstr(const char *haystack, const char *needle) +{ + int len, nlen; + int i; + + if ((haystack == NULL) || (needle == NULL)) + return DISCARD_QUAL(haystack, char); + + nlen = (int)strlen(needle); + len = (int)strlen(haystack) - nlen + 1; + + for (i = 0; i < len; i++) + if (memcmp(needle, &haystack[i], nlen) == 0) + return DISCARD_QUAL(&haystack[i], char); + return (NULL); +} + +char * +bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len) +{ + for (; s_len >= substr_len; s++, s_len--) 
+ if (strncmp(s, substr, substr_len) == 0) + return DISCARD_QUAL(s, char); + + return NULL; +} + +char * +bcmstrcat(char *dest, const char *src) +{ + char *p; + + p = dest + strlen(dest); + + while ((*p++ = *src++) != '\0') + ; + + return (dest); +} + +char * +bcmstrncat(char *dest, const char *src, uint size) +{ + char *endp; + char *p; + + p = dest + strlen(dest); + endp = p + size; + + while (p != endp && (*p++ = *src++) != '\0') + ; + + return (dest); +} + +/**************************************************************************** +* Function: bcmstrtok +* +* Purpose: +* Tokenizes a string. This function is conceptually similiar to ANSI C strtok(), +* but allows strToken() to be used by different strings or callers at the same +* time. Each call modifies '*string' by substituting a NULL character for the +* first delimiter that is encountered, and updates 'string' to point to the char +* after the delimiter. Leading delimiters are skipped. +* +* Parameters: +* string (mod) Ptr to string ptr, updated by token. +* delimiters (in) Set of delimiter characters. +* tokdelim (out) Character that delimits the returned token. (May +* be set to NULL if token delimiter is not required). +* +* Returns: Pointer to the next token found. NULL when no more tokens are found. +***************************************************************************** +*/ +char * +bcmstrtok(char **string, const char *delimiters, char *tokdelim) +{ + unsigned char *str; + unsigned long map[8]; + int count; + char *nextoken; + + if (tokdelim != NULL) { + /* Prime the token delimiter */ + *tokdelim = '\0'; + } + + /* Clear control map */ + for (count = 0; count < 8; count++) { + map[count] = 0; + } + + /* Set bits in delimiter table */ + do { + map[*delimiters >> 5] |= (1 << (*delimiters & 31)); + } + while (*delimiters++); + + str = (unsigned char*)*string; + + /* Find beginning of token (skip over leading delimiters). 
Note that + * there is no token iff this loop sets str to point to the terminal + * null (*str == '\0') + */ + while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) { + str++; + } + + nextoken = (char*)str; + + /* Find the end of the token. If it is not the end of the string, + * put a null there. + */ + for (; *str; str++) { + if (map[*str >> 5] & (1 << (*str & 31))) { + if (tokdelim != NULL) { + *tokdelim = *str; + } + + *str++ = '\0'; + break; + } + } + + *string = (char*)str; + + /* Determine if a token has been found. */ + if (nextoken == (char *) str) { + return NULL; + } + else { + return nextoken; + } +} + +#define xToLower(C) \ + ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C) + +/**************************************************************************** +* Function: bcmstricmp +* +* Purpose: Compare to strings case insensitively. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. +***************************************************************************** +*/ +int +bcmstricmp(const char *s1, const char *s2) +{ + char dc, sc; + + while (*s2 && *s1) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + } + + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + +/**************************************************************************** +* Function: bcmstrnicmp +* +* Purpose: Compare to strings case insensitively, upto a max of 'cnt' +* characters. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* cnt (in) Max characters to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. 
+***************************************************************************** +*/ +int +bcmstrnicmp(const char* s1, const char* s2, int cnt) +{ + char dc, sc; + + while (*s2 && *s1 && cnt) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + cnt--; + } + + if (!cnt) return 0; + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + +/* parse a xx:xx:xx:xx:xx:xx format ethernet address */ +int +bcm_ether_atoe(const char *p, struct ether_addr *ea) +{ + int i = 0; + char *ep; + + for (;;) { + ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16); + p = ep; + if (!*p++ || i == 6) + break; + } + + return (i == 6); +} + +int +bcm_atoipv4(const char *p, struct ipv4_addr *ip) +{ + + int i = 0; + char *c; + for (;;) { + ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0); + if (*c++ != '.' || i == IPV4_ADDR_LEN) + break; + p = c; + } + return (i == IPV4_ADDR_LEN); +} +#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */ + +#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER) +/* registry routine buffer preparation utility functions: + * parameter order is like strncpy, but returns count + * of bytes copied. 
Minimum bytes copied is null char(1)/wchar(2) + */ +ulong +wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) +{ + ulong copyct = 1; + ushort i; + + if (abuflen == 0) + return 0; + + /* wbuflen is in bytes */ + wbuflen /= sizeof(ushort); + + for (i = 0; i < wbuflen; ++i) { + if (--abuflen == 0) + break; + *abuf++ = (char) *wbuf++; + ++copyct; + } + *abuf = '\0'; + + return copyct; +} +#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ + +#ifdef BCM_OBJECT_TRACE + +#define BCM_OBJECT_MERGE_SAME_OBJ 0 + +/* some place may add / remove the object to trace list for Linux: */ +/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */ +/* remove: osl_pktfree dev_kfree_skb netif_rx */ + +#define BCM_OBJDBG_COUNT (1024 * 100) +static spinlock_t dbgobj_lock; +#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock) +#define BCM_OBJDBG_LOCK_DESTROY() +#define BCM_OBJDBG_LOCK spin_lock_irqsave +#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore + +#define BCM_OBJDBG_ADDTOHEAD 0 +#define BCM_OBJDBG_ADDTOTAIL 1 + +#define BCM_OBJDBG_CALLER_LEN 32 +struct bcm_dbgobj { + struct bcm_dbgobj *prior; + struct bcm_dbgobj *next; + uint32 flag; + void *obj; + uint32 obj_sn; + uint32 obj_state; + uint32 line; + char caller[BCM_OBJDBG_CALLER_LEN]; +}; + +static struct bcm_dbgobj *dbgobj_freehead = NULL; +static struct bcm_dbgobj *dbgobj_freetail = NULL; +static struct bcm_dbgobj *dbgobj_objhead = NULL; +static struct bcm_dbgobj *dbgobj_objtail = NULL; + +static uint32 dbgobj_sn = 0; +static int dbgobj_count = 0; +static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT]; + +void +bcm_object_trace_init(void) +{ + int i = 0; + BCM_OBJDBG_LOCK_INIT(); + memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT); + dbgobj_freehead = &bcm_dbg_objs[0]; + dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1]; + + for (i = 0; i < BCM_OBJDBG_COUNT; ++i) { + bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ? 
+ dbgobj_freehead : &bcm_dbg_objs[i + 1]; + bcm_dbg_objs[i].prior = (i == 0) ? + dbgobj_freetail : &bcm_dbg_objs[i - 1]; + } +} + +void +bcm_object_trace_deinit(void) +{ + if (dbgobj_objhead || dbgobj_objtail) { + printf("%s: not all objects are released\n", __FUNCTION__); + ASSERT(0); + } + BCM_OBJDBG_LOCK_DESTROY(); +} + +static void +bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj) +{ + if ((dbgobj == *head) && (dbgobj == *tail)) { + *head = NULL; + *tail = NULL; + } else if (dbgobj == *head) { + *head = (*head)->next; + } else if (dbgobj == *tail) { + *tail = (*tail)->prior; + } + dbgobj->next->prior = dbgobj->prior; + dbgobj->prior->next = dbgobj->next; +} + +static void +bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int addtotail) +{ + if (!(*head) && !(*tail)) { + *head = dbgobj; + *tail = dbgobj; + dbgobj->next = dbgobj; + dbgobj->prior = dbgobj; + } else if ((*head) && (*tail)) { + (*tail)->next = dbgobj; + (*head)->prior = dbgobj; + dbgobj->next = *head; + dbgobj->prior = *tail; + if (addtotail == BCM_OBJDBG_ADDTOTAIL) + *tail = dbgobj; + else + *head = dbgobj; + } else { + ASSERT(0); /* can't be this case */ + } +} + +static INLINE void +bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int movetotail) +{ + if ((*head) && (*tail)) { + if (movetotail == BCM_OBJDBG_ADDTOTAIL) { + if (dbgobj != (*tail)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } else { + if (dbgobj != (*head)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } + } else { + ASSERT(0); /* can't be this case */ + } +} + +void +bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + 
+ if (opt == BCM_OBJDBG_ADD_PKT || + opt == BCM_OBJDBG_ADD) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + printf("%s: obj %p allocated from %s(%d)," + " allocate again from %s(%d)\n", + __FUNCTION__, dbgobj->obj, + dbgobj->caller, dbgobj->line, + caller, line); + ASSERT(0); + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +#if BCM_OBJECT_MERGE_SAME_OBJ + dbgobj = dbgobj_freetail; + while (dbgobj) { + if (dbgobj->obj == obj) { + goto FREED_ENTRY_FOUND; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + + dbgobj = dbgobj_freehead; +#if BCM_OBJECT_MERGE_SAME_OBJ +FREED_ENTRY_FOUND: +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + if (!dbgobj) { + printf("%s: already got %d objects ?????????????????????\n", + __FUNCTION__, BCM_OBJDBG_COUNT); + ASSERT(0); + goto EXIT; + } + + bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj); + dbgobj->obj = obj; + strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN); + dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0'; + dbgobj->line = line; + dbgobj->flag = 0; + if (opt == BCM_OBJDBG_ADD_PKT) { + dbgobj->obj_sn = dbgobj_sn++; + dbgobj->obj_state = 0; + /* first 4 bytes is pkt sn */ + if (((unsigned long)PKTTAG(obj)) & 0x3) + printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj)); + *(uint32*)PKTTAG(obj) = dbgobj->obj_sn; + } + bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + + dbgobj_count++; + + } else if (opt == BCM_OBJDBG_REMOVE) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (dbgobj->flag) { + printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n", + __FUNCTION__, obj, dbgobj->flag, caller, line); + } + bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj); + memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN); + strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN); + 
dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0'; + dbgobj->line = line; + bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + dbgobj_count--; + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + dbgobj = dbgobj_freetail; + while (dbgobj && dbgobj->obj) { + if (dbgobj->obj == obj) { + printf("%s: obj %p already freed from from %s(%d)," + " try free again from %s(%d)\n", + __FUNCTION__, obj, + dbgobj->caller, dbgobj->line, + caller, line); + //ASSERT(0); /* release same obj more than one time? */ + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } + + printf("%s: ################### release none-existing obj %p from %s(%d)\n", + __FUNCTION__, obj, caller, line); + //ASSERT(0); /* release same obj more than one time? */ + + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_trace_upd(void *obj, void *obj_new) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + dbgobj->obj = obj_new; + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn, + const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if ((dbgobj->obj == obj) && + ((!chksn) || (dbgobj->obj_sn == sn))) { + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == 
dbgobj_objtail) + break; + } + + dbgobj = dbgobj_freetail; + while (dbgobj) { + if ((dbgobj->obj == obj) && + ((!chksn) || (dbgobj->obj_sn == sn))) { + printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n", + __FUNCTION__, caller, line, + dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state, + dbgobj->caller, dbgobj->line); + goto EXIT; + } + else if (dbgobj->obj == NULL) { + break; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } + + printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n", + __FUNCTION__, obj, caller, line, chksn ? "yes" : "no", sn); + dbgobj = dbgobj_objtail; + while (dbgobj) { + printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n", + __FUNCTION__, caller, line, + dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line); + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +void +bcm_object_feature_set(void *obj, uint32 type, uint32 value) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + if (value & BCM_OBJECT_FEATURE_CLEAR) + dbgobj->flag &= ~(value); + else + dbgobj->flag |= (value); + } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) { + dbgobj->obj_state = value; + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("%s: obj %p not found in active list\n", __FUNCTION__, obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return; +} + +int +bcm_object_feature_get(void *obj, uint32 type, uint32 value) +{ + int rtn = 0; + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + 
BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + if (type == BCM_OBJECT_FEATURE_FLAG) { + rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR); + } + if (dbgobj != dbgobj_objtail) { + bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail, + dbgobj, BCM_OBJDBG_ADDTOTAIL); + } + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + + printf("%s: obj %p not found in active list\n", __FUNCTION__, obj); + ASSERT(0); + +EXIT: + BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags); + return rtn; +} + +#endif /* BCM_OBJECT_TRACE */ + +uint8 * +bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst) +{ + uint8 *new_dst = dst; + bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst; + + /* dst buffer should always be valid */ + ASSERT(dst); + + /* data len must be within valid range */ + ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)); + + /* source data buffer pointer should be valid, unless datalen is 0 + * meaning no data with this TLV + */ + ASSERT((data != NULL) || (datalen == 0)); + + /* only do work if the inputs are valid + * - must have a dst to write to AND + * - datalen must be within range AND + * - the source data pointer must be non-NULL if datalen is non-zero + * (this last condition detects datalen > 0 with a NULL data pointer) + */ + if ((dst != NULL) && + ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) && + ((data != NULL) || (datalen == 0))) { + + /* write type, len fields */ + dst_tlv->id = (uint8)type; + dst_tlv->len = (uint8)datalen; + + /* if data is present, copy to the output buffer and update + * pointer to output buffer + */ + if (datalen > 0) { + + memcpy(dst_tlv->data, data, datalen); + } + + /* update the output destination poitner to point past + * the TLV written + */ + new_dst = dst + BCM_TLV_HDR_SIZE + datalen; + } + + return (new_dst); +} + +uint8 * +bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst) 
+{ + uint8 *new_dst = dst; + bcm_tlv_ext_t *dst_tlv = (bcm_tlv_ext_t *)dst; + + /* dst buffer should always be valid */ + ASSERT(dst); + + /* data len must be within valid range */ + ASSERT(datalen <= BCM_TLV_EXT_MAX_DATA_SIZE); + + /* source data buffer pointer should be valid, unless datalen is 0 + * meaning no data with this TLV + */ + ASSERT((data != NULL) || (datalen == 0)); + + /* only do work if the inputs are valid + * - must have a dst to write to AND + * - datalen must be within range AND + * - the source data pointer must be non-NULL if datalen is non-zero + * (this last condition detects datalen > 0 with a NULL data pointer) + */ + if ((dst != NULL) && + (datalen <= BCM_TLV_EXT_MAX_DATA_SIZE) && + ((data != NULL) || (datalen == 0))) { + + /* write type, len fields */ + dst_tlv->id = (uint8)type; + dst_tlv->ext = ext; + dst_tlv->len = 1 + (uint8)datalen; + + /* if data is present, copy to the output buffer and update + * pointer to output buffer + */ + if (datalen > 0) { + memcpy(dst_tlv->data, data, datalen); + } + + /* update the output destination poitner to point past + * the TLV written + */ + new_dst = dst + BCM_TLV_EXT_HDR_SIZE + datalen; + } + + return (new_dst); +} + +uint8 * +bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen) +{ + uint8 *new_dst = dst; + + if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) { + + /* if len + tlv hdr len is more than destlen, don't do anything + * just return the buffer untouched + */ + if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) { + + new_dst = bcm_write_tlv(type, data, datalen, dst); + } + } + + return (new_dst); +} + +uint8 * +bcm_copy_tlv(const void *src, uint8 *dst) +{ + uint8 *new_dst = dst; + const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src; + uint totlen; + + ASSERT(dst && src); + if (dst && src) { + + totlen = BCM_TLV_HDR_SIZE + src_tlv->len; + memcpy(dst, src_tlv, totlen); + new_dst = dst + totlen; + } + + return (new_dst); +} + +uint8 
*bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen) +{ + uint8 *new_dst = dst; + const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src; + + ASSERT(src); + if (src) { + if (bcm_valid_tlv(src_tlv, dst_maxlen)) { + new_dst = bcm_copy_tlv(src, dst); + } + } + + return (new_dst); +} + +#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS) +/******************************************************************************* + * crc8 + * + * Computes a crc8 over the input data using the polynomial: + * + * x^8 + x^7 +x^6 + x^4 + x^2 + 1 + * + * The caller provides the initial value (either CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC8_GOOD_VALUE indicates a valid CRC. + * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint8 crc8_table[256] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 
0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F +}; + +#define CRC_INNER_LOOP(n, c, x) \ + (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] + +uint8 +hndcrc8( + const uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint8 crc /* either CRC8_INIT_VALUE or previous return value */ +) +{ + /* hard code the crc loop instead of using CRC_INNER_LOOP macro + * to avoid the undefined and unnecessary (uint8 >> 8) operation. + */ + while (nbytes-- > 0) + crc = crc8_table[(crc ^ *pdata++) & 0xff]; + + return crc; +} + +/******************************************************************************* + * crc16 + * + * Computes a crc16 over the input data using the polynomial: + * + * x^16 + x^12 +x^5 + 1 + * + * The caller provides the initial value (either CRC16_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. 
When checking, a final + * return value of CRC16_GOOD_VALUE indicates a valid CRC. + * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint16 crc16_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, + 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, + 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, + 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, + 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, + 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, + 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, + 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, + 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, + 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, + 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, + 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, + 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, + 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, + 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, + 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, + 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, + 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, + 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, + 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, + 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, + 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, + 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 
0xC134, + 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, + 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, + 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, + 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, + 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, + 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, + 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, + 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, + 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 +}; + +uint16 +hndcrc16( + const uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint16 crc /* either CRC16_INIT_VALUE or previous return value */ +) +{ + while (nbytes-- > 0) + CRC_INNER_LOOP(16, crc, *pdata++); + return crc; +} + +static const uint32 crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +/* + * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if + * accumulating over multiple pieces. + */ +uint32 +hndcrc32(const uint8 *pdata, uint nbytes, uint32 crc) +{ + const uint8 *pend; + pend = pdata + nbytes; + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); + + return crc; +} + +#ifdef notdef +#define CLEN 1499 /* CRC Length */ +#define CBUFSIZ (CLEN+4) +#define CNBUFS 5 /* # of bufs */ + +void +testcrc32(void) +{ + uint j, k, l; + uint8 *buf; + uint len[CNBUFS]; + uint32 crcr; + uint32 crc32tv[CNBUFS] = + {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; + + ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); + + /* step through all possible alignments */ + for (l = 0; l <= 4; l++) { + for (j = 0; j < CNBUFS; j++) { + len[j] = CLEN; + for (k = 0; k < len[j]; k++) + *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; + } + + for (j = 0; j < CNBUFS; j++) { + crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); + ASSERT(crcr == crc32tv[j]); + } + } + + MFREE(buf, CBUFSIZ*CNBUFS); + return; +} +#endif /* notdef */ + +/* + * Advance from the current 1-byte tag/1-byte length/variable-length value + * triple, to the next, returning a pointer to the next. + * If the current or next TLV is invalid (does not fit in given buffer length), + * NULL is returned. + * *buflen is not modified if the TLV elt parameter is invalid, or is decremented + * by the TLV parameter's length if it is valid. 
+ */ +bcm_tlv_t * +bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen) +{ + uint len; + + /* validate current elt */ + if (!bcm_valid_tlv(elt, *buflen)) { + return NULL; + } + + /* advance to next elt */ + len = elt->len; + elt = (const bcm_tlv_t*)(elt->data + len); + *buflen -= (TLV_HDR_LEN + len); + + /* validate next elt */ + if (!bcm_valid_tlv(elt, *buflen)) { + return NULL; + } + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + return (bcm_tlv_t *)(elt); + GCC_DIAGNOSTIC_POP(); +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + */ +bcm_tlv_t * +bcm_parse_tlvs(const void *buf, uint buflen, uint key) +{ + const bcm_tlv_t *elt; + int totlen; + + if ((elt = (const bcm_tlv_t*)buf) == NULL) { + return NULL; + } + totlen = (int)buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + uint len = elt->len; + + /* validate remaining totlen */ + if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + return (bcm_tlv_t *)(elt); + GCC_DIAGNOSTIC_POP(); + } + + elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + +bcm_tlv_t * +bcm_parse_tlvs_dot11(const void *buf, int buflen, uint key, bool id_ext) +{ + bcm_tlv_t *elt; + int totlen; + + /* + ideally, we don't want to do that, but returning a const pointer + from these parse function spreads casting everywhere in the code + */ + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + elt = (bcm_tlv_t*)buf; + GCC_DIAGNOSTIC_POP(); + + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + int len = elt->len; + + do { + /* validate remaining totlen */ + if (totlen < (int)(len + TLV_HDR_LEN)) + break; + + if (id_ext) { + if (!DOT11_MNG_IE_ID_EXT_MATCH(elt, key)) + break; + } else if (elt->id != key) { + break; + } + + return (bcm_tlv_t *)(elt); /* a match */ + } while 
(0); + + elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + * return NULL if not found or length field < min_varlen + */ +bcm_tlv_t * +bcm_parse_tlvs_min_bodylen(const void *buf, int buflen, uint key, int min_bodylen) +{ + bcm_tlv_t * ret; + ret = bcm_parse_tlvs(buf, buflen, key); + if (ret == NULL || ret->len < min_bodylen) { + return NULL; + } + return ret; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag. Stop parsing when we see an element whose ID is greater + * than the target key. + */ +const bcm_tlv_t * +bcm_parse_ordered_tlvs(const void *buf, int buflen, uint key) +{ + const bcm_tlv_t *elt; + int totlen; + + elt = (const bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + uint id = elt->id; + int len = elt->len; + + /* Punt if we start seeing IDs > than target key */ + if (id > key) { + return (NULL); + } + + /* validate remaining totlen */ + if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { + return (elt); + } + + elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + return NULL; +} +#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */ + +#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ + defined(DHD_DEBUG) +int +bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len) +{ + int i, slen = 0; + uint32 bit, mask; + const char *name; + mask = bd->mask; + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) { + bit = bd->bitfield[i].bit; + if ((flags & mask) == bit) { + if (len > (int)strlen(name)) { + 
slen = strlen(name); + strncpy(buf, name, slen+1); + } + break; + } + } + return slen; +} + +int +bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) +{ + int i; + char* p = buf; + char hexstr[16]; + int slen = 0, nlen = 0; + uint32 bit; + const char* name; + + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; flags != 0; i++) { + bit = bd[i].bit; + name = bd[i].name; + if (bit == 0 && flags != 0) { + /* print any unnamed bits */ + snprintf(hexstr, 16, "0x%X", flags); + name = hexstr; + flags = 0; /* exit loop */ + } else if ((flags & bit) == 0) + continue; + flags &= ~bit; + nlen = strlen(name); + slen += nlen; + /* count btwn flag space */ + if (flags != 0) + slen += 1; + /* need NULL char as well */ + if (len <= slen) + break; + /* copy NULL char but don't count it */ + strncpy(p, name, nlen + 1); + p += nlen; + /* copy btwn flag space and NULL char */ + if (flags != 0) + p += snprintf(p, 2, " "); + } + + /* indicate the str was too short */ + if (flags != 0) { + p += snprintf(p, 2, ">"); + } + + return (int)(p - buf); +} + +/* print out whcih bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset. */ +int +bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz, + const uint8 *addr, uint size, char *buf, int len) +{ + uint i; + char *p = buf; + int slen = 0, nlen = 0; + uint32 bit; + const char* name; + bool more = FALSE; + + BCM_REFERENCE(size); + + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; i < bdsz; i++) { + bit = bd[i].bit; + name = bd[i].name; + if (isset(addr, bit)) { + nlen = strlen(name); + slen += nlen; + /* need SPACE - for simplicity */ + slen += 1; + /* need NULL as well */ + if (len < slen + 1) { + more = TRUE; + break; + } + memcpy(p, name, nlen); + p += nlen; + p[0] = ' '; + p += 1; + p[0] = '\0'; + } + } + + if (more) { + p[0] = '>'; + p += 1; + p[0] = '\0'; + } + + return (int)(p - buf); +} +#endif // endif + +/* print bytes formatted as hex to a string. 
return the resulting string length */ +int +bcm_format_hex(char *str, const void *bytes, int len) +{ + int i; + char *p = str; + const uint8 *src = (const uint8*)bytes; + + for (i = 0; i < len; i++) { + p += snprintf(p, 3, "%02X", *src); + src++; + } + return (int)(p - str); +} + +/* pretty hex print a contiguous buffer */ +void +prhex(const char *msg, const uchar *buf, uint nbytes) +{ + char line[128], *p; + int len = sizeof(line); + int nchar; + uint i; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + nchar = snprintf(p, len, " %04x: ", i); /* line prefix */ + p += nchar; + len -= nchar; + } + if (len > 0) { + nchar = snprintf(p, len, "%02x ", buf[i]); + p += nchar; + len -= nchar; + } + + if (i % 16 == 15) { + printf("%s\n", line); /* flush line */ + p = line; + len = sizeof(line); + } + } + + /* flush last partial line */ + if (p != line) + printf("%s\n", line); +} + +static const char *crypto_algo_names[] = { + "NONE", + "WEP1", + "TKIP", + "WEP128", + "AES_CCM", + "AES_OCB_MSDU", + "AES_OCB_MPDU", +#ifdef BCMCCX + "CKIP", + "CKIP_MMH", + "WEP_MMH", + "NALG", +#else + "NALG", + "UNDEF", + "UNDEF", + "UNDEF", +#endif /* BCMCCX */ +#ifdef BCMWAPI_WAI + "WAPI", +#else + "UNDEF", +#endif // endif + "PMK", + "BIP", + "AES_GCM", + "AES_CCM256", + "AES_GCM256", + "BIP_CMAC256", + "BIP_GMAC", + "BIP_GMAC256", + "UNDEF" +}; + +const char * +bcm_crypto_algo_name(uint algo) +{ + return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR"; +} + +char * +bcm_chipname(uint chipid, char *buf, uint len) +{ + const char *fmt; + + fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? 
"%d" : "%x"; + snprintf(buf, len, fmt, chipid); + return buf; +} + +/* Produce a human-readable string for boardrev */ +char * +bcm_brev_str(uint32 brev, char *buf) +{ + if (brev < 0x100) + snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); + else + snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff); + + return (buf); +} + +#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */ + +/* dump large strings to console */ +void +printbig(char *buf) +{ + uint len, max_len; + char c; + + len = (uint)strlen(buf); + + max_len = BUFSIZE_TODUMP_ATONCE; + + while (len > max_len) { + c = buf[max_len]; + buf[max_len] = '\0'; + printf("%s", buf); + buf[max_len] = c; + + buf += max_len; + len -= max_len; + } + /* print the remaining string */ + printf("%s\n", buf); + return; +} + +/* routine to dump fields in a fileddesc structure */ +uint +bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, + char *buf, uint32 bufsize) +{ + uint filled_len; + int len; + struct fielddesc *cur_ptr; + + filled_len = 0; + cur_ptr = fielddesc_array; + + while (bufsize > 1) { + if (cur_ptr->nameandfmt == NULL) + break; + len = snprintf(buf, bufsize, cur_ptr->nameandfmt, + read_rtn(arg0, arg1, cur_ptr->offset)); + /* check for snprintf overflow or error */ + if (len < 0 || (uint32)len >= bufsize) + len = bufsize - 1; + buf += len; + bufsize -= len; + filled_len += len; + cur_ptr++; + } + return filled_len; +} + +uint +bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint buflen) +{ + uint len; + + len = (uint)strlen(name) + 1; + + if ((len + datalen) > buflen) + return 0; + + strncpy(buf, name, buflen); + + /* append data onto the end of the name string */ + if (data && datalen != 0) { + memcpy(&buf[len], data, datalen); + len += datalen; + } + + return len; +} + +/* Quarter dBm units to mW + * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 + * Table is offset so the last entry 
is largest mW value that fits in + * a uint16. + */ + +#define QDBM_OFFSET 153 /* Offset for first entry */ +#define QDBM_TABLE_LEN 40 /* Table size */ + +/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. + * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 + */ +#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ + +/* Largest mW value that will round down to the last table entry, + * QDBM_OFFSET + QDBM_TABLE_LEN-1. + * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. + */ +#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ + +static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { +/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ +/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, +/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, +/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, +/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, +/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 +}; + +uint16 +bcm_qdbm_to_mw(uint8 qdbm) +{ + uint factor = 1; + int idx = qdbm - QDBM_OFFSET; + + if (idx >= QDBM_TABLE_LEN) { + /* clamp to max uint16 mW value */ + return 0xFFFF; + } + + /* scale the qdBm index up to the range of the table 0-40 + * where an offset of 40 qdBm equals a factor of 10 mW. + */ + while (idx < 0) { + idx += 40; + factor *= 10; + } + + /* return the mW value scaled down to the correct factor of 10, + * adding in factor/2 to get proper rounding. 
+ */ + return ((nqdBm_to_mW_map[idx] + factor/2) / factor); +} + +uint8 +bcm_mw_to_qdbm(uint16 mw) +{ + uint8 qdbm; + int offset; + uint mw_uint = mw; + uint boundary; + + /* handle boundary case */ + if (mw_uint <= 1) + return 0; + + offset = QDBM_OFFSET; + + /* move mw into the range of the table */ + while (mw_uint < QDBM_TABLE_LOW_BOUND) { + mw_uint *= 10; + offset -= 40; + } + + for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { + boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - + nqdBm_to_mW_map[qdbm])/2; + if (mw_uint < boundary) break; + } + + qdbm += (uint8)offset; + + return (qdbm); +} + +uint +bcm_bitcount(uint8 *bitmap, uint length) +{ + uint bitcount = 0, i; + uint8 tmp; + for (i = 0; i < length; i++) { + tmp = bitmap[i]; + while (tmp) { + bitcount++; + tmp &= (tmp - 1); + } + } + return bitcount; +} + +/* + * ProcessVars:Takes a buffer of "=\n" lines read from a file and ending in a NUL. + * also accepts nvram files which are already in the format of =\0\=\0 + * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs. + * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs. 
+*/ + +unsigned int +process_nvram_vars(char *varbuf, unsigned int len) +{ + char *dp; + bool findNewline; + int column; + unsigned int buf_len, n; + unsigned int pad = 0; + char nv_ver[128]; + + dp = varbuf; + + findNewline = FALSE; + column = 0; + + // terence 20130914: print out NVRAM version + if (varbuf[0] == '#') { + memset(nv_ver, 0x00, sizeof(nv_ver)); + for (n=1; n= start) { + if (endbyte - startbyte > 1) + { + startbytelastbit = (startbyte+1)*NBBY - 1; + endbytestartbit = endbyte*NBBY; + for (i = startbyte+1; i < endbyte; i++) + ((uint8 *)array)[i] = 0xFF; + for (i = start; i <= startbytelastbit; i++) + setbit(array, i); + for (i = endbytestartbit; i <= end; i++) + setbit(array, i); + } else { + for (i = start; i <= end; i++) + setbit(array, i); + } + } + else { + set_bitrange(array, start, maxbit, maxbit); + set_bitrange(array, 0, end, maxbit); + } +} + +void +bcm_bitprint32(const uint32 u32arg) +{ + int i; + for (i = NBITS(uint32) - 1; i >= 0; i--) { + if (isbitset(u32arg, i)) { + printf("1"); + } else { + printf("0"); + } + + if ((i % NBBY) == 0) printf(" "); + } + printf("\n"); +} + +/* calculate checksum for ip header, tcp / udp header / data */ +uint16 +bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) +{ + while (len > 1) { + sum += (buf[0] << 8) | buf[1]; + buf += 2; + len -= 2; + } + + if (len > 0) { + sum += (*buf) << 8; + } + + while (sum >> 16) { + sum = (sum & 0xffff) + (sum >> 16); + } + + return ((uint16)~sum); +} + +int +BCMRAMFN(valid_bcmerror)(int e) +{ + return ((e <= 0) && (e >= BCME_LAST)); +} + +#ifdef DEBUG_COUNTER +#if (OSL_SYSUPTIME_SUPPORT == TRUE) +void counter_printlog(counter_tbl_t *ctr_tbl) +{ + uint32 now; + + if (!ctr_tbl->enabled) + return; + + now = OSL_SYSUPTIME(); + + if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) { + uint8 i = 0; + printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print); + + for (i = 0; i < ctr_tbl->needed_cnt; i++) { + printf(" %u", ctr_tbl->cnt[i]); + } + 
printf("\n"); + + ctr_tbl->prev_log_print = now; + bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint)); + } +} +#else +/* OSL_SYSUPTIME is not supported so no way to get time */ +#define counter_printlog(a) do {} while (0) +#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */ +#endif /* DEBUG_COUNTER */ + +/* calculate partial checksum */ +static uint32 +ip_cksum_partial(uint32 sum, uint8 *val8, uint32 count) +{ + uint32 i; + uint16 *val16 = (uint16 *)val8; + + ASSERT(val8 != NULL); + /* partial chksum calculated on 16-bit values */ + ASSERT((count % 2) == 0); + + count /= 2; + + for (i = 0; i < count; i++) { + sum += *val16++; + } + return sum; +} + +/* calculate IP checksum */ +static uint16 +ip_cksum(uint32 sum, uint8 *val8, uint32 count) +{ + uint16 *val16 = (uint16 *)val8; + + ASSERT(val8 != NULL); + + while (count > 1) { + sum += *val16++; + count -= 2; + } + /* add left-over byte, if any */ + if (count > 0) { + sum += (*(uint8 *)val16); + } + + /* fold 32-bit sum to 16 bits */ + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); + return ((uint16)~sum); +} + +/* calculate IPv4 header checksum + * - input ip points to IP header in network order + * - output cksum is in network order + */ +uint16 +ipv4_hdr_cksum(uint8 *ip, int ip_len) +{ + uint32 sum = 0; + uint8 *ptr = ip; + + ASSERT(ip != NULL); + ASSERT(ip_len >= IPV4_MIN_HEADER_LEN); + + /* partial cksum skipping the hdr_chksum field */ + sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct ipv4_hdr, hdr_chksum)); + ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2; + + /* return calculated chksum */ + return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip)); +} + +/* calculate TCP header checksum using partial sum */ +static uint16 +tcp_hdr_chksum(uint32 sum, uint8 *tcp_hdr, uint16 tcp_len) +{ + uint8 *ptr = tcp_hdr; + + ASSERT(tcp_hdr != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* partial TCP cksum skipping the chksum field */ + sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct bcmtcp_hdr, 
chksum)); + ptr += OFFSETOF(struct bcmtcp_hdr, chksum) + 2; + + /* return calculated chksum */ + return ip_cksum(sum, ptr, tcp_len - OFFSETOF(struct bcmtcp_hdr, urg_ptr)); +} + +struct tcp_pseudo_hdr { + uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ + uint8 zero; + uint8 prot; + uint16 tcp_size; +}; + +/* calculate IPv4 TCP header checksum + * - input ip and tcp points to IP and TCP header in network order + * - output cksum is in network order + */ +uint16 +ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len) +{ + struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)ip; + struct tcp_pseudo_hdr tcp_ps; + uint32 sum = 0; + + ASSERT(ip != NULL); + ASSERT(tcp != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* pseudo header cksum */ + memset(&tcp_ps, 0, sizeof(tcp_ps)); + memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN); + memcpy(&tcp_ps.src_ip, ip_hdr->src_ip, IPV4_ADDR_LEN); + tcp_ps.zero = 0; + tcp_ps.prot = ip_hdr->prot; + tcp_ps.tcp_size = hton16(tcp_len); + sum = ip_cksum_partial(sum, (uint8 *)&tcp_ps, sizeof(tcp_ps)); + + /* return calculated TCP header chksum */ + return tcp_hdr_chksum(sum, tcp, tcp_len); +} + +struct ipv6_pseudo_hdr { + uint8 saddr[IPV6_ADDR_LEN]; + uint8 daddr[IPV6_ADDR_LEN]; + uint16 payload_len; + uint8 zero; + uint8 next_hdr; +}; + +/* calculate IPv6 TCP header checksum + * - input ipv6 and tcp points to IPv6 and TCP header in network order + * - output cksum is in network order + */ +uint16 +ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len) +{ + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)ipv6; + struct ipv6_pseudo_hdr ipv6_pseudo; + uint32 sum = 0; + + ASSERT(ipv6 != NULL); + ASSERT(tcp != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* pseudo header cksum */ + memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo)); + memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr, + sizeof(ipv6_pseudo.saddr)); + memcpy((char *)ipv6_pseudo.daddr, 
(char *)ipv6_hdr->daddr.addr, + sizeof(ipv6_pseudo.daddr)); + ipv6_pseudo.payload_len = ipv6_hdr->payload_len; + ipv6_pseudo.next_hdr = ipv6_hdr->nexthdr; + sum = ip_cksum_partial(sum, (uint8 *)&ipv6_pseudo, sizeof(ipv6_pseudo)); + + /* return calculated TCP header chksum */ + return tcp_hdr_chksum(sum, tcp, tcp_len); +} + +void *_bcmutils_dummy_fn = NULL; + +/* GROUP 1 --- start + * These function under GROUP 1 are general purpose functions to do complex number + * calculations and square root calculation. + */ + +uint32 sqrt_int(uint32 value) +{ + uint32 root = 0, shift = 0; + + /* Compute integer nearest to square root of input integer value */ + for (shift = 0; shift < 32; shift += 2) { + if (((0x40000000 >> shift) + root) <= value) { + value -= ((0x40000000 >> shift) + root); + root = (root >> 1) | (0x40000000 >> shift); + } + else { + root = root >> 1; + } + } + + /* round to the nearest integer */ + if (root < value) ++root; + + return root; +} +/* GROUP 1 --- end */ + +/* read/write field in a consecutive bits in an octet array. + * 'addr' is the octet array's start byte address + * 'size' is the octet array's byte size + * 'stbit' is the value's start bit offset + * 'nbits' is the value's bit size + * This set of utilities are for convenience. Don't use them + * in time critical/data path as there's a great overhead in them. + */ +void +setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val) +{ + uint fbyte = stbit >> 3; /* first byte */ + uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */ + uint fbit = stbit & 7; /* first bit in the first byte */ + uint rbits = (nbits > 8 - fbit ? 
+ nbits - (8 - fbit) : + 0) & 7; /* remaining bits of the last byte when not 0 */ + uint8 mask; + uint byte; + + BCM_REFERENCE(size); + + ASSERT(fbyte < size); + ASSERT(lbyte < size); + ASSERT(nbits <= (sizeof(val) << 3)); + + /* all bits are in the same byte */ + if (fbyte == lbyte) { + mask = ((1 << nbits) - 1) << fbit; + addr[fbyte] &= ~mask; + addr[fbyte] |= (uint8)(val << fbit); + return; + } + + /* first partial byte */ + if (fbit > 0) { + mask = (0xff << fbit); + addr[fbyte] &= ~mask; + addr[fbyte] |= (uint8)(val << fbit); + val >>= (8 - fbit); + nbits -= (8 - fbit); + fbyte ++; /* first full byte */ + } + + /* last partial byte */ + if (rbits > 0) { + mask = (1 << rbits) - 1; + addr[lbyte] &= ~mask; + addr[lbyte] |= (uint8)(val >> (nbits - rbits)); + lbyte --; /* last full byte */ + } + + /* remaining full byte(s) */ + for (byte = fbyte; byte <= lbyte; byte ++) { + addr[byte] = (uint8)val; + val >>= 8; + } +} + +uint32 +getbits(const uint8 *addr, uint size, uint stbit, uint nbits) +{ + uint fbyte = stbit >> 3; /* first byte */ + uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */ + uint fbit = stbit & 7; /* first bit in the first byte */ + uint rbits = (nbits > 8 - fbit ? 
+ nbits - (8 - fbit) : + 0) & 7; /* remaining bits of the last byte when not 0 */ + uint32 val = 0; + uint bits = 0; /* bits in first partial byte */ + uint8 mask; + uint byte; + + BCM_REFERENCE(size); + + ASSERT(fbyte < size); + ASSERT(lbyte < size); + ASSERT(nbits <= (sizeof(val) << 3)); + + /* all bits are in the same byte */ + if (fbyte == lbyte) { + mask = ((1 << nbits) - 1) << fbit; + val = (addr[fbyte] & mask) >> fbit; + return val; + } + + /* first partial byte */ + if (fbit > 0) { + bits = 8 - fbit; + mask = (0xff << fbit); + val |= (addr[fbyte] & mask) >> fbit; + fbyte ++; /* first full byte */ + } + + /* last partial byte */ + if (rbits > 0) { + mask = (1 << rbits) - 1; + val |= (addr[lbyte] & mask) << (nbits - rbits); + lbyte --; /* last full byte */ + } + + /* remaining full byte(s) */ + for (byte = fbyte; byte <= lbyte; byte ++) { + val |= (addr[byte] << (((byte - fbyte) << 3) + bits)); + } + + return val; +} + +#ifdef BCMDRIVER + +/** allocate variable sized data with 'size' bytes. note: vld should NOT be null. + */ +int +bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size) +{ + int ret = BCME_ERROR; + uint8 *dat = NULL; + + if (vld == NULL) { + ASSERT(0); + goto done; + } + + /* trying to allocate twice? */ + if (vld->vdata != NULL) { + ASSERT(0); + goto done; + } + + /* trying to allocate 0 size? */ + if (size == 0) { + ASSERT(0); + ret = BCME_BADARG; + goto done; + } + + dat = MALLOCZ(osh, size); + if (dat == NULL) { + ret = BCME_NOMEM; + goto done; + } + vld->vlen = size; + vld->vdata = dat; + ret = BCME_OK; +done: + return ret; +} + +/** free memory associated with variable sized data. note: vld should NOT be null. 
+ */ +int +bcm_vdata_free(osl_t *osh, var_len_data_t *vld) +{ + int ret = BCME_ERROR; + + if (vld == NULL) { + ASSERT(0); + goto done; + } + + if (vld->vdata) { + MFREE(osh, vld->vdata, vld->vlen); + vld->vdata = NULL; + vld->vlen = 0; + ret = BCME_OK; + } +done: + return ret; +} + +#endif /* BCMDRIVER */ + +/* Count the number of elements not matching a given value in a null terminated array */ +int +array_value_mismatch_count(uint8 value, uint8 *array, int array_size) +{ + int i; + int count = 0; + + for (i = 0; i < array_size; i++) { + /* exit if a null terminator is found */ + if (array[i] == 0) { + break; + } + if (array[i] != value) { + count++; + } + } + return count; +} + +/* Count the number of non-zero elements in an uint8 array */ +int +array_nonzero_count(uint8 *array, int array_size) +{ + return array_value_mismatch_count(0, array, array_size); +} + +/* Count the number of non-zero elements in an int16 array */ +int +array_nonzero_count_int16(int16 *array, int array_size) +{ + int i; + int count = 0; + + for (i = 0; i < array_size; i++) { + if (array[i] != 0) { + count++; + } + } + return count; +} + +/* Count the number of zero elements in an uint8 array */ +int +array_zero_count(uint8 *array, int array_size) +{ + int i; + int count = 0; + + for (i = 0; i < array_size; i++) { + if (array[i] == 0) { + count++; + } + } + return count; +} + +/* Validate an array that can be 1 of 2 data types. + * One of array1 or array2 should be non-NULL. The other should be NULL. + */ +static int +verify_ordered_array(uint8 *array1, int16 *array2, int array_size, + int range_lo, int range_hi, bool err_if_no_zero_term, bool is_ordered) +{ + int ret; + int i; + int val = 0; + int prev_val = 0; + + ret = err_if_no_zero_term ? BCME_NOTFOUND : BCME_OK; + + /* Check that: + * - values are in strict descending order. + * - values are within the valid range. 
+ */ + for (i = 0; i < array_size; i++) { + if (array1) { + val = (int)array1[i]; + } else if (array2) { + val = (int)array2[i]; + } else { + /* both array parameters are NULL */ + return BCME_NOTFOUND; + } + if (val == 0) { + /* array is zero-terminated */ + ret = BCME_OK; + break; + } + + if (is_ordered && i > 0 && val >= prev_val) { + /* array is not in descending order */ + ret = BCME_BADOPTION; + break; + } + prev_val = val; + + if (val < range_lo || val > range_hi) { + /* array value out of range */ + ret = BCME_RANGE; + break; + } + } + + return ret; +} + +/* Validate an ordered uint8 configuration array */ +int +verify_ordered_array_uint8(uint8 *array, int array_size, + uint8 range_lo, uint8 range_hi) +{ + return verify_ordered_array(array, NULL, array_size, (int)range_lo, (int)range_hi, + TRUE, TRUE); +} + +/* Validate an ordered int16 non-zero-terminated configuration array */ +int +verify_ordered_array_int16(int16 *array, int array_size, + int16 range_lo, int16 range_hi) +{ + return verify_ordered_array(NULL, array, array_size, (int)range_lo, (int)range_hi, + FALSE, TRUE); +} + +/* Validate all values in an array are in range */ +int +verify_array_values(uint8 *array, int array_size, + int range_lo, int range_hi, bool zero_terminated) +{ + int ret = BCME_OK; + int i; + int val = 0; + + /* Check that: + * - values are in strict descending order. + * - values are within the valid range. 
+ */ + for (i = 0; i < array_size; i++) { + val = (int)array[i]; + if (val == 0 && zero_terminated) { + ret = BCME_OK; + break; + } + if (val < range_lo || val > range_hi) { + /* array value out of range */ + ret = BCME_RANGE; + break; + } + } + return ret; +} + +/* Adds/replaces NVRAM variable with given value + * varbuf[in,out] - Buffer with NVRAM variables (sequence of zero-terminated 'name=value' records, + * terminated with additional zero) + * buflen[in] - Length of buffer (may, even should, have some unused space) + * variable[in] - Variable to add/replace in 'name=value' form + * datalen[out,opt] - Optional output parameter - resulting length of data in buffer + * Returns TRUE on success, FALSE if buffer too short or variable specified incorrectly + */ +bool +replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable, + unsigned int *datalen) +{ + char *p; + int variable_heading_len, record_len, variable_record_len = strlen(variable) + 1; + char *buf_end = varbuf + buflen; + p = strchr(variable, '='); + if (!p) { + return FALSE; + } + /* Length of given variable name, followed by '=' */ + variable_heading_len = (int)((const char *)(p + 1) - variable); + /* Scanning NVRAM, record by record up to trailing 0 */ + for (p = varbuf; *p; p += strlen(p) + 1) { + /* If given variable found - remove it */ + if (!strncmp(p, variable, variable_heading_len)) { + record_len = strlen(p) + 1; + memmove_s(p, buf_end - p, p + record_len, buf_end - (p + record_len)); + } + } + /* If buffer does not have space for given variable - return FALSE */ + if ((p + variable_record_len + 1) > buf_end) { + return FALSE; + } + /* Copy given variable to end of buffer */ + memmove_s(p, buf_end - p, variable, variable_record_len); + /* Adding trailing 0 */ + p[variable_record_len] = 0; + /* Setting optional output parameter - length of data in buffer */ + if (datalen) { + *datalen = (unsigned int)(p + variable_record_len + 1 - varbuf); + } + return TRUE; +} + +/* Add to 
adjust the 802.1x priority */ +void +pktset8021xprio(void *pkt, int prio) +{ + struct ether_header *eh; + uint8 *pktdata; + if(prio == PKTPRIO(pkt)) + return; + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); + eh = (struct ether_header *) pktdata; + if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + ASSERT(prio >= 0 && prio <= MAXPRIO); + PKTSETPRIO(pkt, prio); + } +} diff --git a/bcmdhd.100.10.315.x/bcmwifi_channels.c b/bcmdhd.100.10.315.x/bcmwifi_channels.c new file mode 100644 index 0000000..531246e --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmwifi_channels.c @@ -0,0 +1,1464 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmwifi_channels.c 695288 2017-04-19 17:20:39Z $ + */ + +#include +#include +#include + +#ifdef BCMDRIVER +#include +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#else +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif // endif +#endif /* BCMDRIVER */ + +#include + +#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL)) +#include /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */ +#endif // endif + +#include <802.11.h> + +/* Definitions for D11AC capable (80MHz+) Chanspec type */ + +/* Chanspec ASCII representation: + * [ 'g'] ['/' []['/'<1st80channel>'-'<2nd80channel>]] + * + * : + * (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively. + * Default value is 2g if channel <= 14, otherwise 5g. + * : + * channel number of the 5MHz, 10MHz, 20MHz channel, + * or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel. + * : + * (optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20. + * : + * (only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower. + * + * For 2.4GHz band 40MHz channels, the same primary channel may be the + * upper sideband for one 40MHz channel, and the lower sideband for an + * overlapping 40MHz channel. The U/L disambiguates which 40MHz channel + * is being specified. + * + * For 40MHz in the 5GHz band and all channel bandwidths greater than + * 40MHz, the U/L specificaion is not allowed since the channels are + * non-overlapping and the primary sub-band is derived from its + * position in the wide bandwidth channel. + * + * <1st80Channel>: + * <2nd80Channel>: + * Required for 80+80, otherwise not allowed. + * Specifies the center channel of the primary and secondary 80MHz band. + * + * In its simplest form, it is a 20MHz channel number, with the implied band + * of 2.4GHz if channel number <= 14, and 5GHz otherwise. 
+ * + * To allow for backward compatibility with scripts, the old form for + * 40MHz channels is also allowed: + * + * : + * primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz + * : + * "U" for upper, "L" for lower (or lower case "u" "l") + * + * 5 GHz Examples: + * Chanspec BW Center Ch Channel Range Primary Ch + * 5g8 20MHz 8 - - + * 52 20MHz 52 - - + * 52/40 40MHz 54 52-56 52 + * 56/40 40MHz 54 52-56 56 + * 52/80 80MHz 58 52-64 52 + * 56/80 80MHz 58 52-64 56 + * 60/80 80MHz 58 52-64 60 + * 64/80 80MHz 58 52-64 64 + * 52/160 160MHz 50 36-64 52 + * 36/160 160MGz 50 36-64 36 + * 36/80+80/42-106 80+80MHz 42,106 36-48,100-112 36 + * + * 2 GHz Examples: + * Chanspec BW Center Ch Channel Range Primary Ch + * 2g8 20MHz 8 - - + * 8 20MHz 8 - - + * 6 20MHz 6 - - + * 6/40l 40MHz 8 6-10 6 + * 6l 40MHz 8 6-10 6 + * 6/40u 40MHz 4 2-6 6 + * 6u 40MHz 4 2-6 6 + */ + +/* bandwidth ASCII string */ +static const char *wf_chspec_bw_str[] = +{ + "5", + "10", + "20", + "40", + "80", + "160", + "80+80", + "na" +}; + +static const uint8 wf_chspec_bw_mhz[] = +{5, 10, 20, 40, 80, 160, 160}; + +#define WF_NUM_BW \ + (sizeof(wf_chspec_bw_mhz)/sizeof(uint8)) + +/* 40MHz channels in 5GHz band */ +static const uint8 wf_5g_40m_chans[] = +{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159, 167, 175}; +#define WF_NUM_5G_40M_CHANS \ + (sizeof(wf_5g_40m_chans)/sizeof(uint8)) + +/* 80MHz channels in 5GHz band */ +static const uint8 wf_5g_80m_chans[] = +{42, 58, 106, 122, 138, 155, 171}; +#define WF_NUM_5G_80M_CHANS \ + (sizeof(wf_5g_80m_chans)/sizeof(uint8)) + +/* 160MHz channels in 5GHz band */ +static const uint8 wf_5g_160m_chans[] = +{50, 114}; +#define WF_NUM_5G_160M_CHANS \ + (sizeof(wf_5g_160m_chans)/sizeof(uint8)) + +/* opclass and channel information for US. 
Table E-1 */ +static const uint16 opclass_data[] = { + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + 0, + 0, + 0, + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G 
|((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), +}; + +/** + * Return the chanspec bandwidth in MHz + * Bandwidth of 160 MHz will be returned for 80+80MHz chanspecs. + * + * @param chspec chanspec_t + * + * @return bandwidth of chspec in MHz units + */ +uint +wf_bw_chspec_to_mhz(chanspec_t chspec) +{ + uint bw; + + bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT; + return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]); +} + +/* bw in MHz, return the channel count from the center channel to the + * the channel at the edge of the band + */ +static uint8 +center_chan_to_edge(uint bw) +{ + /* edge channels separated by BW - 10MHz on each side + * delta from cf to edge is half of that, + * MHz to channel num conversion is 5MHz/channel + */ + return (uint8)(((bw - 20) / 2) / 5); +} + +/* return channel number of the low edge of the band + * given the center channel and BW + */ +static uint8 +channel_low_edge(uint center_ch, uint bw) +{ + return (uint8)(center_ch - center_chan_to_edge(bw)); +} + +/* return side band number given center channel and primary20 channel + * return -1 on error + */ +static int +channel_to_sb(uint center_ch, uint primary_ch, uint bw) +{ + uint lowest = channel_low_edge(center_ch, bw); + uint sb; + + if ((primary_ch - lowest) % 4) { + /* bad primary channel, not mult 4 */ + return -1; + } + + sb = ((primary_ch - lowest) / 4); + + /* sb must be a index to a 20MHz channel in range */ + if (sb >= (bw / 20)) { + /* primary_ch must have been too high for the center_ch */ + return -1; + } + + return sb; +} + +/* return primary20 channel given center 
channel and side band */ +static uint8 +channel_to_primary20_chan(uint center_ch, uint bw, uint sb) +{ + return (uint8)(channel_low_edge(center_ch, bw) + sb * 4); +} + +/* return index of 80MHz channel from channel number + * return -1 on error + */ +static int +channel_80mhz_to_id(uint ch) +{ + uint i; + for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) { + if (ch == wf_5g_80m_chans[i]) + return i; + } + + return -1; +} + +/* wrapper function for wf_chspec_ntoa. In case of an error it puts + * the original chanspec in the output buffer, prepended with "invalid". + * Can be directly used in print routines as it takes care of null + */ +char * +wf_chspec_ntoa_ex(chanspec_t chspec, char *buf) +{ + if (wf_chspec_ntoa(chspec, buf) == NULL) + snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec); + return buf; +} + +/* given a chanspec and a string buffer, format the chanspec as a + * string, and return the original pointer a. + * Min buffer length must be CHANSPEC_STR_LEN. + * On error return NULL + */ +char * +wf_chspec_ntoa(chanspec_t chspec, char *buf) +{ + const char *band; + uint pri_chan; + + if (wf_chspec_malformed(chspec)) + return NULL; + + band = ""; + + /* check for non-default band spec */ + if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) || + (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL)) + band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g"; + + /* primary20 channel */ + pri_chan = wf_chspec_primary20_chan(chspec); + + /* bandwidth and primary20 sideband */ + if (CHSPEC_IS20(chspec)) { + snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, pri_chan); + } else if (!CHSPEC_IS8080(chspec)) { + const char *bw; + const char *sb = ""; + + bw = wf_chspec_to_bw_str(chspec); + +#ifdef CHANSPEC_NEW_40MHZ_FORMAT + /* primary20 sideband string if needed for 2g 40MHz */ + if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) { + sb = CHSPEC_SB_UPPER(chspec) ? 
"u" : "l"; + } + + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, pri_chan, bw, sb); +#else + /* primary20 sideband string instead of BW for 40MHz */ + if (CHSPEC_IS40(chspec)) { + sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l"; + snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, pri_chan, sb); + } else { + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw); + } +#endif /* CHANSPEC_NEW_40MHZ_FORMAT */ + + } else { + /* 80+80 */ + uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT; + uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT; + + /* convert to channel number */ + chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0; + chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0; + + /* Outputs a max of CHANSPEC_STR_LEN chars including '\0' */ + snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", pri_chan, chan1, chan2); + } + + return (buf); +} + +static int +read_uint(const char **p, unsigned int *num) +{ + unsigned long val; + char *endp = NULL; + + val = strtoul(*p, &endp, 10); + /* if endp is the initial pointer value, then a number was not read */ + if (endp == *p) + return 0; + + /* advance the buffer pointer to the end of the integer string */ + *p = endp; + /* return the parsed integer */ + *num = (unsigned int)val; + + return 1; +} + +/* given a chanspec string, convert to a chanspec. 
+ * On error return 0 + */ +chanspec_t +wf_chspec_aton(const char *a) +{ + chanspec_t chspec; + uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb; + uint num, pri_ch; + uint ch1, ch2; + char c, sb_ul = '\0'; + int i; + + bw = 20; + chspec_sb = 0; + chspec_ch = ch1 = ch2 = 0; + + /* parse channel num or band */ + if (!read_uint(&a, &num)) + return 0; + /* if we are looking at a 'g', then the first number was a band */ + c = tolower((int)a[0]); + if (c == 'g') { + a++; /* consume the char */ + + /* band must be "2" or "5" */ + if (num == 2) + chspec_band = WL_CHANSPEC_BAND_2G; + else if (num == 5) + chspec_band = WL_CHANSPEC_BAND_5G; + else + return 0; + + /* read the channel number */ + if (!read_uint(&a, &pri_ch)) + return 0; + + c = tolower((int)a[0]); + } + else { + /* first number is channel, use default for band */ + pri_ch = num; + chspec_band = ((pri_ch <= CH_MAX_2G_CHANNEL) ? + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); + } + + if (c == '\0') { + /* default BW of 20MHz */ + chspec_bw = WL_CHANSPEC_BW_20; + goto done_read; + } + + a ++; /* consume the 'u','l', or '/' */ + + /* check 'u'/'l' */ + if (c == 'u' || c == 'l') { + sb_ul = c; + chspec_bw = WL_CHANSPEC_BW_40; + goto done_read; + } + + /* next letter must be '/' */ + if (c != '/') + return 0; + + /* read bandwidth */ + if (!read_uint(&a, &bw)) + return 0; + + /* convert to chspec value */ + if (bw == 5) { + chspec_bw = WL_CHANSPEC_BW_5; + } else if (bw == 10) { + chspec_bw = WL_CHANSPEC_BW_10; + } else if (bw == 20) { + chspec_bw = WL_CHANSPEC_BW_20; + } else if (bw == 40) { + chspec_bw = WL_CHANSPEC_BW_40; + } else if (bw == 80) { + chspec_bw = WL_CHANSPEC_BW_80; + } else if (bw == 160) { + chspec_bw = WL_CHANSPEC_BW_160; + } else { + return 0; + } + + /* So far we have g/ + * Can now be followed by u/l if bw = 40, + * or '+80' if bw = 80, to make '80+80' bw. 
+ */ + + c = tolower((int)a[0]); + + /* if we have a 2g/40 channel, we should have a l/u spec now */ + if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) { + if (c == 'u' || c == 'l') { + a ++; /* consume the u/l char */ + sb_ul = c; + goto done_read; + } + } + + /* check for 80+80 */ + if (c == '+') { + /* 80+80 */ + const char plus80[] = "80/"; + + /* must be looking at '+80/' + * check and consume this string. + */ + chspec_bw = WL_CHANSPEC_BW_8080; + + a ++; /* consume the char '+' */ + + /* consume the '80/' string */ + for (i = 0; i < 3; i++) { + if (*a++ != plus80[i]) { + return 0; + } + } + + /* read primary 80MHz channel */ + if (!read_uint(&a, &ch1)) + return 0; + + /* must followed by '-' */ + if (a[0] != '-') + return 0; + a ++; /* consume the char */ + + /* read secondary 80MHz channel */ + if (!read_uint(&a, &ch2)) + return 0; + } + +done_read: + /* skip trailing white space */ + while (a[0] == ' ') { + a ++; + } + + /* must be end of string */ + if (a[0] != '\0') + return 0; + + /* Now have all the chanspec string parts read; + * chspec_band, pri_ch, chspec_bw, sb_ul, ch1, ch2. + * chspec_band and chspec_bw are chanspec values. + * Need to convert pri_ch, sb_ul, and ch1,ch2 into + * a center channel (or two) and sideband. + */ + + /* if a sb u/l string was given, just use that, + * guaranteed to be bw = 40 by sting parse. 
+ */ + if (sb_ul != '\0') { + if (sb_ul == 'l') { + chspec_ch = UPPER_20_SB(pri_ch); + chspec_sb = WL_CHANSPEC_CTL_SB_LLL; + } else if (sb_ul == 'u') { + chspec_ch = LOWER_20_SB(pri_ch); + chspec_sb = WL_CHANSPEC_CTL_SB_LLU; + } + } + /* if the bw is 20, center and sideband are trivial */ + else if (chspec_bw == WL_CHANSPEC_BW_20) { + chspec_ch = pri_ch; + chspec_sb = WL_CHANSPEC_CTL_SB_NONE; + } + /* if the bw is 40/80/160, not 80+80, a single method + * can be used to to find the center and sideband + */ + else if (chspec_bw != WL_CHANSPEC_BW_8080) { + /* figure out primary20 sideband based on primary20 channel and bandwidth */ + const uint8 *center_ch = NULL; + int num_ch = 0; + int sb = -1; + + if (chspec_bw == WL_CHANSPEC_BW_40) { + center_ch = wf_5g_40m_chans; + num_ch = WF_NUM_5G_40M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_80) { + center_ch = wf_5g_80m_chans; + num_ch = WF_NUM_5G_80M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_160) { + center_ch = wf_5g_160m_chans; + num_ch = WF_NUM_5G_160M_CHANS; + } else { + return 0; + } + + for (i = 0; i < num_ch; i ++) { + sb = channel_to_sb(center_ch[i], pri_ch, bw); + if (sb >= 0) { + chspec_ch = center_ch[i]; + chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; + break; + } + } + + /* check for no matching sb/center */ + if (sb < 0) { + return 0; + } + } + /* Otherwise, bw is 80+80. Figure out channel pair and sb */ + else { + int ch1_id = 0, ch2_id = 0; + int sb; + + /* look up the channel ID for the specified channel numbers */ + ch1_id = channel_80mhz_to_id(ch1); + ch2_id = channel_80mhz_to_id(ch2); + + /* validate channels */ + if (ch1_id < 0 || ch2_id < 0) + return 0; + + /* combine 2 channel IDs in channel field of chspec */ + chspec_ch = (((uint)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) | + ((uint)ch2_id << WL_CHANSPEC_CHAN2_SHIFT)); + + /* figure out primary 20 MHz sideband */ + + /* is the primary channel contained in the 1st 80MHz channel? 
*/ + sb = channel_to_sb(ch1, pri_ch, bw); + if (sb < 0) { + /* no match for primary channel 'pri_ch' in segment0 80MHz channel */ + return 0; + } + + chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; + } + + chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb); + + if (wf_chspec_malformed(chspec)) + return 0; + + return chspec; +} + +/* + * Verify the chanspec is using a legal set of parameters, i.e. that the + * chanspec specified a band, bw, pri_sb and channel and that the + * combination could be legal given any set of circumstances. + * RETURNS: TRUE is the chanspec is malformed, false if it looks good. + */ +bool +wf_chspec_malformed(chanspec_t chanspec) +{ + uint chspec_bw = CHSPEC_BW(chanspec); + uint chspec_ch = CHSPEC_CHANNEL(chanspec); + + /* must be 2G or 5G band */ + if (CHSPEC_IS2G(chanspec)) { + /* must be valid bandwidth */ + if (!BW_LE40(chspec_bw)) { + return TRUE; + } + } else if (CHSPEC_IS5G(chanspec)) { + if (chspec_bw == WL_CHANSPEC_BW_8080) { + uint ch1_id, ch2_id; + + /* channel IDs in 80+80 must be in range */ + ch1_id = CHSPEC_CHAN1(chanspec); + ch2_id = CHSPEC_CHAN2(chanspec); + if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS) + return TRUE; + + } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 || + chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) { + + if (chspec_ch > MAXCHANNEL) { + return TRUE; + } + } else { + /* invalid bandwidth */ + return TRUE; + } + } else { + /* must be 2G or 5G band */ + return TRUE; + } + + /* side band needs to be consistent with bandwidth */ + if (chspec_bw == WL_CHANSPEC_BW_20) { + if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL) + return TRUE; + } else if (chspec_bw == WL_CHANSPEC_BW_40) { + if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU) + return TRUE; + } else if (chspec_bw == WL_CHANSPEC_BW_80 || + chspec_bw == WL_CHANSPEC_BW_8080) { + /* both 80MHz and 80+80MHz use 80MHz side bands. 
+ * 80+80 SB info is relative to the primary 80MHz sub-band. + */ + if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU) + return TRUE; + } + else if (chspec_bw == WL_CHANSPEC_BW_160) { + ASSERT(CHSPEC_CTL_SB(chanspec) <= WL_CHANSPEC_CTL_SB_UUU); + } + return FALSE; +} + +/* + * Verify the chanspec specifies a valid channel according to 802.11. + * RETURNS: TRUE if the chanspec is a valid 802.11 channel + */ +bool +wf_chspec_valid(chanspec_t chanspec) +{ + uint chspec_bw = CHSPEC_BW(chanspec); + uint chspec_ch = CHSPEC_CHANNEL(chanspec); + + if (wf_chspec_malformed(chanspec)) + return FALSE; + + if (CHSPEC_IS2G(chanspec)) { + /* must be valid bandwidth and channel range */ + if (chspec_bw == WL_CHANSPEC_BW_20) { + if (chspec_ch >= 1 && chspec_ch <= 14) + return TRUE; + } else if (chspec_bw == WL_CHANSPEC_BW_40) { + if (chspec_ch >= 3 && chspec_ch <= 11) + return TRUE; + } + } else if (CHSPEC_IS5G(chanspec)) { + if (chspec_bw == WL_CHANSPEC_BW_8080) { + uint16 ch1, ch2; + + ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)]; + ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)]; + + /* the two channels must be separated by more than 80MHz by VHT req */ + if ((ch2 > ch1 + CH_80MHZ_APART) || + (ch1 > ch2 + CH_80MHZ_APART)) + return TRUE; + } else { + const uint8 *center_ch; + uint num_ch, i; + + if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) { + center_ch = wf_5g_40m_chans; + num_ch = WF_NUM_5G_40M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_80) { + center_ch = wf_5g_80m_chans; + num_ch = WF_NUM_5G_80M_CHANS; + } else if (chspec_bw == WL_CHANSPEC_BW_160) { + center_ch = wf_5g_160m_chans; + num_ch = WF_NUM_5G_160M_CHANS; + } else { + /* invalid bandwidth */ + return FALSE; + } + + /* check for a valid center channel */ + if (chspec_bw == WL_CHANSPEC_BW_20) { + /* We don't have an array of legal 20MHz 5G channels, but they are + * each side of the legal 40MHz channels. Check the chanspec + * channel against either side of the 40MHz channels. 
+ */ + for (i = 0; i < num_ch; i ++) { + if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) || + chspec_ch == (uint)UPPER_20_SB(center_ch[i])) + break; /* match found */ + } + + if (i == num_ch) { + /* check for channel 165 which is not the side band + * of 40MHz 5G channel + */ + if (chspec_ch == 165) + i = 0; + + /* check for legacy JP channels on failure */ + if (chspec_ch == 34 || chspec_ch == 38 || + chspec_ch == 42 || chspec_ch == 46) + i = 0; + } + } else { + /* check the chanspec channel to each legal channel */ + for (i = 0; i < num_ch; i ++) { + if (chspec_ch == center_ch[i]) + break; /* match found */ + } + } + + if (i < num_ch) { + /* match found */ + return TRUE; + } + } + } + + return FALSE; +} + +/* + * This function returns TRUE if both the chanspec can co-exist in PHY. + * Addition to primary20 channel, the function checks for side band for 2g 40 channels + */ +bool +wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2) +{ + bool same_primary; + + same_primary = (wf_chspec_primary20_chan(chspec1) == wf_chspec_primary20_chan(chspec2)); + + if (same_primary && CHSPEC_IS2G(chspec1)) { + if (CHSPEC_IS40(chspec1) && CHSPEC_IS40(chspec2)) { + return (CHSPEC_CTL_SB(chspec1) == CHSPEC_CTL_SB(chspec2)); + } + } + return same_primary; +} + +/** + * Return the primary 20MHz channel. + * + * This function returns the channel number of the primary 20MHz channel. For + * 20MHz channels this is just the channel number. For 40MHz or wider channels + * it is the primary 20MHz channel specified by the chanspec. + * + * @param chspec input chanspec + * + * @return Returns the channel number of the primary 20MHz channel + */ +uint8 +wf_chspec_primary20_chan(chanspec_t chspec) +{ + uint center_chan; + uint bw_mhz; + uint sb; + + ASSERT(!wf_chspec_malformed(chspec)); + + /* Is there a sideband ? 
*/ + if (CHSPEC_IS20(chspec)) { + return CHSPEC_CHANNEL(chspec); + } else { + sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT; + + if (CHSPEC_IS8080(chspec)) { + /* For an 80+80 MHz channel, the sideband 'sb' field is an 80 MHz sideband + * (LL, LU, UL, UU) for the 80 MHz frequency segment 0. + */ + uint chan_id = CHSPEC_CHAN1(chspec); + + bw_mhz = 80; + + /* convert from channel index to channel number */ + center_chan = wf_5g_80m_chans[chan_id]; + } + else { + bw_mhz = wf_bw_chspec_to_mhz(chspec); + center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT; + } + + return (channel_to_primary20_chan(center_chan, bw_mhz, sb)); + } +} + +/* given a chanspec, return the bandwidth string */ +const char * +BCMRAMFN(wf_chspec_to_bw_str)(chanspec_t chspec) +{ + return wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)]; +} + +/* + * Return the primary 20MHz chanspec of the given chanspec + */ +chanspec_t +wf_chspec_primary20_chspec(chanspec_t chspec) +{ + chanspec_t pri_chspec = chspec; + uint8 pri_chan; + + ASSERT(!wf_chspec_malformed(chspec)); + + /* Is there a sideband ? */ + if (!CHSPEC_IS20(chspec)) { + pri_chan = wf_chspec_primary20_chan(chspec); + pri_chspec = pri_chan | WL_CHANSPEC_BW_20; + pri_chspec |= CHSPEC_BAND(chspec); + } + return pri_chspec; +} + +/* return chanspec given primary 20MHz channel and bandwidth + * return 0 on error + */ +uint16 +wf_channel2chspec(uint pri_ch, uint bw) +{ + uint16 chspec; + const uint8 *center_ch = NULL; + int num_ch = 0; + int sb = -1; + int i = 0; + + chspec = ((pri_ch <= CH_MAX_2G_CHANNEL) ? 
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); + + chspec |= bw; + + if (bw == WL_CHANSPEC_BW_40) { + center_ch = wf_5g_40m_chans; + num_ch = WF_NUM_5G_40M_CHANS; + bw = 40; + } else if (bw == WL_CHANSPEC_BW_80) { + center_ch = wf_5g_80m_chans; + num_ch = WF_NUM_5G_80M_CHANS; + bw = 80; + } else if (bw == WL_CHANSPEC_BW_160) { + center_ch = wf_5g_160m_chans; + num_ch = WF_NUM_5G_160M_CHANS; + bw = 160; + } else if (bw == WL_CHANSPEC_BW_20) { + chspec |= pri_ch; + return chspec; + } else { + return 0; + } + + for (i = 0; i < num_ch; i ++) { + sb = channel_to_sb(center_ch[i], pri_ch, bw); + if (sb >= 0) { + chspec |= center_ch[i]; + chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT); + break; + } + } + + /* check for no matching sb/center */ + if (sb < 0) { + return 0; + } + + return chspec; +} + +/* + * This function returns the chanspec for the primary 40MHz of an 80MHz or wider channel. + * The primary 20MHz channel of the returned 40MHz chanspec is the same as the primary 20MHz + * channel of the input chanspec. 
+ */ +extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec) +{ + chanspec_t chspec40 = chspec; + uint center_chan; + uint sb; + + ASSERT(!wf_chspec_malformed(chspec)); + + /* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */ + if (CHSPEC_IS8080(chspec) || CHSPEC_IS160(chspec)) { + chspec = wf_chspec_primary80_chspec(chspec); + } + + /* determine primary 40 MHz sub-channel of an 80 MHz chanspec */ + if (CHSPEC_IS80(chspec)) { + center_chan = CHSPEC_CHANNEL(chspec); + sb = CHSPEC_CTL_SB(chspec); + + if (sb < WL_CHANSPEC_CTL_SB_UL) { + /* Primary 40MHz is on lower side */ + center_chan -= CH_20MHZ_APART; + /* sideband bits are the same for LL/LU and L/U */ + } else { + /* Primary 40MHz is on upper side */ + center_chan += CH_20MHZ_APART; + /* sideband bits need to be adjusted by UL offset */ + sb -= WL_CHANSPEC_CTL_SB_UL; + } + + /* Create primary 40MHz chanspec */ + chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 | + sb | center_chan); + } + + return chspec40; +} + +/* + * Return the channel number for a given frequency and base frequency. + * The returned channel number is relative to the given base frequency. + * If the given base frequency is zero, a base frequency of 5 GHz is assumed for + * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz. + * + * Frequency is specified in MHz. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for + * 2.4 GHz and 5 GHz bands. + * + * The returned channel will be in the range [1, 14] in the 2.4 GHz band + * and [0, 200] otherwise. + * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the + * frequency is not a 2.4 GHz channel, or if the frequency is not an even + * multiple of 5 MHz from the base frequency to the base plus 1 GHz. 
+ * + * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3 + */ +int +wf_mhz2channel(uint freq, uint start_factor) +{ + int ch = -1; + uint base; + int offset; + + /* take the default channel start frequency */ + if (start_factor == 0) { + if (freq >= 2400 && freq <= 2500) + start_factor = WF_CHAN_FACTOR_2_4_G; + else if (freq >= 5000 && freq <= 6000) + start_factor = WF_CHAN_FACTOR_5_G; + } + + if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G) + return 14; + + base = start_factor / 2; + + /* check that the frequency is in 1GHz range of the base */ + if ((freq < base) || (freq > base + 1000)) + return -1; + + offset = freq - base; + ch = offset / 5; + + /* check that frequency is a 5MHz multiple from the base */ + if (offset != (ch * 5)) + return -1; + + /* restricted channel range check for 2.4G */ + if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13)) + return -1; + + return ch; +} + +/* + * Return the center frequency in MHz of the given channel and base frequency. + * The channel number is interpreted relative to the given base frequency. + * + * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G + * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands. + * The channel range of [1, 14] is only checked for a start_factor of + * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2). + * Odd start_factors produce channels on .5 MHz boundaries, in which case + * the answer is rounded down to an integral MHz. + * -1 is returned for an out of range channel. 
+ * + * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3 + */ +int +wf_channel2mhz(uint ch, uint start_factor) +{ + int freq; + + if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) || + (ch > 200)) + freq = -1; + else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) + freq = 2484; + else + freq = ch * 5 + start_factor / 2; + + return freq; +} + +static const uint16 sidebands[] = { + WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU, + WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU, + WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU, + WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU +}; + +/* + * Returns the chanspec 80Mhz channel corresponding to the following input + * parameters + * + * primary_channel - primary 20Mhz channel + * center_channel - center frequency of the 80Mhz channel + * + * The center_channel can be one of {42, 58, 106, 122, 138, 155} + * + * returns INVCHANSPEC in case of error + */ +chanspec_t +wf_chspec_80(uint8 center_channel, uint8 primary_channel) +{ + + chanspec_t chanspec = INVCHANSPEC; + chanspec_t chanspec_cur; + uint i; + + for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) { + chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]); + if (primary_channel == wf_chspec_primary20_chan(chanspec_cur)) { + chanspec = chanspec_cur; + break; + } + } + /* If the loop ended early, we are good, otherwise we did not + * find a 80MHz chanspec with the given center_channel that had a primary channel + * matching the given primary_channel. + */ + return chanspec; +} + +/* + * Returns the 80+80 chanspec corresponding to the following input parameters + * + * primary_20mhz - Primary 20 MHz channel + * chan0 - center channel number of one frequency segment + * chan1 - center channel number of the other frequency segment + * + * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}. + * The primary channel must be contained in one of the 80MHz channels. 
This routine + * will determine which frequency segment is the primary 80 MHz segment. + * + * Returns INVCHANSPEC in case of error. + * + * Refer to 802.11-2016 section 22.3.14 "Channelization". + */ +chanspec_t +wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1) +{ + int sb = 0; + uint16 chanspec = 0; + int chan0_id = 0, chan1_id = 0; + int seg0, seg1; + + chan0_id = channel_80mhz_to_id(chan0); + chan1_id = channel_80mhz_to_id(chan1); + + /* make sure the channel numbers were valid */ + if (chan0_id == -1 || chan1_id == -1) + return INVCHANSPEC; + + /* does the primary channel fit with the 1st 80MHz channel ? */ + sb = channel_to_sb(chan0, primary_20mhz, 80); + if (sb >= 0) { + /* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */ + seg0 = chan0_id; + seg1 = chan1_id; + } else { + /* no, so does the primary channel fit with the 2nd 80MHz channel ? */ + sb = channel_to_sb(chan1, primary_20mhz, 80); + if (sb < 0) { + /* no match for pri_ch to either 80MHz center channel */ + return INVCHANSPEC; + } + /* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */ + seg0 = chan1_id; + seg1 = chan0_id; + } + + chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) | + (seg1 << WL_CHANSPEC_CHAN2_SHIFT) | + (sb << WL_CHANSPEC_CTL_SB_SHIFT) | + WL_CHANSPEC_BW_8080 | + WL_CHANSPEC_BAND_5G); + + return chanspec; +} + +/* + * This function returns the 80Mhz channel for the given id. 
+ */ +static uint8 +wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id) +{ + if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS) + return wf_5g_80m_chans[chan_80Mhz_id]; + + return 0; +} + +/* + * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec + */ +uint8 +wf_chspec_primary80_channel(chanspec_t chanspec) +{ + chanspec_t primary80_chspec; + uint8 primary80_chan; + + primary80_chspec = wf_chspec_primary80_chspec(chanspec); + + if (primary80_chspec == INVCHANSPEC) { + primary80_chan = INVCHANNEL; + } else { + primary80_chan = CHSPEC_CHANNEL(primary80_chspec); + } + + return primary80_chan; +} + +/* + * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec + */ +uint8 +wf_chspec_secondary80_channel(chanspec_t chanspec) +{ + chanspec_t secondary80_chspec; + uint8 secondary80_chan; + + secondary80_chspec = wf_chspec_secondary80_chspec(chanspec); + + if (secondary80_chspec == INVCHANSPEC) { + secondary80_chan = INVCHANNEL; + } else { + secondary80_chan = CHSPEC_CHANNEL(secondary80_chspec); + } + + return secondary80_chan; +} + +/* + * Returns the chanspec for the primary 80MHz sub-band of an 160MHz or 80+80 channel + */ +chanspec_t +wf_chspec_primary80_chspec(chanspec_t chspec) +{ + chanspec_t chspec80; + uint center_chan; + uint sb; + + ASSERT(!wf_chspec_malformed(chspec)); + + if (CHSPEC_IS80(chspec)) { + chspec80 = chspec; + } + else if (CHSPEC_IS8080(chspec)) { + sb = CHSPEC_CTL_SB(chspec); + + /* primary sub-band is stored in seg0 */ + center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec)); + + /* Create primary 80MHz chanspec */ + chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); + } + else if (CHSPEC_IS160(chspec)) { + center_chan = CHSPEC_CHANNEL(chspec); + sb = CHSPEC_CTL_SB(chspec); + + if (sb < WL_CHANSPEC_CTL_SB_ULL) { + /* Primary 80MHz is on lower side */ + center_chan -= CH_40MHZ_APART; + } + else { + /* Primary 80MHz is on upper side */ + center_chan += CH_40MHZ_APART; + sb -= 
WL_CHANSPEC_CTL_SB_ULL; + } + + /* Create primary 80MHz chanspec */ + chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan); + } + else { + chspec80 = INVCHANSPEC; + } + + return chspec80; +} + +/* + * Returns the chanspec for the secondary 80MHz sub-band of an 160MHz or 80+80 channel + */ +chanspec_t +wf_chspec_secondary80_chspec(chanspec_t chspec) +{ + chanspec_t chspec80; + uint center_chan; + + ASSERT(!wf_chspec_malformed(chspec)); + + if (CHSPEC_IS8080(chspec)) { + /* secondary sub-band is stored in seg1 */ + center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec)); + + /* Create secondary 80MHz chanspec */ + chspec80 = (WL_CHANSPEC_BAND_5G | + WL_CHANSPEC_BW_80 | + WL_CHANSPEC_CTL_SB_LL | + center_chan); + } + else if (CHSPEC_IS160(chspec)) { + center_chan = CHSPEC_CHANNEL(chspec); + + if (CHSPEC_CTL_SB(chspec) < WL_CHANSPEC_CTL_SB_ULL) { + /* Primary 80MHz is on lower side */ + center_chan -= CH_40MHZ_APART; + } + else { + /* Primary 80MHz is on upper side */ + center_chan += CH_40MHZ_APART; + } + + /* Create secondary 80MHz chanspec */ + chspec80 = (WL_CHANSPEC_BAND_5G | + WL_CHANSPEC_BW_80 | + WL_CHANSPEC_CTL_SB_LL | + center_chan); + } + else { + chspec80 = INVCHANSPEC; + } + + return chspec80; +} + +/* + * For 160MHz or 80P80 chanspec, set ch[0]/ch[1] to be the low/high 80 Mhz channels + * + * For 20/40/80MHz chanspec, set ch[0] to be the center freq, and chan[1]=-1 + */ +void +wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch) +{ + + if (CHSPEC_IS8080(chspec)) { + ch[0] = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec)); + ch[1] = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec)); + } + else if (CHSPEC_IS160(chspec)) { + uint8 center_chan = CHSPEC_CHANNEL(chspec); + ch[0] = center_chan - CH_40MHZ_APART; + ch[1] = center_chan + CH_40MHZ_APART; + } + else { + /* for 20, 40, and 80 Mhz */ + ch[0] = CHSPEC_CHANNEL(chspec); + ch[1] = -1; + } + return; + +} + +#ifdef WL11AC_80P80 +uint8 +wf_chspec_channel(chanspec_t chspec) +{ + if 
(CHSPEC_IS8080(chspec)) { + return wf_chspec_primary80_channel(chspec); + } + else { + return ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)); + } +} +#endif /* WL11AC_80P80 */ + +/* This routine returns the chanspec for a given operating class and + * channel number + */ +chanspec_t +wf_channel_create_chspec_frm_opclass(uint8 opclass, uint8 channel) +{ + chanspec_t chanspec = 0; + uint16 opclass_info = 0; + uint16 lookupindex = 0; + switch (opclass) { + case 115: + lookupindex = 1; + break; + case 124: + lookupindex = 3; + break; + case 125: + lookupindex = 5; + break; + case 81: + lookupindex = 12; + break; + case 116: + lookupindex = 22; + break; + case 119: + lookupindex = 23; + break; + case 126: + lookupindex = 25; + break; + case 83: + lookupindex = 32; + break; + case 84: + lookupindex = 33; + break; + default: + lookupindex = 12; + } + + if (lookupindex < 33) { + opclass_info = opclass_data[lookupindex-1]; + } + else { + opclass_info = opclass_data[11]; + } + chanspec = opclass_info | (uint16)channel; + return chanspec; +} + +/* This routine returns the opclass for a given chanspec */ +int +wf_channel_create_opclass_frm_chspec(chanspec_t chspec) +{ + BCM_REFERENCE(chspec); + /* TODO: Implement this function ! */ + return 12; /* opclass 12 for basic 2G channels */ +} + +/* Populates array with all 20MHz side bands of a given chanspec_t in the following order: + * primary20, secondary20, two secondary40s, four secondary80s. + * 'chspec' is the chanspec of interest + * 'pext' must point to an uint8 array of long enough to hold all side bands of the given chspec + * + * Works with 20, 40, 80, 80p80 and 160MHz chspec + */ +void +wf_get_all_ext(chanspec_t chspec, uint8 *pext) +{ +#ifdef WL11N_20MHZONLY + GET_ALL_SB(chspec, pext); +#else /* !WL11N_20MHZONLY */ + chanspec_t t = (CHSPEC_IS160(chspec) || CHSPEC_IS8080(chspec)) ? 
/* if bw > 80MHz */ + wf_chspec_primary80_chspec(chspec) : (chspec); /* extract primary 80 */ + /* primary20 channel as first element */ + uint8 pri_ch = (pext)[0] = wf_chspec_primary20_chan(t); + if (CHSPEC_IS20(chspec)) return; /* nothing more to do since 20MHz chspec */ + /* 20MHz EXT */ + (pext)[1] = pri_ch + (IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART); + if (CHSPEC_IS40(chspec)) return; /* nothing more to do since 40MHz chspec */ + /* center 40MHz EXT */ + t = wf_channel2chspec(pri_ch + (IS_CTL_IN_L40(chspec) ? + CH_40MHZ_APART : -CH_40MHZ_APART), WL_CHANSPEC_BW_40); + GET_ALL_SB(t, &((pext)[2])); /* get the 20MHz side bands in 40MHz EXT */ + if (CHSPEC_IS80(chspec)) return; /* nothing more to do since 80MHz chspec */ + t = CH80MHZ_CHSPEC(wf_chspec_secondary80_channel(chspec), WL_CHANSPEC_CTL_SB_LLL); + /* get the 20MHz side bands in 80MHz EXT (secondary) */ + GET_ALL_SB(t, &((pext)[4])); +#endif /* !WL11N_20MHZONLY */ +} + +/* + * Given two chanspecs, returns true if they overlap. 
+ * (Overlap: At least one 20MHz subband is common between the two chanspecs provided) + */ +bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1) +{ + uint8 ch0, ch1; + + FOREACH_20_SB(chspec0, ch0) { + FOREACH_20_SB(chspec1, ch1) { + if (ABS(ch0 - ch1) < CH_20MHZ_APART) { + return TRUE; + } + } + } + + return FALSE; +} + +uint8 +channel_bw_to_width(chanspec_t chspec) +{ + uint8 channel_width; + + if (CHSPEC_IS80(chspec)) + channel_width = VHT_OP_CHAN_WIDTH_80; + else if (CHSPEC_IS160(chspec)) + channel_width = VHT_OP_CHAN_WIDTH_160; + else if (CHSPEC_IS8080(chspec)) + channel_width = VHT_OP_CHAN_WIDTH_80_80; + else + channel_width = VHT_OP_CHAN_WIDTH_20_40; + + return channel_width; +} diff --git a/bcmdhd.100.10.315.x/bcmwifi_channels.h b/bcmdhd.100.10.315.x/bcmwifi_channels.h new file mode 100644 index 0000000..6376dd9 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmwifi_channels.h @@ -0,0 +1,803 @@ +/* + * Misc utility routines for WL and Apps + * This header file housing the define and function prototype use by + * both the wl driver, tools & Apps. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmwifi_channels.h 695288 2017-04-19 17:20:39Z $ + */ + +#ifndef _bcmwifi_channels_h_ +#define _bcmwifi_channels_h_ + +/* A chanspec holds the channel number, band, bandwidth and primary 20MHz sideband */ +typedef uint16 chanspec_t; + +/* channel defines */ +#define CH_80MHZ_APART 16 +#define CH_40MHZ_APART 8 +#define CH_20MHZ_APART 4 +#define CH_10MHZ_APART 2 +#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ +#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */ + +/* maximum # channels the s/w supports */ +#define MAXCHANNEL 224 /* max # supported channels. The max channel no is above, + * this is that + 1 rounded up to a multiple of NBBY (8). + * DO NOT MAKE it > 255: channels are uint8's all over + */ +#define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */ + +#define INVCHANNEL 255 /* error value for a bad channel */ + +/* channel bitvec */ +typedef struct { + uint8 vec[MAXCHANNEL/8]; /* bitvec of channels */ +} chanvec_t; + +/* make sure channel num is within valid range */ +#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM) + +#define CHSPEC_CTLOVLP(sp1, sp2, sep) \ + (ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep)) + +/* All builds use the new 11ac ratespec/chanspec */ +#undef D11AC_IOTYPES +#define D11AC_IOTYPES + +#define WL_CHANSPEC_CHAN_MASK 0x00ff +#define WL_CHANSPEC_CHAN_SHIFT 0 +#define WL_CHANSPEC_CHAN1_MASK 0x000f +#define WL_CHANSPEC_CHAN1_SHIFT 0 +#define WL_CHANSPEC_CHAN2_MASK 0x00f0 +#define WL_CHANSPEC_CHAN2_SHIFT 4 + +#define WL_CHANSPEC_CTL_SB_MASK 0x0700 +#define WL_CHANSPEC_CTL_SB_SHIFT 8 +#define WL_CHANSPEC_CTL_SB_LLL 0x0000 +#define WL_CHANSPEC_CTL_SB_LLU 0x0100 +#define WL_CHANSPEC_CTL_SB_LUL 0x0200 +#define 
WL_CHANSPEC_CTL_SB_LUU 0x0300 +#define WL_CHANSPEC_CTL_SB_ULL 0x0400 +#define WL_CHANSPEC_CTL_SB_ULU 0x0500 +#define WL_CHANSPEC_CTL_SB_UUL 0x0600 +#define WL_CHANSPEC_CTL_SB_UUU 0x0700 +#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL +#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU +#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL +#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU +#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL + +#define WL_CHANSPEC_BW_MASK 0x3800 +#define WL_CHANSPEC_BW_SHIFT 11 +#define WL_CHANSPEC_BW_5 0x0000 +#define WL_CHANSPEC_BW_10 0x0800 +#define WL_CHANSPEC_BW_20 0x1000 +#define WL_CHANSPEC_BW_40 0x1800 +#define WL_CHANSPEC_BW_80 0x2000 +#define WL_CHANSPEC_BW_160 0x2800 +#define WL_CHANSPEC_BW_8080 0x3000 + +#define WL_CHANSPEC_BAND_MASK 0xc000 +#define WL_CHANSPEC_BAND_SHIFT 14 +#define WL_CHANSPEC_BAND_2G 0x0000 +#define WL_CHANSPEC_BAND_3G 0x4000 +#define WL_CHANSPEC_BAND_4G 0x8000 +#define WL_CHANSPEC_BAND_5G 0xc000 +#define INVCHANSPEC 255 +#define MAX_CHANSPEC 0xFFFF + +#define WL_CHANNEL_BAND(ch) (((ch) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G) + +/* channel defines */ +#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \ + ((channel) - CH_10MHZ_APART) : 0) +#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \ + ((channel) + CH_10MHZ_APART) : 0) + +/* pass a 80MHz channel number (uint8) to get respective LL, UU, LU, UL */ +#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0) +#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? 
\ + ((channel) + 3 * CH_10MHZ_APART) : 0) +#define LU_20_SB(channel) LOWER_20_SB(channel) +#define UL_20_SB(channel) UPPER_20_SB(channel) + +#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART) +#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART) +#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX) +#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ + ((channel) + CH_20MHZ_APART) : 0) +#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ + ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ + WL_CHANSPEC_BAND_5G)) +#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G) +#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G) + +/* simple MACROs to get different fields of chanspec */ +#ifdef WL11AC_80P80 +#define CHSPEC_CHANNEL(chspec) wf_chspec_channel(chspec) +#else +#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)) +#endif // endif +#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT +#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT +#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) +#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK) +#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) + +#ifdef WL11N_20MHZONLY +#define CHSPEC_IS20(chspec) 1 +#define CHSPEC_IS20_2G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \ + CHSPEC_IS2G(chspec)) +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) 0 +#endif // endif +#ifndef CHSPEC_IS80 +#define CHSPEC_IS80(chspec) 0 
+#endif // endif +#ifndef CHSPEC_IS160 +#define CHSPEC_IS160(chspec) 0 +#endif // endif +#ifndef CHSPEC_IS8080 +#define CHSPEC_IS8080(chspec) 0 +#endif // endif + +/* see FOREACH_20_SB in !WL11N_20MHZONLY section */ +#define FOREACH_20_SB(chspec, channel) \ + for (channel = CHSPEC_CHANNEL(chspec); channel; channel = 0) + +/* see GET_ALL_SB in !WL11N_20MHZONLY section */ +#define GET_ALL_SB(chspec, psb) do { \ + psb[0] = CHSPEC_CHANNEL(chspec); \ +} while (0) + +#else /* !WL11N_20MHZONLY */ + +#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) +#define CHSPEC_IS20_5G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \ + CHSPEC_IS5G(chspec)) +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) +#endif // endif +#ifndef CHSPEC_IS80 +#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80) +#endif // endif +#ifndef CHSPEC_IS160 +#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160) +#endif // endif +#ifndef CHSPEC_IS8080 +#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080) +#endif // endif + +/* pass a center channel and get channel offset from it by 10MHz */ +#define CH_OFF_10MHZ_MULTIPLES(channel, offset) ((uint8) (((offset) < 0) ? \ + (((channel) > (WL_CHANSPEC_CHAN_MASK & ((uint16)((-(offset)) * CH_10MHZ_APART)))) ?\ + ((channel) + (offset) * CH_10MHZ_APART) : 0) : \ + (((channel) < (uint16)(MAXCHANNEL - (offset) * CH_10MHZ_APART)) ? 
\ + ((channel) + (offset) * CH_10MHZ_APART) : 0))) + +#if defined(WL11AC_80P80) || defined(WL11AC_160) +/* pass a 160MHz center channel to get 20MHz subband channel numbers */ +#define LLL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -7) +#define LLU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -5) +#define LUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -3) +#define LUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -1) +#define ULL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 1) +#define ULU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 3) +#define UUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 5) +#define UUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 7) + +/* given an 80p80 channel, return the lower 80MHz sideband */ +#define LOWER_80_SB(chspec) (wf_chspec_primary80_channel(chspec) < \ + wf_chspec_secondary80_channel(chspec) ? \ + wf_chspec_primary80_channel(chspec) : wf_chspec_secondary80_channel(chspec)) + +/* given an 80p80 channel, return the upper 80MHz sideband */ +#define UPPER_80_SB(chspec) (wf_chspec_primary80_channel(chspec) > \ + wf_chspec_secondary80_channel(chspec) ? 
\ + wf_chspec_primary80_channel(chspec) : wf_chspec_secondary80_channel(chspec)) + +/* pass an 80P80 chanspec (not channel) to get 20MHz subband channel numbers */ +#define LLL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), -3) +#define LLU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), -1) +#define LUL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), 1) +#define LUU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), 3) +#define ULL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), -3) +#define ULU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), -1) +#define UUL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), 1) +#define UUU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), 3) + +/* get lowest 20MHz sideband of a given chspec + * (works with 20, 40, 80, 160, 80p80) + */ +#define CH_FIRST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS160(chspec) ? LLL_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS8080(chspec) ? LLL_20_SB_8080(chspec) : (\ + CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec)))))) + +/* get upper most 20MHz sideband of a given chspec + * (works with 20, 40, 80, 160, 80p80) + */ +#define CH_LAST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS160(chspec) ? UUU_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS8080(chspec) ? UUU_20_SB_8080(chspec) : (\ + CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec)))))) + +/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband + * (works with 80p80 only) + * resolves to 0 if called with upper most channel + */ +#define CH_NEXT_20_SB_IN_8080(chspec, channel) ((uint8) (\ + ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \ + ((channel) == LUU_20_SB_8080(chspec) ? 
ULL_20_SB_8080(chspec) : \ + (channel) + CH_20MHZ_APART)))) + +/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband + * (works with 20, 40, 80, 160, 80p80) + * resolves to 0 if called with upper most channel + */ +#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\ + (CHSPEC_IS8080(chspec) ? CH_NEXT_20_SB_IN_8080((chspec), (channel)) : \ + ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \ + ((channel) + CH_20MHZ_APART))))) + +#else /* WL11AC_80P80, WL11AC_160 */ + +#define LLL_20_SB_160(channel) 0 +#define LLU_20_SB_160(channel) 0 +#define LUL_20_SB_160(channel) 0 +#define LUU_20_SB_160(channel) 0 +#define ULL_20_SB_160(channel) 0 +#define ULU_20_SB_160(channel) 0 +#define UUL_20_SB_160(channel) 0 +#define UUU_20_SB_160(channel) 0 + +#define LOWER_80_SB(chspec) 0 + +#define UPPER_80_SB(chspec) 0 + +#define LLL_20_SB_8080(chspec) 0 +#define LLU_20_SB_8080(chspec) 0 +#define LUL_20_SB_8080(chspec) 0 +#define LUU_20_SB_8080(chspec) 0 +#define ULL_20_SB_8080(chspec) 0 +#define ULU_20_SB_8080(chspec) 0 +#define UUL_20_SB_8080(chspec) 0 +#define UUU_20_SB_8080(chspec) 0 + +/* get lowest 20MHz sideband of a given chspec + * (works with 20, 40, 80) + */ +#define CH_FIRST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec)))) +/* get upper most 20MHz sideband of a given chspec + * (works with 20, 40, 80, 160, 80p80) + */ +#define CH_LAST_20_SB(chspec) ((uint8) (\ + CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\ + CHSPEC_IS40(chspec) ? 
UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \ + CHSPEC_CHANNEL(chspec)))) + +/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband + * (works with 20, 40, 80, 160, 80p80) + * resolves to 0 if called with upper most channel + */ +#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\ + ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \ + ((channel) + CH_20MHZ_APART)))) + +#endif /* WL11AC_80P80, WL11AC_160 */ + +/* Iterator for 20MHz side bands of a chanspec: (chanspec_t chspec, uint8 channel) + * 'chspec' chanspec_t of interest (used in loop, better to pass a resolved value than a macro) + * 'channel' must be a variable (not an expression). + */ +#define FOREACH_20_SB(chspec, channel) \ + for (channel = CH_FIRST_20_SB(chspec); channel; \ + channel = CH_NEXT_20_SB((chspec), channel)) + +/* Uses iterator to populate array with all side bands involved (sorted lower to upper). + * 'chspec' chanspec_t of interest + * 'psb' pointer to uint8 array of enough size to hold all side bands for the given chspec + */ +#define GET_ALL_SB(chspec, psb) do { \ + uint8 channel, idx = 0; \ + chanspec_t chspec_local = chspec; \ + FOREACH_20_SB(chspec_local, channel) \ + (psb)[idx++] = channel; \ +} while (0) + +/* given a chanspec of any bw, tests if primary20 SB is in lower 20, 40, 80 respectively */ +#define IS_CTL_IN_L20(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_U) /* CTL SB is in low 20 of any 40 */ +#define IS_CTL_IN_L40(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_UL) /* in low 40 of any 80 */ +#define IS_CTL_IN_L80(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_ULL) /* in low 80 of 80p80/160 */ + +#endif /* !WL11N_20MHZONLY */ + +/* ULB introduced macros. 
Remove once ULB is cleaned from phy code */ +#define CHSPEC_IS2P5(chspec) 0 +#define CHSPEC_IS5(chspec) 0 +#define CHSPEC_IS10(chspec) 0 +#define CHSPEC_ISLE20(chspec) (CHSPEC_IS20(chspec)) +#define CHSPEC_BW_LE20(chspec) (CHSPEC_IS20(chspec)) + +#define BW_LE40(bw) ((bw) == WL_CHANSPEC_BW_20 || ((bw) == WL_CHANSPEC_BW_40)) +#define BW_LE80(bw) (BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80)) +#define BW_LE160(bw) (BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160)) + +#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) +#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) +#define CHSPEC_SB_UPPER(chspec) \ + ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)) +#define CHSPEC_SB_LOWER(chspec) \ + ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)) +#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G) + +/** + * Number of chars needed for wf_chspec_ntoa() destination character buffer. + */ +#define CHANSPEC_STR_LEN 20 + +/* + * This function returns TRUE if both the chanspec can co-exist in PHY. + * Addition to primary20 channel, the function checks for side band for 2g 40 channels + */ +extern bool wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2); + +#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\ + CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080) + +/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>), comparisons can be made +* as simple numeric comparisons, with the exception that 160 is the same BW as 80+80, +* but have different numeric values; (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080). +* +* The LT/LE/GT/GE macros check first checks whether both chspec bandwidth and bw are 160 wide. +* If both chspec bandwidth and bw is not 160 wide, then the comparison is made. 
+*/ +#define CHSPEC_BW_GE(chspec, bw) \ + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) >= (bw))) + +#define CHSPEC_BW_LE(chspec, bw) \ + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) <= (bw))) + +#define CHSPEC_BW_GT(chspec, bw) \ + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) > (bw))) + +#define CHSPEC_BW_LT(chspec, bw) \ + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) < (bw))) + +/* Legacy Chanspec defines + * These are the defines for the previous format of the chanspec_t + */ +#define WL_LCHANSPEC_CHAN_MASK 0x00ff +#define WL_LCHANSPEC_CHAN_SHIFT 0 + +#define WL_LCHANSPEC_CTL_SB_MASK 0x0300 +#define WL_LCHANSPEC_CTL_SB_SHIFT 8 +#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100 +#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200 +#define WL_LCHANSPEC_CTL_SB_NONE 0x0300 + +#define WL_LCHANSPEC_BW_MASK 0x0C00 +#define WL_LCHANSPEC_BW_SHIFT 10 +#define WL_LCHANSPEC_BW_10 0x0400 +#define WL_LCHANSPEC_BW_20 0x0800 +#define WL_LCHANSPEC_BW_40 0x0C00 + +#define WL_LCHANSPEC_BAND_MASK 0xf000 +#define WL_LCHANSPEC_BAND_SHIFT 12 +#define WL_LCHANSPEC_BAND_5G 0x1000 +#define WL_LCHANSPEC_BAND_2G 0x2000 + +#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK)) +#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK) +#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK) +#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK) +#define LCHSPEC_IS10(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10) +#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20) +#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40) +#define LCHSPEC_IS5G(chspec) (((chspec) & 
WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G) +#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G) + +#define LCHSPEC_SB_UPPER(chspec) \ + ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \ + (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)) +#define LCHSPEC_SB_LOWER(chspec) \ + ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \ + (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)) + +#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band))) + +#define CH20MHZ_LCHSPEC(channel) \ + (chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \ + WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G)) + +#define GET_ALL_EXT wf_get_all_ext + +/* + * WF_CHAN_FACTOR_* constants are used to calculate channel frequency + * given a channel number. + * chan_freq = chan_factor * 500Mhz + chan_number * 5 + */ + +/** + * Channel Factor for the starting frequence of 2.4 GHz channels. + * The value corresponds to 2407 MHz. + */ +#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */ + +/** + * Channel Factor for the starting frequence of 5 GHz channels. + * The value corresponds to 5000 MHz. + */ +#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */ + +/** + * Channel Factor for the starting frequence of 4.9 GHz channels. + * The value corresponds to 4000 MHz. + */ +#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */ + +#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */ + +/** + * No of sub-band vlaue of the specified Mhz chanspec + */ +#define WF_NUM_SIDEBANDS_40MHZ 2 +#define WF_NUM_SIDEBANDS_80MHZ 4 +#define WF_NUM_SIDEBANDS_8080MHZ 4 +#define WF_NUM_SIDEBANDS_160MHZ 8 + +/** + * Return the chanspec bandwidth in MHz + * Bandwidth of 160 MHz will be returned for 80+80MHz chanspecs. 
+ * + * @param chspec chanspec_t + * + * @return bandwidth of chspec in MHz units + */ +extern uint wf_bw_chspec_to_mhz(chanspec_t chspec); + +/** + * Convert chanspec to ascii string + * + * @param chspec chanspec format + * @param buf ascii string of chanspec + * + * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes + * Original chanspec in case of error + * + * @see CHANSPEC_STR_LEN + */ +extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf); + +/** + * Convert chanspec to ascii string + * + * @param chspec chanspec format + * @param buf ascii string of chanspec + * + * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes + * NULL in case of error + * + * @see CHANSPEC_STR_LEN + */ +extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf); + +/** + * Convert ascii string to chanspec + * + * @param a pointer to input string + * + * @return >= 0 if successful or 0 otherwise + */ +extern chanspec_t wf_chspec_aton(const char *a); + +/** + * Verify the chanspec fields are valid. + * + * Verify the chanspec is using a legal set field values, i.e. that the chanspec + * specified a band, bw, primary_sb, and channel and that the combination could be + * legal given some set of circumstances. + * + * @param chanspec input chanspec to verify + * + * @return TRUE if the chanspec is malformed, FALSE if it looks good. + */ +extern bool wf_chspec_malformed(chanspec_t chanspec); + +/** + * Verify the chanspec specifies a valid channel according to 802.11. + * + * @param chanspec input chanspec to verify + * + * @return TRUE if the chanspec is a valid 802.11 channel + */ +extern bool wf_chspec_valid(chanspec_t chanspec); + +/** + * Return the primary 20MHz channel. + * + * This function returns the channel number of the primary 20MHz channel. For + * 20MHz channels this is just the channel number. For 40MHz or wider channels + * it is the primary 20MHz channel specified by the chanspec. 
+ * + * @param chspec input chanspec + * + * @return Returns the channel number of the primary 20MHz channel + */ +extern uint8 wf_chspec_primary20_chan(chanspec_t chspec); + +/* alias for old function name */ +#define wf_chspec_ctlchan(c) wf_chspec_primary20_chan(c) + +/** + * Return the bandwidth string. + * + * This function returns the bandwidth string for the passed chanspec. + * + * @param chspec input chanspec + * + * @return Returns the bandwidth string: + * "5", "10", "20", "40", "80", "160", "80+80" + */ +extern const char *wf_chspec_to_bw_str(chanspec_t chspec); + +/** + * Return the primary 20MHz chanspec. + * + * This function returns the chanspec of the primary 20MHz channel. For 20MHz + * channels this is just the chanspec. For 40MHz or wider channels it is the + * chanspec of the primary 20MHZ channel specified by the chanspec. + * + * @param chspec input chanspec + * + * @return Returns the chanspec of the primary 20MHz channel + */ +extern chanspec_t wf_chspec_primary20_chspec(chanspec_t chspec); + +/* alias for old function name */ +#define wf_chspec_ctlchspec(c) wf_chspec_primary20_chspec(c) + +/** + * Return the primary 40MHz chanspec. + * + * This function returns the chanspec for the primary 40MHz of an 80MHz or wider channel. + * The primary 20MHz channel of the returned 40MHz chanspec is the same as the primary 20MHz + * channel of the input chanspec. + */ +extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec); + +/* + * Return the channel number for a given frequency and base frequency. + * The returned channel number is relative to the given base frequency. + * If the given base frequency is zero, a base frequency of 5 GHz is assumed for + * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz. + * + * Frequency is specified in MHz. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for + * 2.4 GHz and 5 GHz bands. 
+ * + * The returned channel will be in the range [1, 14] in the 2.4 GHz band + * and [0, 200] otherwise. + * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the + * frequency is not a 2.4 GHz channel, or if the frequency is not and even + * multiple of 5 MHz from the base frequency to the base plus 1 GHz. + * + * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3 + * + * @param freq frequency in MHz + * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz + * + * @return Returns a channel number + * + * @see WF_CHAN_FACTOR_2_4_G + * @see WF_CHAN_FACTOR_5_G + */ +extern int wf_mhz2channel(uint freq, uint start_factor); + +/** + * Return the center frequency in MHz of the given channel and base frequency. + * + * Return the center frequency in MHz of the given channel and base frequency. + * The channel number is interpreted relative to the given base frequency. + * + * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise. + * The base frequency is specified as (start_factor * 500 kHz). + * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for + * 2.4 GHz and 5 GHz bands. + * The channel range of [1, 14] is only checked for a start_factor of + * WF_CHAN_FACTOR_2_4_G (4814). + * Odd start_factors produce channels on .5 MHz boundaries, in which case + * the answer is rounded down to an integral MHz. + * -1 is returned for an out of range channel. + * + * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3 + * + * @param channel input channel number + * @param start_factor base frequency in 500 kHz units, e.g. 
10000 for 5 GHz + * + * @return Returns a frequency in MHz + * + * @see WF_CHAN_FACTOR_2_4_G + * @see WF_CHAN_FACTOR_5_G + */ +extern int wf_channel2mhz(uint channel, uint start_factor); + +/** + * Returns the chanspec 80Mhz channel corresponding to the following input + * parameters + * + * primary_channel - primary 20Mhz channel + * center_channel - center frequecny of the 80Mhz channel + * + * The center_channel can be one of {42, 58, 106, 122, 138, 155} + * + * returns INVCHANSPEC in case of error + */ +extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel); + +/** + * Convert ctl chan and bw to chanspec + * + * @param ctl_ch channel + * @param bw bandwidth + * + * @return > 0 if successful or 0 otherwise + * + */ +extern uint16 wf_channel2chspec(uint ctl_ch, uint bw); + +extern uint wf_channel2freq(uint channel); +extern uint wf_freq2channel(uint freq); + +/* + * Returns the 80+80 MHz chanspec corresponding to the following input parameters + * + * primary_20mhz - Primary 20 MHz channel + * chan0_80MHz - center channel number of one frequency segment + * chan1_80MHz - center channel number of the other frequency segment + * + * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}. + * The primary channel must be contained in one of the 80MHz channels. This routine + * will determine which frequency segment is the primary 80 MHz segment. + * + * Returns INVCHANSPEC in case of error. + * + * Refer to 802.11-2016 section 22.3.14 "Channelization". + */ +extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz, + uint8 chan0_80Mhz, uint8 chan1_80Mhz); + +/** + * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec + * + * @param chspec input chanspec + * + * @return center channel number of the primary 80MHz sub-band of the input. + * Will return the center channel of an input 80MHz chspec. + * Will return INVCHANNEL if the chspec is malformed or less than 80MHz bw. 
+ */ +extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec); + +/** + * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec + * + * @param chspec input chanspec + * + * @return center channel number of the secondary 80MHz sub-band of the input. + * Will return INVCHANNEL if the chspec is malformed or bw is not greater than 80MHz. + */ +extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec); + +/** + * Returns the chanspec for the primary 80MHz sub-band of an 160MHz or 80+80 channel + * + * @param chspec input chanspec + * + * @return An 80MHz chanspec describing the primary 80MHz sub-band of the input. + * Will return an input 80MHz chspec as is. + * Will return INVCHANSPEC if the chspec is malformed or less than 80MHz bw. + */ +extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec); + +/** + * Returns the chanspec for the secondary 80MHz sub-band of an 160MHz or 80+80 channel + * The sideband in the chanspec is always set to WL_CHANSPEC_CTL_SB_LL since this sub-band + * does not contain the primary 20MHz channel. + * + * @param chspec input chanspec + * + * @return An 80MHz chanspec describing the secondary 80MHz sub-band of the input. + * Will return INVCHANSPEC if the chspec is malformed or bw is not greater than 80MHz. + */ +extern chanspec_t wf_chspec_secondary80_chspec(chanspec_t chspec); + +/* + * For 160MHz or 80P80 chanspec, set ch[0]/ch[1] to be the low/high 80 Mhz channels + * + * For 20/40/80MHz chanspec, set ch[0] to be the center freq, and chan[1]=-1 + */ +extern void wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch); + +#ifdef WL11AC_80P80 +/* + * This function returns the centre chanel for the given chanspec. 
+ * In case of 80+80 chanspec it returns the primary 80 Mhz centre channel + */ +extern uint8 wf_chspec_channel(chanspec_t chspec); +#endif // endif +extern chanspec_t wf_channel_create_chspec_frm_opclass(uint8 opclass, uint8 channel); +extern int wf_channel_create_opclass_frm_chspec(chanspec_t chspec); + +/* Populates array with all 20MHz side bands of a given chanspec_t in the following order: + * primary20, ext20, two ext40s, four ext80s. + * 'chspec' is the chanspec of interest + * 'pext' must point to an uint8 array of long enough to hold all side bands of the given chspec + * + * Works with 20, 40, 80, 80p80 and 160MHz chspec + */ + +extern void wf_get_all_ext(chanspec_t chspec, uint8 *chan_ptr); + +/* + * Given two chanspecs, returns true if they overlap. + * (Overlap: At least one 20MHz subband is common between the two chanspecs provided) + */ +extern bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1); + +extern uint8 channel_bw_to_width(chanspec_t chspec); +#endif /* _bcmwifi_channels_h_ */ diff --git a/bcmdhd.100.10.315.x/bcmwifi_rates.h b/bcmdhd.100.10.315.x/bcmwifi_rates.h new file mode 100644 index 0000000..da2c15b --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmwifi_rates.h @@ -0,0 +1,831 @@ +/* + * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmwifi_rates.h 697006 2017-05-01 19:13:40Z $ + */ + +#ifndef _bcmwifi_rates_h_ +#define _bcmwifi_rates_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define WL_RATESET_SZ_DSSS 4 +#define WL_RATESET_SZ_OFDM 8 +#define WL_RATESET_SZ_VHT_MCS 10 +#define WL_RATESET_SZ_VHT_MCS_P 12 /* 10 VHT rates + 2 proprietary rates */ +#define WL_RATESET_SZ_HE_MCS 12 /* 12 HE rates (mcs 0-11) */ + +#define WL_RATESET_SZ_HT_MCS 8 + +#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */ + +#define WL_TX_CHAINS_MAX 4 + +#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */ + +/* Transmit channel bandwidths */ +typedef enum wl_tx_bw { + WL_TX_BW_20, + WL_TX_BW_40, + WL_TX_BW_80, + WL_TX_BW_20IN40, + WL_TX_BW_20IN80, + WL_TX_BW_40IN80, + WL_TX_BW_160, + WL_TX_BW_20IN160, + WL_TX_BW_40IN160, + WL_TX_BW_80IN160, + WL_TX_BW_ALL, + WL_TX_BW_8080, + WL_TX_BW_8080CHAN2, + WL_TX_BW_20IN8080, + WL_TX_BW_40IN8080, + WL_TX_BW_80IN8080, + WL_TX_BW_2P5, + WL_TX_BW_5, + WL_TX_BW_10 +} wl_tx_bw_t; + +/* + * Transmit modes. + * Not all modes are listed here, only those required for disambiguation. e.g. 
SPEXP is not listed + */ +typedef enum wl_tx_mode { + WL_TX_MODE_NONE, + WL_TX_MODE_STBC, + WL_TX_MODE_CDD, + WL_TX_MODE_TXBF, + WL_NUM_TX_MODES +} wl_tx_mode_t; + +/* Number of transmit chains */ +typedef enum wl_tx_chains { + WL_TX_CHAINS_1 = 1, + WL_TX_CHAINS_2, + WL_TX_CHAINS_3, + WL_TX_CHAINS_4 +} wl_tx_chains_t; + +/* Number of transmit streams */ +typedef enum wl_tx_nss { + WL_TX_NSS_1 = 1, + WL_TX_NSS_2, + WL_TX_NSS_3, + WL_TX_NSS_4 +} wl_tx_nss_t; + +/* This enum maps each rate to a CLM index */ + +typedef enum clm_rates { + /************ + * 1 chain * + ************ + */ + + /* 1 Stream */ + WL_RATE_1X1_DSSS_1 = 0, + WL_RATE_1X1_DSSS_2 = 1, + WL_RATE_1X1_DSSS_5_5 = 2, + WL_RATE_1X1_DSSS_11 = 3, + + WL_RATE_1X1_OFDM_6 = 4, + WL_RATE_1X1_OFDM_9 = 5, + WL_RATE_1X1_OFDM_12 = 6, + WL_RATE_1X1_OFDM_18 = 7, + WL_RATE_1X1_OFDM_24 = 8, + WL_RATE_1X1_OFDM_36 = 9, + WL_RATE_1X1_OFDM_48 = 10, + WL_RATE_1X1_OFDM_54 = 11, + + WL_RATE_1X1_MCS0 = 12, + WL_RATE_1X1_MCS1 = 13, + WL_RATE_1X1_MCS2 = 14, + WL_RATE_1X1_MCS3 = 15, + WL_RATE_1X1_MCS4 = 16, + WL_RATE_1X1_MCS5 = 17, + WL_RATE_1X1_MCS6 = 18, + WL_RATE_1X1_MCS7 = 19, + WL_RATE_P_1X1_MCS87 = 20, + WL_RATE_P_1X1_MCS88 = 21, + + WL_RATE_1X1_VHT0SS1 = 12, + WL_RATE_1X1_VHT1SS1 = 13, + WL_RATE_1X1_VHT2SS1 = 14, + WL_RATE_1X1_VHT3SS1 = 15, + WL_RATE_1X1_VHT4SS1 = 16, + WL_RATE_1X1_VHT5SS1 = 17, + WL_RATE_1X1_VHT6SS1 = 18, + WL_RATE_1X1_VHT7SS1 = 19, + WL_RATE_1X1_VHT8SS1 = 20, + WL_RATE_1X1_VHT9SS1 = 21, + WL_RATE_P_1X1_VHT10SS1 = 22, + WL_RATE_P_1X1_VHT11SS1 = 23, + + /************ + * 2 chains * + ************ + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_DSSS_1 = 24, + WL_RATE_1X2_DSSS_2 = 25, + WL_RATE_1X2_DSSS_5_5 = 26, + WL_RATE_1X2_DSSS_11 = 27, + + WL_RATE_1X2_CDD_OFDM_6 = 28, + WL_RATE_1X2_CDD_OFDM_9 = 29, + WL_RATE_1X2_CDD_OFDM_12 = 30, + WL_RATE_1X2_CDD_OFDM_18 = 31, + WL_RATE_1X2_CDD_OFDM_24 = 32, + WL_RATE_1X2_CDD_OFDM_36 = 33, + WL_RATE_1X2_CDD_OFDM_48 = 34, + WL_RATE_1X2_CDD_OFDM_54 = 35, + + 
WL_RATE_1X2_CDD_MCS0 = 36, + WL_RATE_1X2_CDD_MCS1 = 37, + WL_RATE_1X2_CDD_MCS2 = 38, + WL_RATE_1X2_CDD_MCS3 = 39, + WL_RATE_1X2_CDD_MCS4 = 40, + WL_RATE_1X2_CDD_MCS5 = 41, + WL_RATE_1X2_CDD_MCS6 = 42, + WL_RATE_1X2_CDD_MCS7 = 43, + WL_RATE_P_1X2_CDD_MCS87 = 44, + WL_RATE_P_1X2_CDD_MCS88 = 45, + + WL_RATE_1X2_VHT0SS1 = 36, + WL_RATE_1X2_VHT1SS1 = 37, + WL_RATE_1X2_VHT2SS1 = 38, + WL_RATE_1X2_VHT3SS1 = 39, + WL_RATE_1X2_VHT4SS1 = 40, + WL_RATE_1X2_VHT5SS1 = 41, + WL_RATE_1X2_VHT6SS1 = 42, + WL_RATE_1X2_VHT7SS1 = 43, + WL_RATE_1X2_VHT8SS1 = 44, + WL_RATE_1X2_VHT9SS1 = 45, + WL_RATE_P_1X2_VHT10SS1 = 46, + WL_RATE_P_1X2_VHT11SS1 = 47, + + /* 2 Streams */ + WL_RATE_2X2_STBC_MCS0 = 48, + WL_RATE_2X2_STBC_MCS1 = 49, + WL_RATE_2X2_STBC_MCS2 = 50, + WL_RATE_2X2_STBC_MCS3 = 51, + WL_RATE_2X2_STBC_MCS4 = 52, + WL_RATE_2X2_STBC_MCS5 = 53, + WL_RATE_2X2_STBC_MCS6 = 54, + WL_RATE_2X2_STBC_MCS7 = 55, + WL_RATE_P_2X2_STBC_MCS87 = 56, + WL_RATE_P_2X2_STBC_MCS88 = 57, + + WL_RATE_2X2_STBC_VHT0SS1 = 48, + WL_RATE_2X2_STBC_VHT1SS1 = 49, + WL_RATE_2X2_STBC_VHT2SS1 = 50, + WL_RATE_2X2_STBC_VHT3SS1 = 51, + WL_RATE_2X2_STBC_VHT4SS1 = 52, + WL_RATE_2X2_STBC_VHT5SS1 = 53, + WL_RATE_2X2_STBC_VHT6SS1 = 54, + WL_RATE_2X2_STBC_VHT7SS1 = 55, + WL_RATE_2X2_STBC_VHT8SS1 = 56, + WL_RATE_2X2_STBC_VHT9SS1 = 57, + WL_RATE_P_2X2_STBC_VHT10SS1 = 58, + WL_RATE_P_2X2_STBC_VHT11SS1 = 59, + + WL_RATE_2X2_SDM_MCS8 = 60, + WL_RATE_2X2_SDM_MCS9 = 61, + WL_RATE_2X2_SDM_MCS10 = 62, + WL_RATE_2X2_SDM_MCS11 = 63, + WL_RATE_2X2_SDM_MCS12 = 64, + WL_RATE_2X2_SDM_MCS13 = 65, + WL_RATE_2X2_SDM_MCS14 = 66, + WL_RATE_2X2_SDM_MCS15 = 67, + WL_RATE_P_2X2_SDM_MCS99 = 68, + WL_RATE_P_2X2_SDM_MCS100 = 69, + + WL_RATE_2X2_VHT0SS2 = 60, + WL_RATE_2X2_VHT1SS2 = 61, + WL_RATE_2X2_VHT2SS2 = 62, + WL_RATE_2X2_VHT3SS2 = 63, + WL_RATE_2X2_VHT4SS2 = 64, + WL_RATE_2X2_VHT5SS2 = 65, + WL_RATE_2X2_VHT6SS2 = 66, + WL_RATE_2X2_VHT7SS2 = 67, + WL_RATE_2X2_VHT8SS2 = 68, + WL_RATE_2X2_VHT9SS2 = 69, + WL_RATE_P_2X2_VHT10SS2 = 70, + 
WL_RATE_P_2X2_VHT11SS2 = 71, + + /**************************** + * TX Beamforming, 2 chains * + **************************** + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_TXBF_OFDM_6 = 72, + WL_RATE_1X2_TXBF_OFDM_9 = 73, + WL_RATE_1X2_TXBF_OFDM_12 = 74, + WL_RATE_1X2_TXBF_OFDM_18 = 75, + WL_RATE_1X2_TXBF_OFDM_24 = 76, + WL_RATE_1X2_TXBF_OFDM_36 = 77, + WL_RATE_1X2_TXBF_OFDM_48 = 78, + WL_RATE_1X2_TXBF_OFDM_54 = 79, + + WL_RATE_1X2_TXBF_MCS0 = 80, + WL_RATE_1X2_TXBF_MCS1 = 81, + WL_RATE_1X2_TXBF_MCS2 = 82, + WL_RATE_1X2_TXBF_MCS3 = 83, + WL_RATE_1X2_TXBF_MCS4 = 84, + WL_RATE_1X2_TXBF_MCS5 = 85, + WL_RATE_1X2_TXBF_MCS6 = 86, + WL_RATE_1X2_TXBF_MCS7 = 87, + WL_RATE_P_1X2_TXBF_MCS87 = 88, + WL_RATE_P_1X2_TXBF_MCS88 = 89, + + WL_RATE_1X2_TXBF_VHT0SS1 = 80, + WL_RATE_1X2_TXBF_VHT1SS1 = 81, + WL_RATE_1X2_TXBF_VHT2SS1 = 82, + WL_RATE_1X2_TXBF_VHT3SS1 = 83, + WL_RATE_1X2_TXBF_VHT4SS1 = 84, + WL_RATE_1X2_TXBF_VHT5SS1 = 85, + WL_RATE_1X2_TXBF_VHT6SS1 = 86, + WL_RATE_1X2_TXBF_VHT7SS1 = 87, + WL_RATE_1X2_TXBF_VHT8SS1 = 88, + WL_RATE_1X2_TXBF_VHT9SS1 = 89, + WL_RATE_P_1X2_TXBF_VHT10SS1 = 90, + WL_RATE_P_1X2_TXBF_VHT11SS1 = 91, + + /* 2 Streams */ + WL_RATE_2X2_TXBF_SDM_MCS8 = 92, + WL_RATE_2X2_TXBF_SDM_MCS9 = 93, + WL_RATE_2X2_TXBF_SDM_MCS10 = 94, + WL_RATE_2X2_TXBF_SDM_MCS11 = 95, + WL_RATE_2X2_TXBF_SDM_MCS12 = 96, + WL_RATE_2X2_TXBF_SDM_MCS13 = 97, + WL_RATE_2X2_TXBF_SDM_MCS14 = 98, + WL_RATE_2X2_TXBF_SDM_MCS15 = 99, + WL_RATE_P_2X2_TXBF_SDM_MCS99 = 100, + WL_RATE_P_2X2_TXBF_SDM_MCS100 = 101, + + WL_RATE_2X2_TXBF_VHT0SS2 = 92, + WL_RATE_2X2_TXBF_VHT1SS2 = 93, + WL_RATE_2X2_TXBF_VHT2SS2 = 94, + WL_RATE_2X2_TXBF_VHT3SS2 = 95, + WL_RATE_2X2_TXBF_VHT4SS2 = 96, + WL_RATE_2X2_TXBF_VHT5SS2 = 97, + WL_RATE_2X2_TXBF_VHT6SS2 = 98, + WL_RATE_2X2_TXBF_VHT7SS2 = 99, + WL_RATE_2X2_TXBF_VHT8SS2 = 100, + WL_RATE_2X2_TXBF_VHT9SS2 = 101, + WL_RATE_P_2X2_TXBF_VHT10SS2 = 102, + WL_RATE_P_2X2_TXBF_VHT11SS2 = 103, + + /************ + * 3 chains * + ************ + */ + + /* 1 Stream expanded + 
2 */ + WL_RATE_1X3_DSSS_1 = 104, + WL_RATE_1X3_DSSS_2 = 105, + WL_RATE_1X3_DSSS_5_5 = 106, + WL_RATE_1X3_DSSS_11 = 107, + + WL_RATE_1X3_CDD_OFDM_6 = 108, + WL_RATE_1X3_CDD_OFDM_9 = 109, + WL_RATE_1X3_CDD_OFDM_12 = 110, + WL_RATE_1X3_CDD_OFDM_18 = 111, + WL_RATE_1X3_CDD_OFDM_24 = 112, + WL_RATE_1X3_CDD_OFDM_36 = 113, + WL_RATE_1X3_CDD_OFDM_48 = 114, + WL_RATE_1X3_CDD_OFDM_54 = 115, + + WL_RATE_1X3_CDD_MCS0 = 116, + WL_RATE_1X3_CDD_MCS1 = 117, + WL_RATE_1X3_CDD_MCS2 = 118, + WL_RATE_1X3_CDD_MCS3 = 119, + WL_RATE_1X3_CDD_MCS4 = 120, + WL_RATE_1X3_CDD_MCS5 = 121, + WL_RATE_1X3_CDD_MCS6 = 122, + WL_RATE_1X3_CDD_MCS7 = 123, + WL_RATE_P_1X3_CDD_MCS87 = 124, + WL_RATE_P_1X3_CDD_MCS88 = 125, + + WL_RATE_1X3_VHT0SS1 = 116, + WL_RATE_1X3_VHT1SS1 = 117, + WL_RATE_1X3_VHT2SS1 = 118, + WL_RATE_1X3_VHT3SS1 = 119, + WL_RATE_1X3_VHT4SS1 = 120, + WL_RATE_1X3_VHT5SS1 = 121, + WL_RATE_1X3_VHT6SS1 = 122, + WL_RATE_1X3_VHT7SS1 = 123, + WL_RATE_1X3_VHT8SS1 = 124, + WL_RATE_1X3_VHT9SS1 = 125, + WL_RATE_P_1X3_VHT10SS1 = 126, + WL_RATE_P_1X3_VHT11SS1 = 127, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_STBC_MCS0 = 128, + WL_RATE_2X3_STBC_MCS1 = 129, + WL_RATE_2X3_STBC_MCS2 = 130, + WL_RATE_2X3_STBC_MCS3 = 131, + WL_RATE_2X3_STBC_MCS4 = 132, + WL_RATE_2X3_STBC_MCS5 = 133, + WL_RATE_2X3_STBC_MCS6 = 134, + WL_RATE_2X3_STBC_MCS7 = 135, + WL_RATE_P_2X3_STBC_MCS87 = 136, + WL_RATE_P_2X3_STBC_MCS88 = 137, + + WL_RATE_2X3_STBC_VHT0SS1 = 128, + WL_RATE_2X3_STBC_VHT1SS1 = 129, + WL_RATE_2X3_STBC_VHT2SS1 = 130, + WL_RATE_2X3_STBC_VHT3SS1 = 131, + WL_RATE_2X3_STBC_VHT4SS1 = 132, + WL_RATE_2X3_STBC_VHT5SS1 = 133, + WL_RATE_2X3_STBC_VHT6SS1 = 134, + WL_RATE_2X3_STBC_VHT7SS1 = 135, + WL_RATE_2X3_STBC_VHT8SS1 = 136, + WL_RATE_2X3_STBC_VHT9SS1 = 137, + WL_RATE_P_2X3_STBC_VHT10SS1 = 138, + WL_RATE_P_2X3_STBC_VHT11SS1 = 139, + + WL_RATE_2X3_SDM_MCS8 = 140, + WL_RATE_2X3_SDM_MCS9 = 141, + WL_RATE_2X3_SDM_MCS10 = 142, + WL_RATE_2X3_SDM_MCS11 = 143, + WL_RATE_2X3_SDM_MCS12 = 144, + WL_RATE_2X3_SDM_MCS13 = 145, 
+ WL_RATE_2X3_SDM_MCS14 = 146, + WL_RATE_2X3_SDM_MCS15 = 147, + WL_RATE_P_2X3_SDM_MCS99 = 148, + WL_RATE_P_2X3_SDM_MCS100 = 149, + + WL_RATE_2X3_VHT0SS2 = 140, + WL_RATE_2X3_VHT1SS2 = 141, + WL_RATE_2X3_VHT2SS2 = 142, + WL_RATE_2X3_VHT3SS2 = 143, + WL_RATE_2X3_VHT4SS2 = 144, + WL_RATE_2X3_VHT5SS2 = 145, + WL_RATE_2X3_VHT6SS2 = 146, + WL_RATE_2X3_VHT7SS2 = 147, + WL_RATE_2X3_VHT8SS2 = 148, + WL_RATE_2X3_VHT9SS2 = 149, + WL_RATE_P_2X3_VHT10SS2 = 150, + WL_RATE_P_2X3_VHT11SS2 = 151, + + /* 3 Streams */ + WL_RATE_3X3_SDM_MCS16 = 152, + WL_RATE_3X3_SDM_MCS17 = 153, + WL_RATE_3X3_SDM_MCS18 = 154, + WL_RATE_3X3_SDM_MCS19 = 155, + WL_RATE_3X3_SDM_MCS20 = 156, + WL_RATE_3X3_SDM_MCS21 = 157, + WL_RATE_3X3_SDM_MCS22 = 158, + WL_RATE_3X3_SDM_MCS23 = 159, + WL_RATE_P_3X3_SDM_MCS101 = 160, + WL_RATE_P_3X3_SDM_MCS102 = 161, + + WL_RATE_3X3_VHT0SS3 = 152, + WL_RATE_3X3_VHT1SS3 = 153, + WL_RATE_3X3_VHT2SS3 = 154, + WL_RATE_3X3_VHT3SS3 = 155, + WL_RATE_3X3_VHT4SS3 = 156, + WL_RATE_3X3_VHT5SS3 = 157, + WL_RATE_3X3_VHT6SS3 = 158, + WL_RATE_3X3_VHT7SS3 = 159, + WL_RATE_3X3_VHT8SS3 = 160, + WL_RATE_3X3_VHT9SS3 = 161, + WL_RATE_P_3X3_VHT10SS3 = 162, + WL_RATE_P_3X3_VHT11SS3 = 163, + + /**************************** + * TX Beamforming, 3 chains * + **************************** + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_TXBF_OFDM_6 = 164, + WL_RATE_1X3_TXBF_OFDM_9 = 165, + WL_RATE_1X3_TXBF_OFDM_12 = 166, + WL_RATE_1X3_TXBF_OFDM_18 = 167, + WL_RATE_1X3_TXBF_OFDM_24 = 168, + WL_RATE_1X3_TXBF_OFDM_36 = 169, + WL_RATE_1X3_TXBF_OFDM_48 = 170, + WL_RATE_1X3_TXBF_OFDM_54 = 171, + + WL_RATE_1X3_TXBF_MCS0 = 172, + WL_RATE_1X3_TXBF_MCS1 = 173, + WL_RATE_1X3_TXBF_MCS2 = 174, + WL_RATE_1X3_TXBF_MCS3 = 175, + WL_RATE_1X3_TXBF_MCS4 = 176, + WL_RATE_1X3_TXBF_MCS5 = 177, + WL_RATE_1X3_TXBF_MCS6 = 178, + WL_RATE_1X3_TXBF_MCS7 = 179, + WL_RATE_P_1X3_TXBF_MCS87 = 180, + WL_RATE_P_1X3_TXBF_MCS88 = 181, + + WL_RATE_1X3_TXBF_VHT0SS1 = 172, + WL_RATE_1X3_TXBF_VHT1SS1 = 173, + WL_RATE_1X3_TXBF_VHT2SS1 = 
174, + WL_RATE_1X3_TXBF_VHT3SS1 = 175, + WL_RATE_1X3_TXBF_VHT4SS1 = 176, + WL_RATE_1X3_TXBF_VHT5SS1 = 177, + WL_RATE_1X3_TXBF_VHT6SS1 = 178, + WL_RATE_1X3_TXBF_VHT7SS1 = 179, + WL_RATE_1X3_TXBF_VHT8SS1 = 180, + WL_RATE_1X3_TXBF_VHT9SS1 = 181, + WL_RATE_P_1X3_TXBF_VHT10SS1 = 182, + WL_RATE_P_1X3_TXBF_VHT11SS1 = 183, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_TXBF_SDM_MCS8 = 184, + WL_RATE_2X3_TXBF_SDM_MCS9 = 185, + WL_RATE_2X3_TXBF_SDM_MCS10 = 186, + WL_RATE_2X3_TXBF_SDM_MCS11 = 187, + WL_RATE_2X3_TXBF_SDM_MCS12 = 188, + WL_RATE_2X3_TXBF_SDM_MCS13 = 189, + WL_RATE_2X3_TXBF_SDM_MCS14 = 190, + WL_RATE_2X3_TXBF_SDM_MCS15 = 191, + WL_RATE_P_2X3_TXBF_SDM_MCS99 = 192, + WL_RATE_P_2X3_TXBF_SDM_MCS100 = 193, + + WL_RATE_2X3_TXBF_VHT0SS2 = 184, + WL_RATE_2X3_TXBF_VHT1SS2 = 185, + WL_RATE_2X3_TXBF_VHT2SS2 = 186, + WL_RATE_2X3_TXBF_VHT3SS2 = 187, + WL_RATE_2X3_TXBF_VHT4SS2 = 188, + WL_RATE_2X3_TXBF_VHT5SS2 = 189, + WL_RATE_2X3_TXBF_VHT6SS2 = 190, + WL_RATE_2X3_TXBF_VHT7SS2 = 191, + WL_RATE_2X3_TXBF_VHT8SS2 = 192, + WL_RATE_2X3_TXBF_VHT9SS2 = 193, + WL_RATE_P_2X3_TXBF_VHT10SS2 = 194, + WL_RATE_P_2X3_TXBF_VHT11SS2 = 195, + + /* 3 Streams */ + WL_RATE_3X3_TXBF_SDM_MCS16 = 196, + WL_RATE_3X3_TXBF_SDM_MCS17 = 197, + WL_RATE_3X3_TXBF_SDM_MCS18 = 198, + WL_RATE_3X3_TXBF_SDM_MCS19 = 199, + WL_RATE_3X3_TXBF_SDM_MCS20 = 200, + WL_RATE_3X3_TXBF_SDM_MCS21 = 201, + WL_RATE_3X3_TXBF_SDM_MCS22 = 202, + WL_RATE_3X3_TXBF_SDM_MCS23 = 203, + WL_RATE_P_3X3_TXBF_SDM_MCS101 = 204, + WL_RATE_P_3X3_TXBF_SDM_MCS102 = 205, + + WL_RATE_3X3_TXBF_VHT0SS3 = 196, + WL_RATE_3X3_TXBF_VHT1SS3 = 197, + WL_RATE_3X3_TXBF_VHT2SS3 = 198, + WL_RATE_3X3_TXBF_VHT3SS3 = 199, + WL_RATE_3X3_TXBF_VHT4SS3 = 200, + WL_RATE_3X3_TXBF_VHT5SS3 = 201, + WL_RATE_3X3_TXBF_VHT6SS3 = 202, + WL_RATE_3X3_TXBF_VHT7SS3 = 203, + WL_RATE_3X3_TXBF_VHT8SS3 = 204, + WL_RATE_3X3_TXBF_VHT9SS3 = 205, + WL_RATE_P_3X3_TXBF_VHT10SS3 = 206, + WL_RATE_P_3X3_TXBF_VHT11SS3 = 207, + + /************ + * 4 chains * + ************ + */ + + /* 1 Stream 
expanded + 3 */ + WL_RATE_1X4_DSSS_1 = 208, + WL_RATE_1X4_DSSS_2 = 209, + WL_RATE_1X4_DSSS_5_5 = 210, + WL_RATE_1X4_DSSS_11 = 211, + + WL_RATE_1X4_CDD_OFDM_6 = 212, + WL_RATE_1X4_CDD_OFDM_9 = 213, + WL_RATE_1X4_CDD_OFDM_12 = 214, + WL_RATE_1X4_CDD_OFDM_18 = 215, + WL_RATE_1X4_CDD_OFDM_24 = 216, + WL_RATE_1X4_CDD_OFDM_36 = 217, + WL_RATE_1X4_CDD_OFDM_48 = 218, + WL_RATE_1X4_CDD_OFDM_54 = 219, + + WL_RATE_1X4_CDD_MCS0 = 220, + WL_RATE_1X4_CDD_MCS1 = 221, + WL_RATE_1X4_CDD_MCS2 = 222, + WL_RATE_1X4_CDD_MCS3 = 223, + WL_RATE_1X4_CDD_MCS4 = 224, + WL_RATE_1X4_CDD_MCS5 = 225, + WL_RATE_1X4_CDD_MCS6 = 226, + WL_RATE_1X4_CDD_MCS7 = 227, + WL_RATE_P_1X4_CDD_MCS87 = 228, + WL_RATE_P_1X4_CDD_MCS88 = 229, + + WL_RATE_1X4_VHT0SS1 = 220, + WL_RATE_1X4_VHT1SS1 = 221, + WL_RATE_1X4_VHT2SS1 = 222, + WL_RATE_1X4_VHT3SS1 = 223, + WL_RATE_1X4_VHT4SS1 = 224, + WL_RATE_1X4_VHT5SS1 = 225, + WL_RATE_1X4_VHT6SS1 = 226, + WL_RATE_1X4_VHT7SS1 = 227, + WL_RATE_1X4_VHT8SS1 = 228, + WL_RATE_1X4_VHT9SS1 = 229, + WL_RATE_P_1X4_VHT10SS1 = 230, + WL_RATE_P_1X4_VHT11SS1 = 231, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_STBC_MCS0 = 232, + WL_RATE_2X4_STBC_MCS1 = 233, + WL_RATE_2X4_STBC_MCS2 = 234, + WL_RATE_2X4_STBC_MCS3 = 235, + WL_RATE_2X4_STBC_MCS4 = 236, + WL_RATE_2X4_STBC_MCS5 = 237, + WL_RATE_2X4_STBC_MCS6 = 238, + WL_RATE_2X4_STBC_MCS7 = 239, + WL_RATE_P_2X4_STBC_MCS87 = 240, + WL_RATE_P_2X4_STBC_MCS88 = 241, + + WL_RATE_2X4_STBC_VHT0SS1 = 232, + WL_RATE_2X4_STBC_VHT1SS1 = 233, + WL_RATE_2X4_STBC_VHT2SS1 = 234, + WL_RATE_2X4_STBC_VHT3SS1 = 235, + WL_RATE_2X4_STBC_VHT4SS1 = 236, + WL_RATE_2X4_STBC_VHT5SS1 = 237, + WL_RATE_2X4_STBC_VHT6SS1 = 238, + WL_RATE_2X4_STBC_VHT7SS1 = 239, + WL_RATE_2X4_STBC_VHT8SS1 = 240, + WL_RATE_2X4_STBC_VHT9SS1 = 241, + WL_RATE_P_2X4_STBC_VHT10SS1 = 242, + WL_RATE_P_2X4_STBC_VHT11SS1 = 243, + + WL_RATE_2X4_SDM_MCS8 = 244, + WL_RATE_2X4_SDM_MCS9 = 245, + WL_RATE_2X4_SDM_MCS10 = 246, + WL_RATE_2X4_SDM_MCS11 = 247, + WL_RATE_2X4_SDM_MCS12 = 248, + 
WL_RATE_2X4_SDM_MCS13 = 249, + WL_RATE_2X4_SDM_MCS14 = 250, + WL_RATE_2X4_SDM_MCS15 = 251, + WL_RATE_P_2X4_SDM_MCS99 = 252, + WL_RATE_P_2X4_SDM_MCS100 = 253, + + WL_RATE_2X4_VHT0SS2 = 244, + WL_RATE_2X4_VHT1SS2 = 245, + WL_RATE_2X4_VHT2SS2 = 246, + WL_RATE_2X4_VHT3SS2 = 247, + WL_RATE_2X4_VHT4SS2 = 248, + WL_RATE_2X4_VHT5SS2 = 249, + WL_RATE_2X4_VHT6SS2 = 250, + WL_RATE_2X4_VHT7SS2 = 251, + WL_RATE_2X4_VHT8SS2 = 252, + WL_RATE_2X4_VHT9SS2 = 253, + WL_RATE_P_2X4_VHT10SS2 = 254, + WL_RATE_P_2X4_VHT11SS2 = 255, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_SDM_MCS16 = 256, + WL_RATE_3X4_SDM_MCS17 = 257, + WL_RATE_3X4_SDM_MCS18 = 258, + WL_RATE_3X4_SDM_MCS19 = 259, + WL_RATE_3X4_SDM_MCS20 = 260, + WL_RATE_3X4_SDM_MCS21 = 261, + WL_RATE_3X4_SDM_MCS22 = 262, + WL_RATE_3X4_SDM_MCS23 = 263, + WL_RATE_P_3X4_SDM_MCS101 = 264, + WL_RATE_P_3X4_SDM_MCS102 = 265, + + WL_RATE_3X4_VHT0SS3 = 256, + WL_RATE_3X4_VHT1SS3 = 257, + WL_RATE_3X4_VHT2SS3 = 258, + WL_RATE_3X4_VHT3SS3 = 259, + WL_RATE_3X4_VHT4SS3 = 260, + WL_RATE_3X4_VHT5SS3 = 261, + WL_RATE_3X4_VHT6SS3 = 262, + WL_RATE_3X4_VHT7SS3 = 263, + WL_RATE_3X4_VHT8SS3 = 264, + WL_RATE_3X4_VHT9SS3 = 265, + WL_RATE_P_3X4_VHT10SS3 = 266, + WL_RATE_P_3X4_VHT11SS3 = 267, + + /* 4 Streams */ + WL_RATE_4X4_SDM_MCS24 = 268, + WL_RATE_4X4_SDM_MCS25 = 269, + WL_RATE_4X4_SDM_MCS26 = 270, + WL_RATE_4X4_SDM_MCS27 = 271, + WL_RATE_4X4_SDM_MCS28 = 272, + WL_RATE_4X4_SDM_MCS29 = 273, + WL_RATE_4X4_SDM_MCS30 = 274, + WL_RATE_4X4_SDM_MCS31 = 275, + WL_RATE_P_4X4_SDM_MCS103 = 276, + WL_RATE_P_4X4_SDM_MCS104 = 277, + + WL_RATE_4X4_VHT0SS4 = 268, + WL_RATE_4X4_VHT1SS4 = 269, + WL_RATE_4X4_VHT2SS4 = 270, + WL_RATE_4X4_VHT3SS4 = 271, + WL_RATE_4X4_VHT4SS4 = 272, + WL_RATE_4X4_VHT5SS4 = 273, + WL_RATE_4X4_VHT6SS4 = 274, + WL_RATE_4X4_VHT7SS4 = 275, + WL_RATE_4X4_VHT8SS4 = 276, + WL_RATE_4X4_VHT9SS4 = 277, + WL_RATE_P_4X4_VHT10SS4 = 278, + WL_RATE_P_4X4_VHT11SS4 = 279, + + /**************************** + * TX Beamforming, 4 chains * + 
**************************** + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_TXBF_OFDM_6 = 280, + WL_RATE_1X4_TXBF_OFDM_9 = 281, + WL_RATE_1X4_TXBF_OFDM_12 = 282, + WL_RATE_1X4_TXBF_OFDM_18 = 283, + WL_RATE_1X4_TXBF_OFDM_24 = 284, + WL_RATE_1X4_TXBF_OFDM_36 = 285, + WL_RATE_1X4_TXBF_OFDM_48 = 286, + WL_RATE_1X4_TXBF_OFDM_54 = 287, + + WL_RATE_1X4_TXBF_MCS0 = 288, + WL_RATE_1X4_TXBF_MCS1 = 289, + WL_RATE_1X4_TXBF_MCS2 = 290, + WL_RATE_1X4_TXBF_MCS3 = 291, + WL_RATE_1X4_TXBF_MCS4 = 292, + WL_RATE_1X4_TXBF_MCS5 = 293, + WL_RATE_1X4_TXBF_MCS6 = 294, + WL_RATE_1X4_TXBF_MCS7 = 295, + WL_RATE_P_1X4_TXBF_MCS87 = 296, + WL_RATE_P_1X4_TXBF_MCS88 = 297, + + WL_RATE_1X4_TXBF_VHT0SS1 = 288, + WL_RATE_1X4_TXBF_VHT1SS1 = 289, + WL_RATE_1X4_TXBF_VHT2SS1 = 290, + WL_RATE_1X4_TXBF_VHT3SS1 = 291, + WL_RATE_1X4_TXBF_VHT4SS1 = 292, + WL_RATE_1X4_TXBF_VHT5SS1 = 293, + WL_RATE_1X4_TXBF_VHT6SS1 = 294, + WL_RATE_1X4_TXBF_VHT7SS1 = 295, + WL_RATE_1X4_TXBF_VHT8SS1 = 296, + WL_RATE_1X4_TXBF_VHT9SS1 = 297, + WL_RATE_P_1X4_TXBF_VHT10SS1 = 298, + WL_RATE_P_1X4_TXBF_VHT11SS1 = 299, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_TXBF_SDM_MCS8 = 300, + WL_RATE_2X4_TXBF_SDM_MCS9 = 301, + WL_RATE_2X4_TXBF_SDM_MCS10 = 302, + WL_RATE_2X4_TXBF_SDM_MCS11 = 303, + WL_RATE_2X4_TXBF_SDM_MCS12 = 304, + WL_RATE_2X4_TXBF_SDM_MCS13 = 305, + WL_RATE_2X4_TXBF_SDM_MCS14 = 306, + WL_RATE_2X4_TXBF_SDM_MCS15 = 307, + WL_RATE_P_2X4_TXBF_SDM_MCS99 = 308, + WL_RATE_P_2X4_TXBF_SDM_MCS100 = 309, + + WL_RATE_2X4_TXBF_VHT0SS2 = 300, + WL_RATE_2X4_TXBF_VHT1SS2 = 301, + WL_RATE_2X4_TXBF_VHT2SS2 = 302, + WL_RATE_2X4_TXBF_VHT3SS2 = 303, + WL_RATE_2X4_TXBF_VHT4SS2 = 304, + WL_RATE_2X4_TXBF_VHT5SS2 = 305, + WL_RATE_2X4_TXBF_VHT6SS2 = 306, + WL_RATE_2X4_TXBF_VHT7SS2 = 307, + WL_RATE_2X4_TXBF_VHT8SS2 = 308, + WL_RATE_2X4_TXBF_VHT9SS2 = 309, + WL_RATE_P_2X4_TXBF_VHT10SS2 = 310, + WL_RATE_P_2X4_TXBF_VHT11SS2 = 311, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_TXBF_SDM_MCS16 = 312, + WL_RATE_3X4_TXBF_SDM_MCS17 = 313, + 
WL_RATE_3X4_TXBF_SDM_MCS18 = 314, + WL_RATE_3X4_TXBF_SDM_MCS19 = 315, + WL_RATE_3X4_TXBF_SDM_MCS20 = 316, + WL_RATE_3X4_TXBF_SDM_MCS21 = 317, + WL_RATE_3X4_TXBF_SDM_MCS22 = 318, + WL_RATE_3X4_TXBF_SDM_MCS23 = 319, + WL_RATE_P_3X4_TXBF_SDM_MCS101 = 320, + WL_RATE_P_3X4_TXBF_SDM_MCS102 = 321, + + WL_RATE_3X4_TXBF_VHT0SS3 = 312, + WL_RATE_3X4_TXBF_VHT1SS3 = 313, + WL_RATE_3X4_TXBF_VHT2SS3 = 314, + WL_RATE_3X4_TXBF_VHT3SS3 = 315, + WL_RATE_3X4_TXBF_VHT4SS3 = 316, + WL_RATE_3X4_TXBF_VHT5SS3 = 317, + WL_RATE_3X4_TXBF_VHT6SS3 = 318, + WL_RATE_3X4_TXBF_VHT7SS3 = 319, + WL_RATE_P_3X4_TXBF_VHT8SS3 = 320, + WL_RATE_P_3X4_TXBF_VHT9SS3 = 321, + WL_RATE_P_3X4_TXBF_VHT10SS3 = 322, + WL_RATE_P_3X4_TXBF_VHT11SS3 = 323, + + /* 4 Streams */ + WL_RATE_4X4_TXBF_SDM_MCS24 = 324, + WL_RATE_4X4_TXBF_SDM_MCS25 = 325, + WL_RATE_4X4_TXBF_SDM_MCS26 = 326, + WL_RATE_4X4_TXBF_SDM_MCS27 = 327, + WL_RATE_4X4_TXBF_SDM_MCS28 = 328, + WL_RATE_4X4_TXBF_SDM_MCS29 = 329, + WL_RATE_4X4_TXBF_SDM_MCS30 = 330, + WL_RATE_4X4_TXBF_SDM_MCS31 = 331, + WL_RATE_P_4X4_TXBF_SDM_MCS103 = 332, + WL_RATE_P_4X4_TXBF_SDM_MCS104 = 333, + + WL_RATE_4X4_TXBF_VHT0SS4 = 324, + WL_RATE_4X4_TXBF_VHT1SS4 = 325, + WL_RATE_4X4_TXBF_VHT2SS4 = 326, + WL_RATE_4X4_TXBF_VHT3SS4 = 327, + WL_RATE_4X4_TXBF_VHT4SS4 = 328, + WL_RATE_4X4_TXBF_VHT5SS4 = 329, + WL_RATE_4X4_TXBF_VHT6SS4 = 330, + WL_RATE_4X4_TXBF_VHT7SS4 = 331, + WL_RATE_P_4X4_TXBF_VHT8SS4 = 332, + WL_RATE_P_4X4_TXBF_VHT9SS4 = 333, + WL_RATE_P_4X4_TXBF_VHT10SS4 = 334, + WL_RATE_P_4X4_TXBF_VHT11SS4 = 335 + +} clm_rates_t; + +/* Number of rate codes */ +#define WL_NUMRATES 336 + +/* MCS rates */ +#define WLC_MAX_VHT_MCS 11 /**< Std VHT MCS 0-9 plus prop VHT MCS 10-11 */ +#define WLC_MAX_HE_MCS 11 /**< Std HE MCS 0-11 */ + +/* Convert encoded rate value in plcp header to numerical rates in 500 KHz increments */ +#define OFDM_PHY2MAC_RATE(rlpt) plcp_ofdm_rate_tbl[(rlpt) & 0x7] +#define CCK_PHY2MAC_RATE(signal) ((signal)/5) + +/* given a proprietary MCS, get number of spatial 
streams */ +#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8) + +#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) : \ + ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs))) + +#define IS_PROPRIETARY_11N_MCS(mcs) FALSE +#define IS_PROPRIETARY_11N_SS_MCS(mcs) FALSE /**< is proprietary HT single stream MCS */ + +/* Store HE mcs map for all NSS in a compact form: + * + * bit[0:2] mcs code for NSS 1 + * bit[3:5] mcs code for NSS 2 + * ... + * bit[21:23] mcs code for NSS 8 + */ + +/** + * 3 bits are used for encoding each NSS mcs map (HE MCS MAP is 24 bits) + */ +#define HE_CAP_MCS_CODE_NONE 7 + +/* macros to access above compact format */ +#define HE_CAP_MCS_NSS_SET_MASK 0x00ffffff /* Field is to be 24 bits long */ +#define HE_CAP_MCS_NSS_GET_SS_IDX(nss) (((nss)-1) * HE_CAP_MCS_CODE_SIZE) +#define HE_CAP_MCS_NSS_GET_MCS(nss, mcs_nss_map) \ + (((mcs_nss_map) >> HE_CAP_MCS_NSS_GET_SS_IDX(nss)) & HE_CAP_MCS_CODE_MASK) +#define HE_CAP_MCS_NSS_SET_MCS(nss, mcs_code, mcs_nss_map) \ + do { \ + (mcs_nss_map) &= (~(HE_CAP_MCS_CODE_MASK << HE_CAP_MCS_NSS_GET_SS_IDX(nss))); \ + (mcs_nss_map) |= (((mcs_code) & HE_CAP_MCS_CODE_MASK) << HE_CAP_MCS_NSS_GET_SS_IDX(nss)); \ + (mcs_nss_map) &= (HE_CAP_MCS_NSS_SET_MASK); \ + } while (0) + +extern const uint8 plcp_ofdm_rate_tbl[]; + +uint8 wf_get_single_stream_mcs(uint mcs); + +uint8 wf_vht_plcp_to_rate(uint8 *plcp); +uint wf_mcs_to_rate(uint mcs, uint nss, uint bw, int sgi); +uint wf_he_mcs_to_rate(uint mcs, uint nss, uint bw, uint gi, bool dcm); +uint wf_mcs_to_Ndbps(uint mcs, uint nss, uint bw); +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _bcmwifi_rates_h_ */ diff --git a/bcmdhd.100.10.315.x/bcmwifi_rspec.h b/bcmdhd.100.10.315.x/bcmwifi_rspec.h new file mode 100644 index 0000000..37494fd --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmwifi_rspec.h @@ -0,0 +1,212 @@ +/* + * Common OS-independent driver header for rate management. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmwifi_rspec.h 736703 2017-12-18 06:55:37Z $ + */ + +#ifndef _bcmwifi_rspec_h_ +#define _bcmwifi_rspec_h_ + +#include + +/** + * =================================================================================== + * rate spec : holds rate and mode specific information required to generate a tx frame. + * Legacy CCK and OFDM information is held in the same manner as was done in the past. + * (in the lower byte) the upper 3 bytes primarily hold MIMO specific information + * =================================================================================== + */ +typedef uint32 ratespec_t; + +/* Rate spec. 
definitions */ +#define WL_RSPEC_RATE_MASK 0x000000FF /**< Legacy rate or MCS or MCS + NSS */ +#define WL_RSPEC_TXEXP_MASK 0x00000300 /**< Tx chain expansion beyond Nsts */ +#define WL_RSPEC_TXEXP_SHIFT 8 +#define WL_RSPEC_HE_GI_MASK 0x00000C00 /* HE GI indices */ +#define WL_RSPEC_HE_GI_SHIFT 10 +#define WL_RSPEC_BW_MASK 0x00070000 /**< Band width */ +#define WL_RSPEC_BW_SHIFT 16 +#define WL_RSPEC_DCM 0x00080000 /**< Dual Carrier Modulation */ +#define WL_RSPEC_STBC 0x00100000 /**< STBC expansion, Nsts = 2 * Nss */ +#define WL_RSPEC_TXBF 0x00200000 +#define WL_RSPEC_LDPC 0x00400000 +#define WL_RSPEC_SGI 0x00800000 +#define WL_RSPEC_SHORT_PREAMBLE 0x00800000 /**< DSSS short preable - Encoding 0 */ +#define WL_RSPEC_ENCODING_MASK 0x03000000 /**< Encoding of RSPEC_RATE field */ +#define WL_RSPEC_ENCODING_SHIFT 24 + +#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /**< override rate only */ +#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /**< override both rate & mode */ + +/* ======== RSPEC_HE_GI|RSPEC_SGI fields for HE ======== */ + +/* GI for HE */ +#define RSPEC_HE_LTF_GI(rspec) (((rspec) & WL_RSPEC_HE_GI_MASK) >> WL_RSPEC_HE_GI_SHIFT) +#define WL_RSPEC_HE_1x_LTF_GI_0_8us (0x0) +#define WL_RSPEC_HE_2x_LTF_GI_0_8us (0x1) +#define WL_RSPEC_HE_2x_LTF_GI_1_6us (0x2) +#define WL_RSPEC_HE_4x_LTF_GI_3_2us (0x3) +#define RSPEC_ISHEGI(rspec) (RSPEC_HE_LTF_GI(rspec) > WL_RSPEC_HE_1x_LTF_GI_0_8us) +#define HE_GI_TO_RSPEC(gi) (((gi) << WL_RSPEC_HE_GI_SHIFT) & WL_RSPEC_HE_GI_MASK) +/* ======== RSPEC_RATE field ======== */ + +/* Encoding 0 - legacy rate */ +/* DSSS, CCK, and OFDM rates in [500kbps] units */ +#define WL_RSPEC_LEGACY_RATE_MASK 0x0000007F +#define WLC_RATE_1M 2 +#define WLC_RATE_2M 4 +#define WLC_RATE_5M5 11 +#define WLC_RATE_11M 22 +#define WLC_RATE_6M 12 +#define WLC_RATE_9M 18 +#define WLC_RATE_12M 24 +#define WLC_RATE_18M 36 +#define WLC_RATE_24M 48 +#define WLC_RATE_36M 72 +#define WLC_RATE_48M 96 +#define WLC_RATE_54M 108 + +/* Encoding 1 - HT MCS */ +#define 
WL_RSPEC_HT_MCS_MASK 0x0000007F /**< HT MCS value mask in rspec */ + +/* Encoding 2 - VHT MCS + NSS */ +#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /**< VHT MCS value mask in rspec */ +#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /**< VHT Nss value mask in rspec */ +#define WL_RSPEC_VHT_NSS_SHIFT 4 /**< VHT Nss value shift in rspec */ + +/* Encoding 3 - HE MCS + NSS */ +#define WL_RSPEC_HE_MCS_MASK 0x0000000F /**< HE MCS value mask in rspec */ +#define WL_RSPEC_HE_NSS_MASK 0x000000F0 /**< HE Nss value mask in rspec */ +#define WL_RSPEC_HE_NSS_SHIFT 4 /**< HE Nss value shift in rpsec */ + +/* ======== RSPEC_BW field ======== */ + +#define WL_RSPEC_BW_UNSPECIFIED 0 +#define WL_RSPEC_BW_20MHZ 0x00010000 +#define WL_RSPEC_BW_40MHZ 0x00020000 +#define WL_RSPEC_BW_80MHZ 0x00030000 +#define WL_RSPEC_BW_160MHZ 0x00040000 + +/* ======== RSPEC_ENCODING field ======== */ + +#define WL_RSPEC_ENCODE_RATE 0x00000000 /**< Legacy rate is stored in RSPEC_RATE */ +#define WL_RSPEC_ENCODE_HT 0x01000000 /**< HT MCS is stored in RSPEC_RATE */ +#define WL_RSPEC_ENCODE_VHT 0x02000000 /**< VHT MCS and NSS are stored in RSPEC_RATE */ +#define WL_RSPEC_ENCODE_HE 0x03000000 /**< HE MCS and NSS are stored in RSPEC_RATE */ + +/** + * =============================== + * Handy macros to parse rate spec + * =============================== + */ +#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK) +#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ) +#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ) +#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ) +#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ) + +#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0) +#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0) +#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0) +#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0) + +#define RSPEC_TXEXP(rspec) (((rspec) & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT) + 
+#define RSPEC_ENCODE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >> WL_RSPEC_ENCODING_SHIFT) +#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE) + +#define RSPEC_ISCCK(rspec) (RSPEC_ISLEGACY(rspec) && \ + (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] > 0) +#define RSPEC_ISOFDM(rspec) (RSPEC_ISLEGACY(rspec) && \ + (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] < 0) + +#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) +#ifdef WL11AC +#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) +#else /* WL11AC */ +#define RSPEC_ISVHT(rspec) 0 +#endif /* WL11AC */ +#ifdef WL11AX +#define RSPEC_ISHE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HE) +#else /* WL11AX */ +#define RSPEC_ISHE(rspec) 0 +#endif /* WL11AX */ + +/** + * ================================ + * Handy macros to create rate spec + * ================================ + */ +/* create ratespecs */ +#define LEGACY_RSPEC(rate) (WL_RSPEC_ENCODE_RATE | WL_RSPEC_BW_20MHZ | \ + ((rate) & WL_RSPEC_LEGACY_RATE_MASK)) +#define CCK_RSPEC(cck) LEGACY_RSPEC(cck) +#define OFDM_RSPEC(ofdm) LEGACY_RSPEC(ofdm) +#define HT_RSPEC(mcs) (WL_RSPEC_ENCODE_HT | ((mcs) & WL_RSPEC_HT_MCS_MASK)) +#define VHT_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_VHT | \ + (((nss) << WL_RSPEC_VHT_NSS_SHIFT) & WL_RSPEC_VHT_NSS_MASK) | \ + ((mcs) & WL_RSPEC_VHT_MCS_MASK)) +#define HE_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_HE | \ + (((nss) << WL_RSPEC_HE_NSS_SHIFT) & WL_RSPEC_HE_NSS_MASK) | \ + ((mcs) & WL_RSPEC_HE_MCS_MASK)) + +/** + * ================== + * Other handy macros + * ================== + */ + +/* return rate in unit of Kbps */ +#define RSPEC2KBPS(rspec) wf_rspec_to_rate(rspec) + +/* return rate in unit of 500Kbps */ +#define RSPEC2RATE(rspec) ((rspec) & WL_RSPEC_LEGACY_RATE_MASK) + +/** + * ================================= + * Macros to use the rate_info table + * ================================= + */ +/* phy_rate 
table index is in [500kbps] units */ +#define WLC_MAXRATE 108 /**< in 500kbps units */ +extern const uint8 rate_info[]; +/* phy_rate table value is encoded */ +#define RATE_INFO_OFDM_MASK 0x80 /* ofdm mask */ +#define RATE_INFO_RATE_MASK 0x7f /* rate signal index mask */ +#define RATE_INFO_M_RATE_MASK 0x0f /* M_RATE_TABLE index mask */ +#define RATE_INFO_RATE_ISCCK(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] > 0) +#define RATE_INFO_RATE_ISOFDM(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] < 0) + +/** + * =================== + * function prototypes + * =================== + */ +ratespec_t wf_vht_plcp_to_rspec(uint8 *plcp); +ratespec_t wf_he_plcp_to_rspec(uint8 *plcp); +uint wf_rspec_to_rate(ratespec_t rspec); + +#endif /* _bcmwifi_rspec_h_ */ diff --git a/bcmdhd.100.10.315.x/bcmxtlv.c b/bcmdhd.100.10.315.x/bcmxtlv.c new file mode 100644 index 0000000..d15a3d3 --- /dev/null +++ b/bcmdhd.100.10.315.x/bcmxtlv.c @@ -0,0 +1,612 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmxtlv.c 700655 2017-05-20 06:09:06Z $ + */ + +#include + +#include +#include + +#include + +#ifdef BCMDRIVER +#include +#else /* !BCMDRIVER */ +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif // endif +#endif /* !BCMDRIVER */ + +#include +#include +#include + +int +bcm_xtlv_hdr_size(bcm_xtlv_opts_t opts) +{ + int len = (int)OFFSETOF(bcm_xtlv_t, data); /* nominal */ + if (opts & BCM_XTLV_OPTION_LENU8) --len; + if (opts & BCM_XTLV_OPTION_IDU8) --len; + + return len; +} + +bool +bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts) +{ + return elt != NULL && + buf_len >= bcm_xtlv_hdr_size(opts) && + buf_len >= bcm_xtlv_size(elt, opts); +} + +int +bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts) +{ + int hsz; + + hsz = bcm_xtlv_hdr_size(opts); + return ((opts & BCM_XTLV_OPTION_ALIGN32) ? 
ALIGN_SIZE(dlen + hsz, 4) + : (dlen + hsz)); +} + +int +bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + int size; /* size including header, data, and any pad */ + int len; /* length wthout padding */ + + len = BCM_XTLV_LEN_EX(elt, opts); + size = bcm_xtlv_size_for_data(len, opts); + return size; +} + +int +bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + const uint8 *lenp; + int len; + + lenp = (const uint8 *)&elt->len; /* nominal */ + if (opts & BCM_XTLV_OPTION_IDU8) --lenp; + + if (opts & BCM_XTLV_OPTION_LENU8) + len = *lenp; + else + len = ltoh16_ua(lenp); + + return len; +} + +int +bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + int id = 0; + if (opts & BCM_XTLV_OPTION_IDU8) + id = *(const uint8 *)elt; + else + id = ltoh16_ua((const uint8 *)elt); + + return id; +} + +bcm_xtlv_t * +bcm_next_xtlv(const bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts) +{ + int sz; + /* advance to next elt */ + sz = BCM_XTLV_SIZE_EX(elt, opts); + elt = (const bcm_xtlv_t*)((const uint8 *)elt + sz); + *buflen -= sz; + + /* validate next elt */ + if (!bcm_valid_xtlv(elt, *buflen, opts)) + return NULL; + + GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); + return (bcm_xtlv_t *)(elt); + GCC_DIAGNOSTIC_POP(); +} + +int +bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts) +{ + if (!tlv_buf || !buf || !len) + return BCME_BADARG; + + tlv_buf->opts = opts; + tlv_buf->size = len; + tlv_buf->head = buf; + tlv_buf->buf = buf; + return BCME_OK; +} + +uint16 +bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf) +{ + uint16 len; + + if (tbuf) + len = (uint16)(tbuf->buf - tbuf->head); + else + len = 0; + + return len; +} + +uint16 +bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf) +{ + uint16 rlen; + if (tbuf) + rlen = tbuf->size - bcm_xtlv_buf_len(tbuf); + else + rlen = 0; + + return rlen; +} + +uint8 * +bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf) +{ + return tbuf ? tbuf->buf : NULL; +} + +uint8 * +bcm_xtlv_head(bcm_xtlvbuf_t *tbuf) +{ + return tbuf ? 
tbuf->head : NULL; +} + +void +bcm_xtlv_pack_xtlv(bcm_xtlv_t *xtlv, uint16 type, uint16 len, const uint8 *data, + bcm_xtlv_opts_t opts) +{ + uint8 *data_buf; + bcm_xtlv_opts_t mask = BCM_XTLV_OPTION_IDU8 | BCM_XTLV_OPTION_LENU8; + + if (!(opts & mask)) { /* default */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + sizeof(xtlv->id); + htol16_ua_store(type, idp); + htol16_ua_store(len, lenp); + data_buf = lenp + sizeof(uint16); + } else if ((opts & mask) == mask) { /* u8 id and u8 len */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + 1; + *idp = (uint8)type; + *lenp = (uint8)len; + data_buf = lenp + sizeof(uint8); + } else if (opts & BCM_XTLV_OPTION_IDU8) { /* u8 id, u16 len */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + 1; + *idp = (uint8)type; + htol16_ua_store(len, lenp); + data_buf = lenp + sizeof(uint16); + } else if (opts & BCM_XTLV_OPTION_LENU8) { /* u16 id, u8 len */ + uint8 *idp = (uint8 *)xtlv; + uint8 *lenp = idp + sizeof(uint16); + htol16_ua_store(type, idp); + *lenp = (uint8)len; + data_buf = lenp + sizeof(uint8); + } else { + ASSERT(!"Unexpected xtlv option"); + return; + } + + if (opts & BCM_XTLV_OPTION_LENU8) { + ASSERT(len <= 0x00ff); + len &= 0xff; + } + + if (data != NULL) + memcpy(data_buf, data, len); +} + +/* xtlv header is always packed in LE order */ +void +bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len, + const uint8 **data, bcm_xtlv_opts_t opts) +{ + if (type) + *type = (uint16)bcm_xtlv_id(xtlv, opts); + if (len) + *len = (uint16)bcm_xtlv_len(xtlv, opts); + if (data) + *data = (const uint8 *)xtlv + BCM_XTLV_HDR_SIZE_EX(opts); +} + +int +bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + + size = bcm_xtlv_size_for_data(n, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + bcm_xtlv_pack_xtlv(xtlv, type, (uint16)n, data, 
tbuf->opts); + tbuf->buf += size; /* note: data may be NULL, reserves space */ + return BCME_OK; +} + +static int +bcm_xtlv_put_int(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n, int int_sz) +{ + bcm_xtlv_t *xtlv; + int xtlv_len; + uint8 *xtlv_data; + int err = BCME_OK; + + if (tbuf == NULL) { + err = BCME_BADARG; + goto done; + } + + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + + /* put type and length in xtlv and reserve data space */ + xtlv_len = n * int_sz; + err = bcm_xtlv_put_data(tbuf, type, NULL, xtlv_len); + if (err != BCME_OK) + goto done; + + xtlv_data = (uint8 *)xtlv + bcm_xtlv_hdr_size(tbuf->opts); + + /* write data w/ little-endianness into buffer - single loop, aligned access */ + for (; n != 0; --n, xtlv_data += int_sz, data += int_sz) { + switch (int_sz) { + case sizeof(uint8): + break; + case sizeof(uint16): + { + uint16 v = load16_ua(data); + htol16_ua_store(v, xtlv_data); + break; + } + case sizeof(uint32): + { + uint32 v = load32_ua(data); + htol32_ua_store(v, xtlv_data); + break; + } + case sizeof(uint64): + { + uint64 v = load64_ua(data); + htol64_ua_store(v, xtlv_data); + break; + } + default: + err = BCME_UNSUPPORTED; + goto done; + } + } + +done: + return err; +} + +int +bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n) +{ + return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint16)); +} + +int +bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n) +{ + return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint32)); +} + +int +bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n) +{ + return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint64)); +} + +/* + * upacks xtlv record from buf checks the type + * copies data to callers buffer + * advances tlv pointer to next record + * caller's resposible for dst space check + */ +int +bcm_unpack_xtlv_entry(const uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len, + uint8 
*dst_data, bcm_xtlv_opts_t opts) +{ + const bcm_xtlv_t *ptlv = (const bcm_xtlv_t *)*tlv_buf; + uint16 len; + uint16 type; + const uint8 *data; + + ASSERT(ptlv); + + bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts); + if (len) { + if ((type != xpct_type) || (len > xpct_len)) + return BCME_BADARG; + if (dst_data && data) + memcpy(dst_data, data, len); /* copy data to dst */ + } + + *tlv_buf += BCM_XTLV_SIZE_EX(ptlv, opts); + return BCME_OK; +} + +/* + * packs user data into tlv record and advances tlv pointer to next xtlv slot + * buflen is used for tlv_buf space check + */ +int +bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, + const uint8 *src_data, bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf; + int size; + + ASSERT(ptlv); + + size = bcm_xtlv_size_for_data(len, opts); + + /* copy data from tlv buffer to dst provided by user */ + if (size > *buflen) + return BCME_BADLEN; + + bcm_xtlv_pack_xtlv(ptlv, type, len, src_data, opts); + + /* advance callers pointer to tlv buff */ + *tlv_buf = (uint8*)(*tlv_buf) + size; + /* decrement the len */ + *buflen -= (uint16)size; + return BCME_OK; +} + +/* + * unpack all xtlv records from the issue a callback + * to set function one call per found tlv record + */ +int +bcm_unpack_xtlv_buf(void *ctx, const uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_xtlv_unpack_cbfn_t *cbfn) +{ + uint16 len; + uint16 type; + int res = BCME_OK; + int size; + const bcm_xtlv_t *ptlv; + int sbuflen = buflen; + const uint8 *data; + int hdr_size; + + ASSERT(!buflen || tlv_buf); + ASSERT(!buflen || cbfn); + + hdr_size = BCM_XTLV_HDR_SIZE_EX(opts); + while (sbuflen >= hdr_size) { + ptlv = (const bcm_xtlv_t *)tlv_buf; + + bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts); + size = bcm_xtlv_size_for_data(len, opts); + + sbuflen -= size; + if (sbuflen < 0) /* check for buffer overrun */ + break; + + if ((res = cbfn(ctx, data, type, len)) != BCME_OK) + break; + tlv_buf += size; + } + 
return res; +} + +int +bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next, + int *outlen) +{ + int res = BCME_OK; + uint16 tlv_id; + uint16 tlv_len; + uint8 *startp; + uint8 *endp; + uint8 *buf; + bool more; + int size; + int hdr_size; + + ASSERT(get_next && pack_next); + + buf = tlv_buf; + startp = buf; + endp = (uint8 *)buf + buflen; + more = TRUE; + hdr_size = BCM_XTLV_HDR_SIZE_EX(opts); + + while (more && (buf < endp)) { + more = get_next(ctx, &tlv_id, &tlv_len); + size = bcm_xtlv_size_for_data(tlv_len, opts); + if ((buf + size) > endp) { + res = BCME_BUFTOOSHORT; + goto done; + } + + bcm_xtlv_pack_xtlv((bcm_xtlv_t *)buf, tlv_id, tlv_len, NULL, opts); + pack_next(ctx, tlv_id, tlv_len, buf + hdr_size); + buf += size; + } + + if (more) + res = BCME_BUFTOOSHORT; + +done: + if (outlen) { + *outlen = (int)(buf - startp); + } + return res; +} + +/* + * pack xtlv buffer from memory according to xtlv_desc_t + */ +int +bcm_pack_xtlv_buf_from_mem(uint8 **tlv_buf, uint16 *buflen, const xtlv_desc_t *items, + bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + uint8 *ptlv = *tlv_buf; + + while (items->type != 0) { + if (items->len && items->ptr) { + res = bcm_pack_xtlv_entry(&ptlv, buflen, items->type, + items->len, items->ptr, opts); + if (res != BCME_OK) + break; + } + items++; + } + + *tlv_buf = ptlv; /* update the external pointer */ + return res; +} + +/* + * unpack xtlv buffer to memory according to xtlv_desc_t + * + */ +int +bcm_unpack_xtlv_buf_to_mem(uint8 *tlv_buf, int *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + bcm_xtlv_t *elt; + + elt = bcm_valid_xtlv((bcm_xtlv_t *)tlv_buf, *buflen, opts) ? 
(bcm_xtlv_t *)tlv_buf : NULL; + if (!elt || !items) { + res = BCME_BADARG; + return res; + } + + for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) { + /* find matches in desc_t items */ + xtlv_desc_t *dst_desc = items; + uint16 len, type; + const uint8 *data; + + bcm_xtlv_unpack_xtlv(elt, &type, &len, &data, opts); + while (dst_desc->type != 0) { + if (type == dst_desc->type) { + if (len != dst_desc->len) { + res = BCME_BADLEN; + } else { + memcpy(dst_desc->ptr, data, len); + } + break; + } + dst_desc++; + } + } + + if (res == BCME_OK && *buflen != 0) + res = BCME_BUFTOOSHORT; + + return res; +} + +/* + * return data pointer of a given ID from xtlv buffer. + * If the specified xTLV ID is found, on return *datalen will contain + * the the data length of the xTLV ID. + */ +const uint8* +bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen, uint16 id, + uint16 *datalen, bcm_xtlv_opts_t opts) +{ + const uint8 *retptr = NULL; + uint16 type, len; + int size; + const bcm_xtlv_t *ptlv; + int sbuflen = buflen; + const uint8 *data; + int hdr_size; + + hdr_size = BCM_XTLV_HDR_SIZE_EX(opts); + + /* Init the datalength */ + if (datalen) { + *datalen = 0; + } + while (sbuflen >= hdr_size) { + ptlv = (const bcm_xtlv_t *)tlv_buf; + bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts); + + size = bcm_xtlv_size_for_data(len, opts); + sbuflen -= size; + if (sbuflen < 0) /* buffer overrun? */ + break; + + if (id == type) { + retptr = data; + if (datalen) + *datalen = len; + break; + } + + tlv_buf += size; + } + + return retptr; +} + +bcm_xtlv_t* +bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst, + int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *dst_next = NULL; + src = (src && bcm_valid_xtlv(src, src_buf_len, opts)) ? 
src : NULL; + if (src && dst) { + uint16 type; + uint16 len; + const uint8 *data; + int size; + bcm_xtlv_unpack_xtlv(src, &type, &len, &data, opts); + size = bcm_xtlv_size_for_data(len, opts); + if (size <= dst_buf_len) { + bcm_xtlv_pack_xtlv(dst, type, len, data, opts); + dst_next = (bcm_xtlv_t *)((uint8 *)dst + size); + } + } + + return dst_next; +} diff --git a/bcmdhd.100.10.315.x/dbus.c b/bcmdhd.100.10.315.x/dbus.c new file mode 100644 index 0000000..39feab3 --- /dev/null +++ b/bcmdhd.100.10.315.x/dbus.c @@ -0,0 +1,2929 @@ +/** @file dbus.c + * + * Hides details of USB / SDIO / SPI interfaces and OS details. It is intended to shield details and + * provide the caller with one common bus interface for all dongle devices. In practice, it is only + * used for USB interfaces. DBUS is not a protocol, but an abstraction layer. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dbus.c 553311 2015-04-29 10:23:08Z $ + */ + + +#include "osl.h" +#include "dbus.h" +#include +#include +#include +#include +#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */ +#include +#endif +#include + +#if defined(BCM_REQUEST_FW) +#include +#include +#include +#include +#include +#include +#include +#endif + + + +#if defined(BCM_REQUEST_FW) +#ifndef VARS_MAX +#define VARS_MAX 8192 +#endif +#endif + +#ifdef DBUS_USB_LOOPBACK +extern bool is_loopback_pkt(void *buf); +extern int matches_loopback_pkt(void *buf); +#endif + +/** General info for all BUS types */ +typedef struct dbus_irbq { + dbus_irb_t *head; + dbus_irb_t *tail; + int cnt; +} dbus_irbq_t; + +/** + * This private structure dhd_bus_t is also declared in dbus_usb_linux.c. + * All the fields must be consistent in both declarations. + */ +typedef struct dhd_bus { + dbus_pub_t pub; /* MUST BE FIRST */ + dhd_pub_t *dhd; + + void *cbarg; + dbus_callbacks_t *cbs; /* callbacks to higher level, e.g. dhd_linux.c */ + void *bus_info; + dbus_intf_t *drvintf; /* callbacks to lower level, e.g. 
dbus_usb.c or dbus_usb_linux.c */ + uint8 *fw; + int fwlen; + uint32 errmask; + int rx_low_watermark; /* avoid rx overflow by filling rx with free IRBs */ + int tx_low_watermark; + bool txoff; + bool txoverride; /* flow control related */ + bool rxoff; + bool tx_timer_ticking; + + + dbus_irbq_t *rx_q; + dbus_irbq_t *tx_q; + + uint8 *nvram; + int nvram_len; + uint8 *image; /* buffer for combine fw and nvram */ + int image_len; + uint8 *orig_fw; + int origfw_len; + int decomp_memsize; + dbus_extdl_t extdl; + int nvram_nontxt; +#if defined(BCM_REQUEST_FW) + void *firmware; + void *nvfile; +#endif + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ +} dhd_bus_t; + +struct exec_parms { + union { + /* Can consolidate same params, if need be, but this shows + * group of parameters per function + */ + struct { + dbus_irbq_t *q; + dbus_irb_t *b; + } qenq; + + struct { + dbus_irbq_t *q; + } qdeq; + }; +}; + +#define EXEC_RXLOCK(info, fn, a) \ + info->drvintf->exec_rxlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a)) + +#define EXEC_TXLOCK(info, fn, a) \ + info->drvintf->exec_txlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a)) + +/* + * Callbacks common for all BUS + */ +static void dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb); +static void dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status); +static void dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status); +static void dbus_if_errhandler(void *handle, int err); +static void dbus_if_ctl_complete(void *handle, int type, int status); +static void dbus_if_state_change(void *handle, int state); +static void *dbus_if_pktget(void *handle, uint len, bool send); +static void dbus_if_pktfree(void *handle, void *p, bool send); +static struct dbus_irb *dbus_if_getirb(void *cbarg, bool send); +static void dbus_if_rxerr_indicate(void *handle, bool on); + +void * 
dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen); +void dhd_dbus_disconnect_cb(void *arg); +void dbus_detach(dhd_bus_t *pub); + +/** functions in this file that are called by lower DBUS levels, e.g. dbus_usb.c */ +static dbus_intf_callbacks_t dbus_intf_cbs = { + dbus_if_send_irb_timeout, + dbus_if_send_irb_complete, + dbus_if_recv_irb_complete, + dbus_if_errhandler, + dbus_if_ctl_complete, + dbus_if_state_change, + NULL, /* isr */ + NULL, /* dpc */ + NULL, /* watchdog */ + dbus_if_pktget, + dbus_if_pktfree, + dbus_if_getirb, + dbus_if_rxerr_indicate +}; + +/* + * Need global for probe() and disconnect() since + * attach() is not called at probe and detach() + * can be called inside disconnect() + */ +static dbus_intf_t *g_busintf = NULL; +static probe_cb_t probe_cb = NULL; +static disconnect_cb_t disconnect_cb = NULL; +static void *probe_arg = NULL; +static void *disc_arg = NULL; + +#if defined(BCM_REQUEST_FW) +int8 *nonfwnvram = NULL; /* stand-alone multi-nvram given with driver load */ +int nonfwnvramlen = 0; +#endif /* #if defined(BCM_REQUEST_FW) */ + +static void* q_enq(dbus_irbq_t *q, dbus_irb_t *b); +static void* q_enq_exec(struct exec_parms *args); +static dbus_irb_t*q_deq(dbus_irbq_t *q); +static void* q_deq_exec(struct exec_parms *args); +static int dbus_tx_timer_init(dhd_bus_t *dhd_bus); +static int dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout); +static int dbus_tx_timer_stop(dhd_bus_t *dhd_bus); +static int dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb); +static int dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb); +static int dbus_rxirbs_fill(dhd_bus_t *dhd_bus); +static int dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info); +static void dbus_disconnect(void *handle); +static void *dbus_probe(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen); + +#if defined(BCM_REQUEST_FW) +extern 
char * dngl_firmware; +extern unsigned int dngl_fwlen; +#ifndef EXTERNAL_FW_PATH +static int dbus_get_nvram(dhd_bus_t *dhd_bus); +static int dbus_jumbo_nvram(dhd_bus_t *dhd_bus); +static int dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev); +static int dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen, +uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len); +#endif /* !EXTERNAL_FW_PATH */ +extern int dbus_zlib_decomp(dhd_bus_t *dhd_bus); +extern void *dbus_zlib_calloc(int num, int size); +extern void dbus_zlib_free(void *ptr); +#endif + +/* function */ +void +dbus_flowctrl_tx(void *dbi, bool on) +{ + dhd_bus_t *dhd_bus = dbi; + + if (dhd_bus == NULL) + return; + + DBUSTRACE(("%s on %d\n", __FUNCTION__, on)); + + if (dhd_bus->txoff == on) + return; + + dhd_bus->txoff = on; + + if (dhd_bus->cbs && dhd_bus->cbs->txflowcontrol) + dhd_bus->cbs->txflowcontrol(dhd_bus->cbarg, on); +} + +/** + * if lower level DBUS signaled a rx error, more free rx IRBs should be allocated or flow control + * should kick in to make more free rx IRBs available. 
+ */ +static void +dbus_if_rxerr_indicate(void *handle, bool on) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) handle; + + DBUSTRACE(("%s, on %d\n", __FUNCTION__, on)); + + if (dhd_bus == NULL) + return; + + if (dhd_bus->txoverride == on) + return; + + dhd_bus->txoverride = on; /* flow control */ + + if (!on) + dbus_rxirbs_fill(dhd_bus); + +} + +/** q_enq()/q_deq() are executed with protection via exec_rxlock()/exec_txlock() */ +static void* +q_enq(dbus_irbq_t *q, dbus_irb_t *b) +{ + ASSERT(q->tail != b); + ASSERT(b->next == NULL); + b->next = NULL; + if (q->tail) { + q->tail->next = b; + q->tail = b; + } else + q->head = q->tail = b; + + q->cnt++; + + return b; +} + +static void* +q_enq_exec(struct exec_parms *args) +{ + return q_enq(args->qenq.q, args->qenq.b); +} + +static dbus_irb_t* +q_deq(dbus_irbq_t *q) +{ + dbus_irb_t *b; + + b = q->head; + if (b) { + q->head = q->head->next; + b->next = NULL; + + if (q->head == NULL) + q->tail = q->head; + + q->cnt--; + } + return b; +} + +static void* +q_deq_exec(struct exec_parms *args) +{ + return q_deq(args->qdeq.q); +} + +/** + * called during attach phase. Status @ Dec 2012: this function does nothing since for all of the + * lower DBUS levels dhd_bus->drvintf->tx_timer_init is NULL. 
+ */ +static int +dbus_tx_timer_init(dhd_bus_t *dhd_bus) +{ + if (dhd_bus && dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_init) + return dhd_bus->drvintf->tx_timer_init(dhd_bus->bus_info); + else + return DBUS_ERR; +} + +static int +dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout) +{ + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->tx_timer_ticking) + return DBUS_OK; + + if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_start) { + if (dhd_bus->drvintf->tx_timer_start(dhd_bus->bus_info, timeout) == DBUS_OK) { + dhd_bus->tx_timer_ticking = TRUE; + return DBUS_OK; + } + } + + return DBUS_ERR; +} + +static int +dbus_tx_timer_stop(dhd_bus_t *dhd_bus) +{ + if (dhd_bus == NULL) + return DBUS_ERR; + + if (!dhd_bus->tx_timer_ticking) + return DBUS_OK; + + if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_stop) { + if (dhd_bus->drvintf->tx_timer_stop(dhd_bus->bus_info) == DBUS_OK) { + dhd_bus->tx_timer_ticking = FALSE; + return DBUS_OK; + } + } + + return DBUS_ERR; +} + +/** called during attach phase. 
*/ +static int +dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb) +{ + int i; + dbus_irb_t *irb; + + ASSERT(q); + ASSERT(dhd_bus); + + for (i = 0; i < nq; i++) { + /* MALLOC dbus_irb_tx or dbus_irb_rx, but cast to simple dbus_irb_t linkedlist */ + irb = (dbus_irb_t *) MALLOC(dhd_bus->pub.osh, size_irb); + if (irb == NULL) { + ASSERT(irb); + return DBUS_ERR; + } + bzero(irb, size_irb); + + /* q_enq() does not need to go through EXEC_xxLOCK() during init() */ + q_enq(q, irb); + } + + return DBUS_OK; +} + +/** called during detach phase or when attach failed */ +static int +dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb) +{ + dbus_irb_t *irb; + + ASSERT(q); + ASSERT(dhd_bus); + + /* q_deq() does not need to go through EXEC_xxLOCK() + * during deinit(); all callbacks are stopped by this time + */ + while ((irb = q_deq(q)) != NULL) { + MFREE(dhd_bus->pub.osh, irb, size_irb); + } + + if (q->cnt) + DBUSERR(("deinit: q->cnt=%d > 0\n", q->cnt)); + return DBUS_OK; +} + +/** multiple code paths require the rx queue to be filled with more free IRBs */ +static int +dbus_rxirbs_fill(dhd_bus_t *dhd_bus) +{ + int err = DBUS_OK; + + + dbus_irb_rx_t *rxirb; + struct exec_parms args; + + ASSERT(dhd_bus); + if (dhd_bus->pub.busstate != DBUS_STATE_UP) { + DBUSERR(("dbus_rxirbs_fill: DBUS not up \n")); + return DBUS_ERR; + } else if (!dhd_bus->drvintf || (dhd_bus->drvintf->recv_irb == NULL)) { + /* Lower edge bus interface does not support recv_irb(). + * No need to pre-submit IRBs in this case. + */ + return DBUS_ERR; + } + + /* The dongle recv callback is freerunning without lock. So multiple callbacks(and this + * refill) can run in parallel. While the rxoff condition is triggered outside, + * below while loop has to check and abort posting more to avoid RPC rxq overflow. 
+ */ + args.qdeq.q = dhd_bus->rx_q; + while ((!dhd_bus->rxoff) && + (rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) { + err = dhd_bus->drvintf->recv_irb(dhd_bus->bus_info, rxirb); + if (err == DBUS_ERR_RXDROP || err == DBUS_ERR_RXFAIL) { + /* Add the the free rxirb back to the queue + * and wait till later + */ + bzero(rxirb, sizeof(dbus_irb_rx_t)); + args.qenq.q = dhd_bus->rx_q; + args.qenq.b = (dbus_irb_t *) rxirb; + EXEC_RXLOCK(dhd_bus, q_enq_exec, &args); + break; + } else if (err != DBUS_OK) { + int i = 0; + while (i++ < 100) { + DBUSERR(("%s :: memory leak for rxirb note?\n", __FUNCTION__)); + } + } + } + return err; +} /* dbus_rxirbs_fill */ + +/** called when the DBUS interface state changed. */ +void +dbus_flowctrl_rx(dbus_pub_t *pub, bool on) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if (dhd_bus == NULL) + return; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus->rxoff == on) + return; + + dhd_bus->rxoff = on; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + if (!on) { + /* post more irbs, resume rx if necessary */ + dbus_rxirbs_fill(dhd_bus); + if (dhd_bus && dhd_bus->drvintf->recv_resume) { + dhd_bus->drvintf->recv_resume(dhd_bus->bus_info); + } + } else { + /* ??? cancell posted irbs first */ + + if (dhd_bus && dhd_bus->drvintf->recv_stop) { + dhd_bus->drvintf->recv_stop(dhd_bus->bus_info); + } + } + } +} + +/** + * Several code paths in this file want to send a buffer to the dongle. This function handles both + * sending of a buffer or a pkt. 
+ */ +static int +dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + dbus_irb_tx_t *txirb = NULL; + int txirb_pending; + struct exec_parms args; + + if (dhd_bus == NULL) + return DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + args.qdeq.q = dhd_bus->tx_q; + if (dhd_bus->drvintf) + txirb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args); + + if (txirb == NULL) { + DBUSERR(("Out of tx dbus_bufs\n")); + return DBUS_ERR; + } + + if (pkt != NULL) { + txirb->pkt = pkt; + txirb->buf = NULL; + txirb->len = 0; + } else if (buf != NULL) { + txirb->pkt = NULL; + txirb->buf = buf; + txirb->len = len; + } else { + ASSERT(0); /* Should not happen */ + } + txirb->info = info; + txirb->arg = NULL; + txirb->retry_count = 0; + + if (dhd_bus->drvintf && dhd_bus->drvintf->send_irb) { + /* call lower DBUS level send_irb function */ + err = dhd_bus->drvintf->send_irb(dhd_bus->bus_info, txirb); + if (err == DBUS_ERR_TXDROP) { + /* tx fail and no completion routine to clean up, reclaim irb NOW */ + DBUSERR(("%s: send_irb failed, status = %d\n", __FUNCTION__, err)); + bzero(txirb, sizeof(dbus_irb_tx_t)); + args.qenq.q = dhd_bus->tx_q; + args.qenq.b = (dbus_irb_t *) txirb; + EXEC_TXLOCK(dhd_bus, q_enq_exec, &args); + } else { + dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL); + txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt; + if (txirb_pending > (dhd_bus->tx_low_watermark * 3)) { + dbus_flowctrl_tx(dhd_bus, TRUE); + } + } + } + } else { + err = DBUS_ERR_TXFAIL; + DBUSTRACE(("%s: bus down, send_irb failed\n", __FUNCTION__)); + } + + return err; +} /* dbus_send_irb */ + +#if defined(BCM_REQUEST_FW) + +/** + * Before downloading a firmware image into the dongle, the validity of the image must be checked. 
+ */ +static int +check_file(osl_t *osh, unsigned char *headers) +{ + struct trx_header *trx; + int actual_len = -1; + + /* Extract trx header */ + trx = (struct trx_header *)headers; + if (ltoh32(trx->magic) != TRX_MAGIC) { + printf("Error: trx bad hdr %x\n", ltoh32(trx->magic)); + return -1; + } + + headers += SIZEOF_TRX(trx); + + /* TRX V1: get firmware len */ + /* TRX V2: get firmware len and DSG/CFG lengths */ + if (ltoh32(trx->flag_version) & TRX_UNCOMP_IMAGE) { + actual_len = ltoh32(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]) + + SIZEOF_TRX(trx); +#ifdef BCMTRXV2 + if (ISTRX_V2(trx)) { + actual_len += ltoh32(trx->offsets[TRX_OFFSETS_DSG_LEN_IDX]) + + ltoh32(trx->offsets[TRX_OFFSETS_CFG_LEN_IDX]); + } +#endif + return actual_len; + } else { + printf("compressed image\n"); + } + + return -1; +} + +#ifdef EXTERNAL_FW_PATH +static int +dbus_get_fw_nvram(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path) +{ + int bcmerror = -1, i; + uint len, total_len; + void *nv_image = NULL, *fw_image = NULL; + char *nv_memblock = NULL, *fw_memblock = NULL; + char *bufp; + bool file_exists; + uint8 nvram_words_pad = 0; + uint memblock_size = 2048; + uint8 *memptr; + int actual_fwlen; + struct trx_header *hdr; + uint32 img_offset = 0; + int offset = 0; + + /* For Get nvram */ + file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + if (file_exists) { + nv_image = dhd_os_open_image1(dhd_bus->dhd, pnv_path); + if (nv_image == NULL) { + printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path); + goto err; + } + } + nv_memblock = MALLOC(dhd_bus->pub.osh, MAX_NVRAMBUF_SIZE); + if (nv_memblock == NULL) { + DBUSERR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + len = dhd_os_get_image_block(nv_memblock, MAX_NVRAMBUF_SIZE, nv_image); + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { + bufp = (char *)nv_memblock; + bufp[len] = 0; + dhd_bus->nvram_len = process_nvram_vars(bufp, len); + if (dhd_bus->nvram_len % 4) + 
nvram_words_pad = 4 - dhd_bus->nvram_len % 4; + } else { + DBUSERR(("%s: error reading nvram file: %d\n", __FUNCTION__, len)); + bcmerror = DBUS_ERR_NVRAM; + goto err; + } + if (nv_image) + dhd_os_close_image1(dhd_bus->dhd, nv_image); + + /* For Get first block of fw to calculate total_len */ + file_exists = ((pfw_path != NULL) && (pfw_path[0] != '\0')); + if (file_exists) { + fw_image = dhd_os_open_image1(dhd_bus->dhd, pfw_path); + if (fw_image == NULL) { + printf("%s: Open fw file failed %s\n", __FUNCTION__, pfw_path); + goto err; + } + } + memptr = fw_memblock = MALLOC(dhd_bus->pub.osh, memblock_size); + if (fw_memblock == NULL) { + DBUSERR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + memblock_size)); + goto err; + } + len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image); + if ((actual_fwlen = check_file(dhd_bus->pub.osh, memptr)) <= 0) { + DBUSERR(("%s: bad firmware format!\n", __FUNCTION__)); + goto err; + } + + total_len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad; + dhd_bus->image = MALLOC(dhd_bus->pub.osh, total_len); + dhd_bus->image_len = total_len; + if (dhd_bus->image == NULL) { + DBUSERR(("%s: malloc failed!\n", __FUNCTION__)); + goto err; + } + + /* Step1: Copy trx header + firmwre */ + memptr = fw_memblock; + do { + if (len < 0) { + DBUSERR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + bcopy(memptr, dhd_bus->image+offset, len); + offset += len; + } while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image))); + /* Step2: Copy NVRAM + pad */ + hdr = (struct trx_header *)dhd_bus->image; + img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX]; + bcopy(nv_memblock, (uint8 *)(dhd_bus->image + img_offset), + dhd_bus->nvram_len); + img_offset += dhd_bus->nvram_len; + if (nvram_words_pad) { + bzero(&dhd_bus->image[img_offset], nvram_words_pad); + img_offset += nvram_words_pad; + } +#ifdef BCMTRXV2 + /* Step3: Copy DSG/CFG 
for V2 */ + if (ISTRX_V2(hdr) && + (hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] || + hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) { + DBUSERR(("%s: fix me\n", __FUNCTION__)); + } +#endif /* BCMTRXV2 */ + /* Step4: update TRX header for nvram size */ + hdr = (struct trx_header *)dhd_bus->image; + hdr->len = htol32(total_len); + /* Pass the actual fw len */ + hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] = + htol32(dhd_bus->nvram_len + nvram_words_pad); + /* Calculate CRC over header */ + hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version, + SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version), + CRC32_INIT_VALUE); + + /* Calculate CRC over data */ + for (i = SIZEOF_TRX(hdr); i < total_len; ++i) + hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32); + hdr->crc32 = htol32(hdr->crc32); + + bcmerror = DBUS_OK; + +err: + if (fw_memblock) + MFREE(dhd_bus->pub.osh, fw_memblock, MAX_NVRAMBUF_SIZE); + if (fw_image) + dhd_os_close_image1(dhd_bus->dhd, fw_image); + if (nv_memblock) + MFREE(dhd_bus->pub.osh, nv_memblock, MAX_NVRAMBUF_SIZE); + if (nv_image) + dhd_os_close_image1(dhd_bus->dhd, nv_image); + + return bcmerror; +} + +/** + * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into + * the dongle + */ +static int +dbus_do_download(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path) +{ + int err = DBUS_OK; + + err = dbus_get_fw_nvram(dhd_bus, pfw_path, pnv_path); + if (err) { + DBUSERR(("dbus_do_download: fail to get nvram %d\n", err)); + return err; + } + + if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) { + err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info, + dhd_bus->image, dhd_bus->image_len); + if (err == DBUS_OK) { + err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info); + } + } else + err = DBUS_ERR; + + if (dhd_bus->image) { + MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len); + dhd_bus->image = NULL; + dhd_bus->image_len = 0; + } + + return err; +} /* dbus_do_download */ +#else + +/** + * It is easy for 
the user to pass one jumbo nvram file to the driver than a set of smaller files.
 * The 'jumbo nvram' file format is essentially a set of nvram files. Before commencing firmware
 * download, the dongle needs to be probed so that the correct nvram contents within the jumbo nvram
 * file is selected.
 */
static int
dbus_jumbo_nvram(dhd_bus_t *dhd_bus)
{
	int8 *nvram = NULL;
	int nvram_len = 0;
	int ret = DBUS_OK;
	uint16 boardrev = 0xFFFF;	/* 0xFFFF == "not found in OTP" */
	uint16 boardtype = 0xFFFF;

	/* read the otp for boardrev & boardtype
	 * if boardtype/rev are present in otp
	 * select nvram data for that boardtype/rev
	 */
	dbus_otp(dhd_bus, &boardtype, &boardrev);

	ret = dbus_select_nvram(dhd_bus, dhd_bus->extdl.vars, dhd_bus->extdl.varslen,
		boardtype, boardrev, &nvram, &nvram_len);

	if (ret == DBUS_JUMBO_BAD_FORMAT)
		return DBUS_ERR_NVRAM;
	else if (ret == DBUS_JUMBO_NOMATCH &&
		(boardtype != 0xFFFF || boardrev != 0xFFFF)) {
		/* a specific board was identified but no matching section exists */
		DBUSERR(("No matching NVRAM for boardtype 0x%02x boardrev 0x%02x\n",
			boardtype, boardrev));
		return DBUS_ERR_NVRAM;
	}
	/* hand the selected section to the download path */
	dhd_bus->nvram = nvram;
	dhd_bus->nvram_len = nvram_len;

	return DBUS_OK;
}

/** before commencing fw download, the correct NVRAM image to download has to be picked */
static int
dbus_get_nvram(dhd_bus_t *dhd_bus)
{
	int len, i;
	struct trx_header *hdr;
	int actual_fwlen;
	uint32 img_offset = 0;

	dhd_bus->nvram_len = 0;
	/* priority 1: nvram embedded in the externally supplied download blob */
	if (dhd_bus->extdl.varslen) {
		if (DBUS_OK != dbus_jumbo_nvram(dhd_bus))
			return DBUS_ERR_NVRAM;
		DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
	}
#if defined(BCM_REQUEST_FW)
	/* priority 2: stand-alone nvram captured at driver load (module global) */
	else if (nonfwnvram) {
		dhd_bus->nvram = nonfwnvram;
		dhd_bus->nvram_len = nonfwnvramlen;
		DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
	}
#endif
	if (dhd_bus->nvram) {
		uint8 nvram_words_pad = 0;
		/* Validate the format/length etc of the file */
		if ((actual_fwlen = check_file(dhd_bus->pub.osh, dhd_bus->fw)) <= 0) {
			DBUSERR(("%s: bad firmware format!\n", __FUNCTION__));
			return DBUS_ERR_NVRAM;
		}

		if (!dhd_bus->nvram_nontxt) {
			/* host supplied nvram could be in .txt format
			 * with all the comments etc...
			 */
			dhd_bus->nvram_len = process_nvram_vars(dhd_bus->nvram,
				dhd_bus->nvram_len);
		}
		/* pad nvram to a 4-byte boundary */
		if (dhd_bus->nvram_len % 4)
			nvram_words_pad = 4 - dhd_bus->nvram_len % 4;

		len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad;
		dhd_bus->image = MALLOC(dhd_bus->pub.osh, len);
		dhd_bus->image_len = len;
		if (dhd_bus->image == NULL) {
			DBUSERR(("%s: malloc failed!\n", __FUNCTION__));
			return DBUS_ERR_NVRAM;
		}
		hdr = (struct trx_header *)dhd_bus->fw;
		/* Step1: Copy trx header + firmwre */
		img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX];
		bcopy(dhd_bus->fw, dhd_bus->image, img_offset);
		/* Step2: Copy NVRAM + pad */
		bcopy(dhd_bus->nvram, (uint8 *)(dhd_bus->image + img_offset),
			dhd_bus->nvram_len);
		img_offset += dhd_bus->nvram_len;
		if (nvram_words_pad) {
			bzero(&dhd_bus->image[img_offset],
				nvram_words_pad);
			img_offset += nvram_words_pad;
		}
#ifdef BCMTRXV2
		/* Step3: Copy DSG/CFG for V2 */
		if (ISTRX_V2(hdr) &&
			(hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] ||
			hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) {

			bcopy(dhd_bus->fw + SIZEOF_TRX(hdr) +
				hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX] +
				hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX],
				dhd_bus->image + img_offset,
				hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
				hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX]);

			img_offset += hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
				hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX];
		}
#endif /* BCMTRXV2 */
		/* Step4: update TRX header for nvram size */
		hdr = (struct trx_header *)dhd_bus->image;
		hdr->len = htol32(len);
		/* Pass the actual fw len */
		hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] =
			htol32(dhd_bus->nvram_len + nvram_words_pad);
		/* Calculate CRC over header */
		hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version,
			SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version),
			CRC32_INIT_VALUE);

		/* Calculate CRC over data */
		for (i = SIZEOF_TRX(hdr); i < len; ++i)
			hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32);
		hdr->crc32 = htol32(hdr->crc32);
	} else {
		/* no nvram to merge: download the firmware blob as-is */
		dhd_bus->image = dhd_bus->fw;
		dhd_bus->image_len = (uint32)dhd_bus->fwlen;
	}

	return DBUS_OK;
} /* dbus_get_nvram */

/**
 * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into
 * the dongle
 */
static int
dbus_do_download(dhd_bus_t *dhd_bus)
{
	int err = DBUS_OK;
#ifndef BCM_REQUEST_FW
	int decomp_override = 0;
#endif
#ifdef BCM_REQUEST_FW
	uint16 boardrev = 0xFFFF, boardtype = 0xFFFF;
	int8 *temp_nvram;
	int temp_len;
#endif

#if defined(BCM_REQUEST_FW)
	dhd_bus->firmware = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
		dhd_bus->pub.attrib.chiprev, &dhd_bus->fw, &dhd_bus->fwlen,
		DBUS_FIRMWARE, 0, 0);
	if (!dhd_bus->firmware)
		return DBUS_ERR;
#endif

	dhd_bus->image = dhd_bus->fw;
	dhd_bus->image_len = (uint32)dhd_bus->fwlen;

#ifndef BCM_REQUEST_FW
	if (UNZIP_ENAB(dhd_bus) && !decomp_override) {
		err = dbus_zlib_decomp(dhd_bus);
		if (err) {
			DBUSERR(("dbus_attach: fw decompress fail %d\n", err));
			return err;
		}
	}
#endif

#if defined(BCM_REQUEST_FW)
	/* check if firmware is appended with nvram file */
	err = dbus_otp(dhd_bus, &boardtype, &boardrev);
	/* check if nvram is provided as separte file */
	nonfwnvram = NULL;
	nonfwnvramlen = 0;
	dhd_bus->nvfile = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
		dhd_bus->pub.attrib.chiprev, (void *)&temp_nvram, &temp_len,
		DBUS_NVFILE, boardtype, boardrev);
	if (dhd_bus->nvfile) {
		/* keep a private copy; the nvfile buffer is released below */
		int8 *tmp = MALLOC(dhd_bus->pub.osh, temp_len);
		if (tmp) {
			bcopy(temp_nvram, tmp, temp_len);
			nonfwnvram = tmp;
			nonfwnvramlen = temp_len;
		} else {
			err = DBUS_ERR;
			goto fail;
		}
	}
#endif /* defined(BCM_REQUEST_FW) */

	err = dbus_get_nvram(dhd_bus);
	if (err) {
		/* NOTE(review): under BCM_REQUEST_FW this early return skips the fail:
		 * cleanup, leaking dhd_bus->firmware / dhd_bus->nvfile / nonfwnvram —
		 * verify and consider routing through the cleanup path
		 */
		DBUSERR(("dbus_do_download: fail to get nvram %d\n", err));
		return err;
	}


	if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) {
		err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info,
			dhd_bus->image, dhd_bus->image_len);

		if (err == DBUS_OK)
			err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info);
	} else
		err = DBUS_ERR;

	/* a combined fw+nvram image was allocated by dbus_get_nvram(); restore fw */
	if (dhd_bus->nvram) {
		MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len);
		dhd_bus->image = dhd_bus->fw;
		dhd_bus->image_len = (uint32)dhd_bus->fwlen;
	}

#ifndef BCM_REQUEST_FW
	/* undo decompression buffer, pointing back at the original firmware */
	if (UNZIP_ENAB(dhd_bus) && (!decomp_override) && dhd_bus->orig_fw) {
		MFREE(dhd_bus->pub.osh, dhd_bus->fw, dhd_bus->decomp_memsize);
		dhd_bus->image = dhd_bus->fw = dhd_bus->orig_fw;
		dhd_bus->image_len = dhd_bus->fwlen = dhd_bus->origfw_len;
	}
#endif

#if defined(BCM_REQUEST_FW)
fail:
	if (dhd_bus->firmware) {
		dbus_release_fw_nvfile(dhd_bus->firmware);
		dhd_bus->firmware = NULL;
	}
	if (dhd_bus->nvfile) {
		dbus_release_fw_nvfile(dhd_bus->nvfile);
		dhd_bus->nvfile = NULL;
	}
	if (nonfwnvram) {
		MFREE(dhd_bus->pub.osh, nonfwnvram, nonfwnvramlen);
		nonfwnvram = NULL;
		nonfwnvramlen = 0;
	}
#endif
	return err;
} /* dbus_do_download */
#endif /* EXTERNAL_FW_PATH */
#endif

/** required for DBUS deregistration */
static void
dbus_disconnect(void *handle)
{
	DBUSTRACE(("%s\n", __FUNCTION__));

	if (disconnect_cb)
		disconnect_cb(disc_arg);
}

/**
 * This function is called when the sent irb times out without a tx response status.
 * DBUS adds reliability by resending timed out IRBs DBUS_TX_RETRY_LIMIT times.
 */
static void
dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb)
{
	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;

	if ((dhd_bus == NULL) || (dhd_bus->drvintf == NULL) || (txirb == NULL)) {
		return;
	}

	DBUSTRACE(("%s\n", __FUNCTION__));

	/* NOTE(review): the retry behavior described in the header comment is not
	 * implemented here — this is currently a no-op beyond the trace
	 */
	return;

} /* dbus_if_send_irb_timeout */

/**
 * When lower DBUS level signals that a send IRB completed, either successful or not, the higher
 * level (e.g.
dhd_linux.c) has to be notified, and transmit flow control has to be evaluated. + */ +static void BCMFASTPATH +dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) handle; + int txirb_pending; + struct exec_parms args; + void *pktinfo; + + if ((dhd_bus == NULL) || (txirb == NULL)) { + return; + } + + DBUSTRACE(("%s: status = %d\n", __FUNCTION__, status)); + + dbus_tx_timer_stop(dhd_bus); + + /* re-queue BEFORE calling send_complete which will assume that this irb + is now available. + */ + pktinfo = txirb->info; + bzero(txirb, sizeof(dbus_irb_tx_t)); + args.qenq.q = dhd_bus->tx_q; + args.qenq.b = (dbus_irb_t *) txirb; + EXEC_TXLOCK(dhd_bus, q_enq_exec, &args); + + if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) { + if ((status == DBUS_OK) || (status == DBUS_ERR_NODEVICE)) { + if (dhd_bus->cbs && dhd_bus->cbs->send_complete) + dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo, + status); + + if (status == DBUS_OK) { + txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt; + if (txirb_pending) + dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL); + if ((txirb_pending < dhd_bus->tx_low_watermark) && + dhd_bus->txoff && !dhd_bus->txoverride) { + dbus_flowctrl_tx(dhd_bus, OFF); + } + } + } else { + DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__, + pktinfo)); +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) + if (pktinfo) + if (dhd_bus->cbs && dhd_bus->cbs->send_complete) + dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo, + status); +#else + dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE); +#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) */ + } + } else { + DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__, + pktinfo)); +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) + if (pktinfo) + if (dhd_bus->cbs && dhd_bus->cbs->send_complete) + 
dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo, + status); +#else + dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE); +#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) defined(BCM_RPC_TOC) */ + } +} /* dbus_if_send_irb_complete */ + +/** + * When lower DBUS level signals that a receive IRB completed, either successful or not, the higher + * level (e.g. dhd_linux.c) has to be notified, and fresh free receive IRBs may have to be given + * to lower levels. + */ +static void BCMFASTPATH +dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) handle; + int rxirb_pending; + struct exec_parms args; + + if ((dhd_bus == NULL) || (rxirb == NULL)) { + return; + } + DBUSTRACE(("%s\n", __FUNCTION__)); + if (dhd_bus->pub.busstate != DBUS_STATE_DOWN && + dhd_bus->pub.busstate != DBUS_STATE_SLEEP) { + if (status == DBUS_OK) { + if ((rxirb->buf != NULL) && (rxirb->actual_len > 0)) { +#ifdef DBUS_USB_LOOPBACK + if (is_loopback_pkt(rxirb->buf)) { + matches_loopback_pkt(rxirb->buf); + } else +#endif + if (dhd_bus->cbs && dhd_bus->cbs->recv_buf) { + dhd_bus->cbs->recv_buf(dhd_bus->cbarg, rxirb->buf, + rxirb->actual_len); + } + } else if (rxirb->pkt != NULL) { + if (dhd_bus->cbs && dhd_bus->cbs->recv_pkt) + dhd_bus->cbs->recv_pkt(dhd_bus->cbarg, rxirb->pkt); + } else { + ASSERT(0); /* Should not happen */ + } + + rxirb_pending = dhd_bus->pub.nrxq - dhd_bus->rx_q->cnt - 1; + if ((rxirb_pending <= dhd_bus->rx_low_watermark) && + !dhd_bus->rxoff) { + DBUSTRACE(("Low watermark so submit more %d <= %d \n", + dhd_bus->rx_low_watermark, rxirb_pending)); + dbus_rxirbs_fill(dhd_bus); + } else if (dhd_bus->rxoff) + DBUSTRACE(("rx flow controlled. not filling more. 
cut_rxq=%d\n", + dhd_bus->rx_q->cnt)); + } else if (status == DBUS_ERR_NODEVICE) { + DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__, status, + rxirb->buf)); +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + if (rxirb->buf) { + PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf); + PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE); + } +#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY || BCM_RPC_TOC */ + } else { + if (status != DBUS_ERR_RXZLP) + DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__, + status, rxirb->buf)); +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + if (rxirb->buf) { + PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf); + PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE); + } +#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY || BCM_RPC_TOC */ + } + } else { + DBUSTRACE(("%s: DBUS down, ignoring recv callback. buf %p\n", __FUNCTION__, + rxirb->buf)); +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + if (rxirb->buf) { + PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf); + PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE); + } +#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY || BCM_RPC_TOC */ + } + if (dhd_bus->rx_q != NULL) { + bzero(rxirb, sizeof(dbus_irb_rx_t)); + args.qenq.q = dhd_bus->rx_q; + args.qenq.b = (dbus_irb_t *) rxirb; + EXEC_RXLOCK(dhd_bus, q_enq_exec, &args); + } else + MFREE(dhd_bus->pub.osh, rxirb, sizeof(dbus_irb_tx_t)); +} /* dbus_if_recv_irb_complete */ + +/** + * Accumulate errors signaled by lower DBUS levels and signal them to higher (e.g. dhd_linux.c) + * level. 
 */
static void
dbus_if_errhandler(void *handle, int err)
{
	dhd_bus_t *dhd_bus = handle;
	uint32 mask = 0;

	if (dhd_bus == NULL)
		return;

	/* Bump the matching statistics counter and derive the error-class mask
	 * (tx failures and drops map to ERR_CBMASK_TXFAIL, rx ones to
	 * ERR_CBMASK_RXFAIL; any other code updates nothing).
	 */
	switch (err) {
	case DBUS_ERR_TXFAIL:
		dhd_bus->pub.stats.tx_errors++;
		mask |= ERR_CBMASK_TXFAIL;
		break;
	case DBUS_ERR_TXDROP:
		dhd_bus->pub.stats.tx_dropped++;
		mask |= ERR_CBMASK_TXFAIL;
		break;
	case DBUS_ERR_RXFAIL:
		dhd_bus->pub.stats.rx_errors++;
		mask |= ERR_CBMASK_RXFAIL;
		break;
	case DBUS_ERR_RXDROP:
		dhd_bus->pub.stats.rx_dropped++;
		mask |= ERR_CBMASK_RXFAIL;
		break;
	default:
		break;
	}

	/* Only forward error classes the higher layer subscribed to; errmask is
	 * configured through dbus_set_errmask().
	 */
	if (dhd_bus->cbs && dhd_bus->cbs->errhandler && (dhd_bus->errmask & mask))
		dhd_bus->cbs->errhandler(dhd_bus->cbarg, err);
}

/**
 * When lower DBUS level signals control IRB completed, higher level (e.g. dhd_linux.c) has to be
 * notified.
 */
static void
dbus_if_ctl_complete(void *handle, int type, int status)
{
	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;

	DBUSTRACE(("%s\n", __FUNCTION__));

	if (dhd_bus == NULL) {
		DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
		return;
	}

	/* Completions are suppressed once the bus is down */
	if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) {
		if (dhd_bus->cbs && dhd_bus->cbs->ctl_complete)
			dhd_bus->cbs->ctl_complete(dhd_bus->cbarg, type, status);
	}
}

/**
 * Rx related functionality (flow control, posting of free IRBs to rx queue) is dependent upon the
 * bus state. When lower DBUS level signals a change in the interface state, take appropriate action
 * and forward the signaling to the higher (e.g. dhd_linux.c) level.
 */
static void
dbus_if_state_change(void *handle, int state)
{
	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
	int old_state;

	if (dhd_bus == NULL)
		return;

	/* No-op when the state is unchanged */
	if (dhd_bus->pub.busstate == state)
		return;
	old_state = dhd_bus->pub.busstate;
	if (state == DBUS_STATE_DISCONNECT) {
		DBUSERR(("DBUS disconnected\n"));
	}

	/* Ignore USB SUSPEND while not up yet */
	if (state == DBUS_STATE_SLEEP && old_state != DBUS_STATE_UP)
		return;

	DBUSTRACE(("dbus state change from %d to to %d\n", old_state, state));

	/* Don't update state if it's PnP firmware re-download; in that case only
	 * re-enable rx flow instead of recording the transient state.
	 */
	if (state != DBUS_STATE_PNP_FWDL)
		dhd_bus->pub.busstate = state;
	else
		dbus_flowctrl_rx(handle, FALSE);
	if (state == DBUS_STATE_SLEEP)
		dbus_flowctrl_rx(handle, TRUE);
	if (state == DBUS_STATE_UP) {
		/* Coming up: repost receive IRBs and release rx flow control */
		dbus_rxirbs_fill(dhd_bus);
		dbus_flowctrl_rx(handle, FALSE);
	}

	/* Propagate the (new) state to the higher layer */
	if (dhd_bus->cbs && dhd_bus->cbs->state_change)
		dhd_bus->cbs->state_change(dhd_bus->cbarg, state);
}

/** Forward request for packet from lower DBUS layer to higher layer (e.g. dhd_linux.c) */
static void *
dbus_if_pktget(void *handle, uint len, bool send)
{
	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
	void *p = NULL;

	if (dhd_bus == NULL)
		return NULL;

	/* The pktget callback is mandatory for this path */
	if (dhd_bus->cbs && dhd_bus->cbs->pktget)
		p = dhd_bus->cbs->pktget(dhd_bus->cbarg, len, send);
	else
		ASSERT(0);

	return p;
}

/** Forward request to free packet from lower DBUS layer to higher layer (e.g.
dhd_linux.c) */
static void
dbus_if_pktfree(void *handle, void *p, bool send)
{
	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;

	if (dhd_bus == NULL)
		return;

	/* The pktfree callback is mandatory for this path */
	if (dhd_bus->cbs && dhd_bus->cbs->pktfree)
		dhd_bus->cbs->pktfree(dhd_bus->cbarg, p, send);
	else
		ASSERT(0);
}

/** Lower DBUS level requests either a send or receive IRB.
 * Returns NULL when the bus is not up or the corresponding queue is empty.
 */
static struct dbus_irb*
dbus_if_getirb(void *cbarg, bool send)
{
	dhd_bus_t *dhd_bus = (dhd_bus_t *) cbarg;
	struct exec_parms args;
	struct dbus_irb *irb;

	if ((dhd_bus == NULL) || (dhd_bus->pub.busstate != DBUS_STATE_UP))
		return NULL;

	/* Dequeue from tx_q or rx_q under the matching lock */
	if (send == TRUE) {
		args.qdeq.q = dhd_bus->tx_q;
		irb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args);
	} else {
		args.qdeq.q = dhd_bus->rx_q;
		irb = EXEC_RXLOCK(dhd_bus, q_deq_exec, &args);
	}

	return irb;
}

/**
 * Called as part of DBUS bus registration. Calls back into higher level (e.g. dhd_linux.c) probe
 * function.
 */
static void *
dbus_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no,
	uint16 slot, uint32 hdrlen)
{
	DBUSTRACE(("%s\n", __FUNCTION__));
	if (probe_cb) {
		/* Remember the handle so dbus_disconnect can pass it back later */
		disc_arg = probe_cb(probe_arg, desc, bustype, bus_no, slot, hdrlen);
		return disc_arg;
	}

	/* NOTE(review): returns an error code cast to a pointer when no probe
	 * callback is registered — callers appear to treat any non-NULL value as
	 * a handle; confirm against dbus_bus_register's contract.
	 */
	return (void *)DBUS_ERR;
}

/**
 * As part of initialization, higher level (e.g. dhd_linux.c) requests DBUS to prepare for
 * action.
+ */ +int +dhd_bus_register(void) +{ + int err; + + DBUSTRACE(("%s: Enter\n", __FUNCTION__)); + + probe_cb = dhd_dbus_probe_cb; + disconnect_cb = dhd_dbus_disconnect_cb; + probe_arg = NULL; + + err = dbus_bus_register(0xa5c, 0x48f, dbus_probe, /* call lower DBUS level register function */ + dbus_disconnect, NULL, &g_busintf, NULL, NULL); + + /* Device not detected */ + if (err == DBUS_ERR_NODEVICE) + err = DBUS_OK; + + return err; +} + +dhd_pub_t *g_pub = NULL; +void +dhd_bus_unregister(void) +{ + int ret; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + DHD_MUTEX_LOCK(); + if (g_pub) { + g_pub->dhd_remove = TRUE; + if (!g_pub->bus) { + dhd_dbus_disconnect_cb(g_pub->bus); + } + } + probe_cb = NULL; + DHD_MUTEX_UNLOCK(); + ret = dbus_bus_deregister(); + disconnect_cb = NULL; + probe_arg = NULL; +} + +/** As part of initialization, data structures have to be allocated and initialized */ +dhd_bus_t * +dbus_attach(osl_t *osh, int rxsize, int nrxq, int ntxq, dhd_pub_t *pub, + dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh) +{ + dhd_bus_t *dhd_bus; + int err; + + if ((g_busintf == NULL) || (g_busintf->attach == NULL) || (cbs == NULL)) + return NULL; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if ((nrxq <= 0) || (ntxq <= 0)) + return NULL; + + dhd_bus = MALLOC(osh, sizeof(dhd_bus_t)); + if (dhd_bus == NULL) { + DBUSERR(("%s: malloc failed %zu\n", __FUNCTION__, sizeof(dhd_bus_t))); + return NULL; + } + + bzero(dhd_bus, sizeof(dhd_bus_t)); + + /* BUS-specific driver interface (at a lower DBUS level) */ + dhd_bus->drvintf = g_busintf; + dhd_bus->cbarg = pub; + dhd_bus->cbs = cbs; + + dhd_bus->pub.sh = sh; + dhd_bus->pub.osh = osh; + dhd_bus->pub.rxsize = rxsize; + + dhd_bus->pub.nrxq = nrxq; + dhd_bus->rx_low_watermark = nrxq / 2; /* keep enough posted rx urbs */ + dhd_bus->pub.ntxq = ntxq; + dhd_bus->tx_low_watermark = ntxq / 4; /* flow control when too many tx urbs posted */ + + dhd_bus->tx_q = MALLOC(osh, sizeof(dbus_irbq_t)); + if (dhd_bus->tx_q == NULL) + 
goto error; + else { + bzero(dhd_bus->tx_q, sizeof(dbus_irbq_t)); + err = dbus_irbq_init(dhd_bus, dhd_bus->tx_q, ntxq, sizeof(dbus_irb_tx_t)); + if (err != DBUS_OK) + goto error; + } + + dhd_bus->rx_q = MALLOC(osh, sizeof(dbus_irbq_t)); + if (dhd_bus->rx_q == NULL) + goto error; + else { + bzero(dhd_bus->rx_q, sizeof(dbus_irbq_t)); + err = dbus_irbq_init(dhd_bus, dhd_bus->rx_q, nrxq, sizeof(dbus_irb_rx_t)); + if (err != DBUS_OK) + goto error; + } + + + dhd_bus->bus_info = (void *)g_busintf->attach(&dhd_bus->pub, + dhd_bus, &dbus_intf_cbs); + if (dhd_bus->bus_info == NULL) + goto error; + + dbus_tx_timer_init(dhd_bus); + +#if defined(BCM_REQUEST_FW) + /* Need to copy external image for re-download */ + if (extdl && extdl->fw && (extdl->fwlen > 0)) { + dhd_bus->extdl.fw = MALLOC(osh, extdl->fwlen); + if (dhd_bus->extdl.fw) { + bcopy(extdl->fw, dhd_bus->extdl.fw, extdl->fwlen); + dhd_bus->extdl.fwlen = extdl->fwlen; + } + } + + if (extdl && extdl->vars && (extdl->varslen > 0)) { + dhd_bus->extdl.vars = MALLOC(osh, extdl->varslen); + if (dhd_bus->extdl.vars) { + bcopy(extdl->vars, dhd_bus->extdl.vars, extdl->varslen); + dhd_bus->extdl.varslen = extdl->varslen; + } + } +#endif + + return (dhd_bus_t *)dhd_bus; + +error: + DBUSERR(("%s: Failed\n", __FUNCTION__)); + dbus_detach(dhd_bus); + return NULL; +} /* dbus_attach */ + +void +dbus_detach(dhd_bus_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + osl_t *osh; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return; + + dbus_tx_timer_stop(dhd_bus); + + osh = pub->pub.osh; + + if (dhd_bus->drvintf && dhd_bus->drvintf->detach) + dhd_bus->drvintf->detach((dbus_pub_t *)dhd_bus, dhd_bus->bus_info); + + if (dhd_bus->tx_q) { + dbus_irbq_deinit(dhd_bus, dhd_bus->tx_q, sizeof(dbus_irb_tx_t)); + MFREE(osh, dhd_bus->tx_q, sizeof(dbus_irbq_t)); + dhd_bus->tx_q = NULL; + } + + if (dhd_bus->rx_q) { + dbus_irbq_deinit(dhd_bus, dhd_bus->rx_q, sizeof(dbus_irb_rx_t)); + MFREE(osh, dhd_bus->rx_q, sizeof(dbus_irbq_t)); 
+ dhd_bus->rx_q = NULL; + } + + + if (dhd_bus->extdl.fw && (dhd_bus->extdl.fwlen > 0)) { + MFREE(osh, dhd_bus->extdl.fw, dhd_bus->extdl.fwlen); + dhd_bus->extdl.fw = NULL; + dhd_bus->extdl.fwlen = 0; + } + + if (dhd_bus->extdl.vars && (dhd_bus->extdl.varslen > 0)) { + MFREE(osh, dhd_bus->extdl.vars, dhd_bus->extdl.varslen); + dhd_bus->extdl.vars = NULL; + dhd_bus->extdl.varslen = 0; + } + + MFREE(osh, dhd_bus, sizeof(dhd_bus_t)); +} /* dbus_detach */ + +int dbus_dlneeded(dhd_bus_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int dlneeded = DBUS_ERR; + + if (!dhd_bus) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate)); + + if (dhd_bus->drvintf->dlneeded) { + dlneeded = dhd_bus->drvintf->dlneeded(dhd_bus->bus_info); + } + printf("%s: dlneeded=%d\n", __FUNCTION__, dlneeded); + + /* dlneeded > 0: need to download + * dlneeded = 0: downloaded + * dlneeded < 0: bus error*/ + return dlneeded; +} + +#if defined(BCM_REQUEST_FW) +int dbus_download_firmware(dhd_bus_t *pub, char *pfw_path, char *pnv_path) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + + if (!dhd_bus) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate)); + + dhd_bus->pub.busstate = DBUS_STATE_DL_PENDING; +#ifdef EXTERNAL_FW_PATH + err = dbus_do_download(dhd_bus, pfw_path, pnv_path); +#else + err = dbus_do_download(dhd_bus); +#endif /* EXTERNAL_FW_PATH */ + if (err == DBUS_OK) { + dhd_bus->pub.busstate = DBUS_STATE_DL_DONE; + } else { + DBUSERR(("%s: download failed (%d)\n", __FUNCTION__, err)); + } + + return err; +} +#endif + +/** + * higher layer requests us to 'up' the interface to the dongle. Prerequisite is that firmware (not + * bootloader) must be active in the dongle. 
+ */ +int +dbus_up(struct dhd_bus *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + if ((dhd_bus->pub.busstate == DBUS_STATE_DL_DONE) || + (dhd_bus->pub.busstate == DBUS_STATE_DOWN) || + (dhd_bus->pub.busstate == DBUS_STATE_SLEEP)) { + if (dhd_bus->drvintf && dhd_bus->drvintf->up) { + err = dhd_bus->drvintf->up(dhd_bus->bus_info); + + if (err == DBUS_OK) { + dbus_rxirbs_fill(dhd_bus); + } + } + } else + err = DBUS_ERR; + + return err; +} + +/** higher layer requests us to 'down' the interface to the dongle. */ +int +dbus_down(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + dbus_tx_timer_stop(dhd_bus); + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->down) + return dhd_bus->drvintf->down(dhd_bus->bus_info); + } + + return DBUS_ERR; +} + +int +dbus_shutdown(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->shutdown) + return dhd_bus->drvintf->shutdown(dhd_bus->bus_info); + + return DBUS_OK; +} + +int +dbus_stop(struct dhd_bus *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->stop) + return dhd_bus->drvintf->stop(dhd_bus->bus_info); + } + + return DBUS_ERR; +} + +int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf) +{ + return dbus_send_pkt(dbus, pktbuf, pktbuf /* pktinfo */); +} + +int +dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info) +{ + 
return dbus_send_irb(pub, buf, len, NULL, info); +} + +int +dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info) +{ + return dbus_send_irb(pub, NULL, 0, pkt, info); +} + +int +dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if (dhd_bus == NULL) { + DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__)); + return DBUS_ERR; + } + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->send_ctl) + return dhd_bus->drvintf->send_ctl(dhd_bus->bus_info, buf, len); + } else { + DBUSERR(("%s: bustate=%d\n", __FUNCTION__, dhd_bus->pub.busstate)); + } + + return DBUS_ERR; +} + +int +dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (buf == NULL)) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP || + dhd_bus->pub.busstate == DBUS_STATE_SLEEP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->recv_ctl) + return dhd_bus->drvintf->recv_ctl(dhd_bus->bus_info, buf, len); + } + + return DBUS_ERR; +} + +/** Only called via RPC (Dec 2012) */ +int +dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + dbus_irb_rx_t *rxirb; + struct exec_parms args; + int status; + + + if (dhd_bus == NULL) + return DBUS_ERR; + + args.qdeq.q = dhd_bus->rx_q; + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) { + if ((rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) { + status = dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info, + rxirb, ep_idx); + if (status == DBUS_ERR_RXDROP) { + bzero(rxirb, sizeof(dbus_irb_rx_t)); + args.qenq.q = dhd_bus->rx_q; + args.qenq.b = (dbus_irb_t *) rxirb; + EXEC_RXLOCK(dhd_bus, q_enq_exec, &args); + } + } + } + } + + return DBUS_ERR; +} + +/** only called by dhd_cdc.c (Dec 2012) */ +int +dbus_poll_intr(dbus_pub_t *pub) +{ + dhd_bus_t 
*dhd_bus = (dhd_bus_t *) pub; + + int status = DBUS_ERR; + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) { + status = dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info, + NULL, 0xff); + } + } + return status; +} + +/** called by nobody (Dec 2012) */ +void * +dbus_pktget(dbus_pub_t *pub, int len) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (len < 0)) + return NULL; + + return PKTGET(dhd_bus->pub.osh, len, TRUE); +} + +/** called by nobody (Dec 2012) */ +void +dbus_pktfree(dbus_pub_t *pub, void* pkt) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (pkt == NULL)) + return; + + PKTFREE(dhd_bus->pub.osh, pkt, TRUE); +} + +/** called by nobody (Dec 2012) */ +int +dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if ((dhd_bus == NULL) || (stats == NULL)) + return DBUS_ERR; + + bcopy(&dhd_bus->pub.stats, stats, sizeof(dbus_stats_t)); + + return DBUS_OK; +} + +int +dbus_get_attrib(dhd_bus_t *pub, dbus_attrib_t *attrib) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + if ((dhd_bus == NULL) || (attrib == NULL)) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->get_attrib) { + err = dhd_bus->drvintf->get_attrib(dhd_bus->bus_info, + &dhd_bus->pub.attrib); + } + + bcopy(&dhd_bus->pub.attrib, attrib, sizeof(dbus_attrib_t)); + return err; +} + +int +dbus_get_device_speed(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + + if (dhd_bus == NULL) + return INVALID_SPEED; + + return (dhd_bus->pub.device_speed); +} + +int +dbus_set_config(dbus_pub_t *pub, dbus_config_t *config) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + if ((dhd_bus == NULL) || (config == NULL)) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->set_config) { + err = dhd_bus->drvintf->set_config(dhd_bus->bus_info, 
+ config); + + if ((config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) && + (!err) && + (dhd_bus->pub.busstate == DBUS_STATE_UP)) { + dbus_rxirbs_fill(dhd_bus); + } + } + + return err; +} + +int +dbus_get_config(dbus_pub_t *pub, dbus_config_t *config) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + if ((dhd_bus == NULL) || (config == NULL)) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->get_config) { + err = dhd_bus->drvintf->get_config(dhd_bus->bus_info, + config); + } + + return err; +} + +int +dbus_set_errmask(dbus_pub_t *pub, uint32 mask) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_OK; + + if (dhd_bus == NULL) + return DBUS_ERR; + + dhd_bus->errmask = mask; + return err; +} + +int +dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + bool fwdl = FALSE; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->pub.busstate == DBUS_STATE_UP) { + return DBUS_OK; + } + + + + if (dhd_bus->drvintf->pnp) { + err = dhd_bus->drvintf->pnp(dhd_bus->bus_info, + DBUS_PNP_RESUME); + } + + if (dhd_bus->drvintf->recv_needed) { + if (dhd_bus->drvintf->recv_needed(dhd_bus->bus_info)) { + /* Refill after sleep/hibernate */ + dbus_rxirbs_fill(dhd_bus); + } + } + + + if (fw_reload) + *fw_reload = fwdl; + + return err; +} /* dbus_pnp_resume */ + +int +dbus_pnp_sleep(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + dbus_tx_timer_stop(dhd_bus); + + if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) { + err = dhd_bus->drvintf->pnp(dhd_bus->bus_info, + DBUS_PNP_SLEEP); + } + + return err; +} + +int +dbus_pnp_disconnect(dbus_pub_t *pub) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) pub; + int err = DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + 
dbus_tx_timer_stop(dhd_bus); + + if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) { + err = dhd_bus->drvintf->pnp(dhd_bus->bus_info, + DBUS_PNP_DISCONNECT); + } + + return err; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *dhd_bus = (dhd_bus_t *) dhdp->bus; + int err = DBUS_ERR; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (dhd_bus == NULL) + return DBUS_ERR; + + if (dhd_bus->drvintf && dhd_bus->drvintf->iovar_op) { + err = dhd_bus->drvintf->iovar_op(dhd_bus->bus_info, + name, params, plen, arg, len, set); + } + + return err; +} + + +void * +dhd_dbus_txq(const dbus_pub_t *pub) +{ + return NULL; +} + +uint +dhd_dbus_hdrlen(const dbus_pub_t *pub) +{ + return 0; +} + +void * +dbus_get_devinfo(dbus_pub_t *pub) +{ + return pub->dev_info; +} + +#if defined(BCM_REQUEST_FW) && !defined(EXTERNAL_FW_PATH) +static int +dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev) +{ + uint32 value = 0; + uint8 *cis; + uint16 *otpinfo; + uint32 i; + bool standard_cis = TRUE; + uint8 tup, tlen; + bool btype_present = FALSE; + bool brev_present = FALSE; + int ret; + int devid; + uint16 btype = 0; + uint16 brev = 0; + uint32 otp_size = 0, otp_addr = 0, otp_sw_rgn = 0; + + if (dhd_bus == NULL || dhd_bus->drvintf == NULL || + dhd_bus->drvintf->readreg == NULL) + return DBUS_ERR; + + devid = dhd_bus->pub.attrib.devid; + + if ((devid == BCM43234_CHIP_ID) || (devid == BCM43235_CHIP_ID) || + (devid == BCM43236_CHIP_ID)) { + + otp_size = BCM_OTP_SIZE_43236; + otp_sw_rgn = BCM_OTP_SW_RGN_43236; + otp_addr = BCM_OTP_ADDR_43236; + + } else { + return DBUS_ERR_NVRAM; + } + + cis = MALLOC(dhd_bus->pub.osh, otp_size * 2); + if (cis == NULL) + return DBUS_ERR; + + otpinfo = (uint16 *) cis; + + for (i = 0; i < otp_size; i++) { + + ret = dhd_bus->drvintf->readreg(dhd_bus->bus_info, + otp_addr + ((otp_sw_rgn + i) << 1), 2, &value); + + if (ret != DBUS_OK) { + MFREE(dhd_bus->pub.osh, cis, otp_size * 2); + 
return ret; + } + otpinfo[i] = (uint16) value; + } + + for (i = 0; i < (otp_size << 1); ) { + + if (standard_cis) { + tup = cis[i++]; + if (tup == CISTPL_NULL || tup == CISTPL_END) + tlen = 0; + else + tlen = cis[i++]; + } else { + if (cis[i] == CISTPL_NULL || cis[i] == CISTPL_END) { + tlen = 0; + tup = cis[i]; + } else { + tlen = cis[i]; + tup = CISTPL_BRCM_HNBU; + } + ++i; + } + + if (tup == CISTPL_END || (i + tlen) >= (otp_size << 1)) { + break; + } + + switch (tup) { + + case CISTPL_BRCM_HNBU: + + switch (cis[i]) { + + case HNBU_BOARDTYPE: + + btype = (uint16) ((cis[i + 2] << 8) + cis[i + 1]); + btype_present = TRUE; + DBUSTRACE(("%s: HNBU_BOARDTYPE = 0x%2x\n", __FUNCTION__, + (uint32)btype)); + break; + + case HNBU_BOARDREV: + + if (tlen == 2) + brev = (uint16) cis[i + 1]; + else + brev = (uint16) ((cis[i + 2] << 8) + cis[i + 1]); + brev_present = TRUE; + DBUSTRACE(("%s: HNBU_BOARDREV = 0x%2x\n", __FUNCTION__, + (uint32)*boardrev)); + break; + + case HNBU_HNBUCIS: + DBUSTRACE(("%s: HNBU_HNBUCIS\n", __FUNCTION__)); + tlen++; + standard_cis = FALSE; + break; + } + break; + } + + i += tlen; + } + + MFREE(dhd_bus->pub.osh, cis, otp_size * 2); + + if (btype_present == TRUE && brev_present == TRUE) { + *boardtype = btype; + *boardrev = brev; + DBUSERR(("otp boardtype = 0x%2x boardrev = 0x%2x\n", + *boardtype, *boardrev)); + + return DBUS_OK; + } + else + return DBUS_ERR; +} /* dbus_otp */ + +static int +dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen, +uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len) +{ + /* Multi board nvram file format is contenation of nvram info with \r + * The file format for two contatenated set is + * \nBroadcom Jumbo Nvram file\nfirst_set\nsecond_set\nthird_set\n + */ + uint8 *nvram_start = NULL, *nvram_end = NULL; + uint8 *nvram_start_prev = NULL, *nvram_end_prev = NULL; + uint16 btype = 0, brev = 0; + int len = 0; + char *field; + + *nvram = NULL; + *nvram_len = 0; + + if (strncmp(BCM_JUMBO_START, 
jumbonvram, strlen(BCM_JUMBO_START))) { + /* single nvram file in the native format */ + DBUSTRACE(("%s: Non-Jumbo NVRAM File \n", __FUNCTION__)); + *nvram = jumbonvram; + *nvram_len = jumbolen; + return DBUS_OK; + } else { + DBUSTRACE(("%s: Jumbo NVRAM File \n", __FUNCTION__)); + } + + /* sanity test the end of the config sets for proper ending */ + if (jumbonvram[jumbolen - 1] != BCM_JUMBO_NVRAM_DELIMIT || + jumbonvram[jumbolen - 2] != '\0') { + DBUSERR(("%s: Bad Jumbo NVRAM file format\n", __FUNCTION__)); + return DBUS_JUMBO_BAD_FORMAT; + } + + dhd_bus->nvram_nontxt = DBUS_NVRAM_NONTXT; + + nvram_start = jumbonvram; + + while (*nvram_start != BCM_JUMBO_NVRAM_DELIMIT && len < jumbolen) { + + /* consume the first file info line + * \nBroadcom Jumbo Nvram file\nfile1\n ... + */ + len ++; + nvram_start ++; + } + + nvram_end = nvram_start; + + /* search for "boardrev=0xabcd" and "boardtype=0x1234" information in + * the concatenated nvram config files /sets + */ + + while (len < jumbolen) { + + if (*nvram_end == '\0') { + /* end of a config set is marked by multiple null characters */ + len ++; + nvram_end ++; + DBUSTRACE(("%s: NULL chr len = %d char = 0x%x\n", __FUNCTION__, + len, *nvram_end)); + continue; + + } else if (*nvram_end == BCM_JUMBO_NVRAM_DELIMIT) { + + /* config set delimiter is reached */ + /* check if next config set is present or not + * return if next config is not present + */ + + /* start search the next config set */ + nvram_start_prev = nvram_start; + nvram_end_prev = nvram_end; + + nvram_end ++; + nvram_start = nvram_end; + btype = brev = 0; + DBUSTRACE(("%s: going to next record len = %d " + "char = 0x%x \n", __FUNCTION__, len, *nvram_end)); + len ++; + if (len >= jumbolen) { + + *nvram = nvram_start_prev; + *nvram_len = (int)(nvram_end_prev - nvram_start_prev); + + DBUSTRACE(("%s: no more len = %d nvram_end = 0x%p", + __FUNCTION__, len, nvram_end)); + + return DBUS_JUMBO_NOMATCH; + + } else { + continue; + } + + } else { + + DBUSTRACE(("%s: 
config str = %s\n", __FUNCTION__, nvram_end)); + + if (bcmp(nvram_end, "boardtype", strlen("boardtype")) == 0) { + + field = strchr(nvram_end, '='); + field++; + btype = (uint16)bcm_strtoul(field, NULL, 0); + + DBUSTRACE(("%s: btype = 0x%x boardtype = 0x%x \n", __FUNCTION__, + btype, boardtype)); + } + + if (bcmp(nvram_end, "boardrev", strlen("boardrev")) == 0) { + + field = strchr(nvram_end, '='); + field++; + brev = (uint16)bcm_strtoul(field, NULL, 0); + + DBUSTRACE(("%s: brev = 0x%x boardrev = 0x%x \n", __FUNCTION__, + brev, boardrev)); + } + if (btype == boardtype && brev == boardrev) { + /* locate nvram config set end - ie.find '\r' char */ + while (*nvram_end != BCM_JUMBO_NVRAM_DELIMIT) + nvram_end ++; + *nvram = nvram_start; + *nvram_len = (int) (nvram_end - nvram_start); + DBUSTRACE(("found len = %d nvram_start = 0x%p " + "nvram_end = 0x%p\n", *nvram_len, nvram_start, nvram_end)); + return DBUS_OK; + } + + len += (strlen(nvram_end) + 1); + nvram_end += (strlen(nvram_end) + 1); + } + } + return DBUS_JUMBO_NOMATCH; +} /* dbus_select_nvram */ + +#endif + +#define DBUS_NRXQ 50 +#define DBUS_NTXQ 100 + +static void +dhd_dbus_send_complete(void *handle, void *info, int status) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + void *pkt = info; + + if ((dhd == NULL) || (pkt == NULL)) { + DBUSERR(("dhd or pkt is NULL\n")); + return; + } + + if (status == DBUS_OK) { + dhd->dstats.tx_packets++; + } else { + DBUSERR(("TX error=%d\n", status)); + dhd->dstats.tx_errors++; + } +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) && + (dhd_wlfc_txcomplete(dhd, pkt, status == 0) != WLFC_UNSUPPORTED)) { + return; + } +#endif /* PROP_TXSTATUS */ + PKTFREE(dhd->osh, pkt, TRUE); +} + +static void +dhd_dbus_recv_pkt(void *handle, void *pkt) +{ + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + uint pkt_count; + dhd_pub_t *dhd = (dhd_pub_t *)handle; + int ifidx = 0; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + 
} + + /* If the protocol uses a data header, check and remove it */ + if (dhd_prot_hdrpull(dhd, &ifidx, pkt, reorder_info_buf, + &reorder_info_len) != 0) { + DBUSERR(("rx protocol error\n")); + PKTFREE(dhd->osh, pkt, FALSE); + dhd->rx_errors++; + return; + } + + if (reorder_info_len) { + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(dhd, reorder_info_buf, reorder_info_len, + &pkt, &pkt_count); + if (pkt_count == 0) + return; + } + else { + pkt_count = 1; + } + dhd_rx_frame(dhd, ifidx, pkt, pkt_count, 0); +} + +static void +dhd_dbus_recv_buf(void *handle, uint8 *buf, int len) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + void *pkt; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if ((pkt = PKTGET(dhd->osh, len, FALSE)) == NULL) { + DBUSERR(("PKTGET (rx) failed=%d\n", len)); + return; + } + + bcopy(buf, PKTDATA(dhd->osh, pkt), len); + dhd_dbus_recv_pkt(dhd, pkt); +} + +static void +dhd_dbus_txflowcontrol(void *handle, bool onoff) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + bool wlfc_enabled = FALSE; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, onoff, !onoff) != WLFC_UNSUPPORTED); +#endif + + if (!wlfc_enabled) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, onoff); + } +} + +static void +dhd_dbus_errhandler(void *handle, int err) +{ +} + +static void +dhd_dbus_ctl_complete(void *handle, int type, int status) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (type == DBUS_CBCTL_READ) { + if (status == DBUS_OK) + dhd->rx_ctlpkts++; + else + dhd->rx_ctlerrs++; + } else if (type == DBUS_CBCTL_WRITE) { + if (status == DBUS_OK) + dhd->tx_ctlpkts++; + else + dhd->tx_ctlerrs++; + } + + dhd_prot_ctl_complete(dhd); +} + +static void +dhd_dbus_state_change(void *handle, int state) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + + 
if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + switch (state) { + + case DBUS_STATE_DL_NEEDED: + DBUSERR(("%s: firmware request cannot be handled\n", __FUNCTION__)); + break; + case DBUS_STATE_DOWN: + DBUSTRACE(("%s: DBUS is down\n", __FUNCTION__)); + dhd->busstate = DHD_BUS_DOWN; + break; + case DBUS_STATE_UP: + DBUSTRACE(("%s: DBUS is up\n", __FUNCTION__)); + dhd->busstate = DHD_BUS_DATA; + break; + default: + break; + } + + DBUSERR(("%s: DBUS current state=%d\n", __FUNCTION__, state)); +} + +static void * +dhd_dbus_pktget(void *handle, uint len, bool send) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + void *p = NULL; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return NULL; + } + + if (send == TRUE) { + dhd_os_sdlock_txq(dhd); + p = PKTGET(dhd->osh, len, TRUE); + dhd_os_sdunlock_txq(dhd); + } else { + dhd_os_sdlock_rxq(dhd); + p = PKTGET(dhd->osh, len, FALSE); + dhd_os_sdunlock_rxq(dhd); + } + + return p; +} + +static void +dhd_dbus_pktfree(void *handle, void *p, bool send) +{ + dhd_pub_t *dhd = (dhd_pub_t *)handle; + + if (dhd == NULL) { + DBUSERR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (send == TRUE) { +#ifdef PROP_TXSTATUS + if (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && + (dhd_wlfc_txcomplete(dhd, p, FALSE) != WLFC_UNSUPPORTED)) { + return; + } +#endif /* PROP_TXSTATUS */ + + dhd_os_sdlock_txq(dhd); + PKTFREE(dhd->osh, p, TRUE); + dhd_os_sdunlock_txq(dhd); + } else { + dhd_os_sdlock_rxq(dhd); + PKTFREE(dhd->osh, p, FALSE); + dhd_os_sdunlock_rxq(dhd); + } +} + + +static dbus_callbacks_t dhd_dbus_cbs = { + dhd_dbus_send_complete, + dhd_dbus_recv_buf, + dhd_dbus_recv_pkt, + dhd_dbus_txflowcontrol, + dhd_dbus_errhandler, + dhd_dbus_ctl_complete, + dhd_dbus_state_change, + dhd_dbus_pktget, + dhd_dbus_pktfree +}; + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus != NULL); + return bus->pub.attrib.devid; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus 
!= NULL); + return bus->pub.attrib.chiprev; +} + +void +dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + bcm_bprintf(strbuf, "Bus USB\n"); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ +} + +int +dhd_bus_txdata(struct dhd_bus *bus, void *pktbuf) +{ + DBUSTRACE(("%s\n", __FUNCTION__)); + if (bus->txoff) { + DBUSTRACE(("txoff\n")); + return BCME_EPERM; + } + return dbus_send_txdata(&bus->pub, pktbuf); +} + +static void +dhd_dbus_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhd_dbus_advertise_bus_remove(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_REMOVE; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + int bcmerror = 0; + unsigned long flags; + wifi_adapter_info_t *adapter = (wifi_adapter_info_t *)dhdp->adapter; + + if (flag == TRUE) { + if (!dhdp->dongle_reset) { + DBUSERR(("%s: == Power OFF ==\n", __FUNCTION__)); + dhd_dbus_advertise_bus_cleanup(dhdp); + dhd_os_wd_timer(dhdp, 0); +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(dhdp, ALL_INTERFACES, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + dbus_stop(dhdp->bus); + + dhdp->dongle_reset = TRUE; + 
dhdp->up = FALSE; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + wifi_clr_adapter_status(adapter, WIFI_STATUS_FW_READY); + + printf("%s: WLAN OFF DONE\n", __FUNCTION__); + /* App can now remove power from device */ + } else + bcmerror = BCME_ERROR; + } else { + /* App must have restored power to device before calling */ + printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__); + if (dhdp->dongle_reset) { + /* Turn on WLAN */ + DHD_MUTEX_UNLOCK(); + wait_event_interruptible_timeout(adapter->status_event, + wifi_get_adapter_status(adapter, WIFI_STATUS_FW_READY), + msecs_to_jiffies(DHD_FW_READY_TIMEOUT)); + DHD_MUTEX_LOCK(); + bcmerror = dbus_up(dhdp->bus); + if (bcmerror == BCME_OK) { + dhdp->dongle_reset = FALSE; + dhdp->up = TRUE; +#if !defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF); +#endif + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + + DBUSTRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + } else { + DBUSERR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, bcmerror)); + } + } + } + +#ifdef PKT_STATICS + memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t)); +#endif + return bcmerror; +} + +void +dhd_set_path_params(struct dhd_bus *bus) +{ + /* External conf takes precedence if specified */ + dhd_conf_preinit(bus->dhd); + + if (bus->dhd->conf_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path); + } + if (bus->dhd->clm_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path); + } +#ifdef CONFIG_PATH_AUTO_SELECT + dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); +#endif + + dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); + + dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); + dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); + dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); + + printf("Final fw_path=%s\n", bus->fw_path); + 
printf("Final nv_path=%s\n", bus->nv_path); + printf("Final clm_path=%s\n", bus->dhd->clm_path); + printf("Final conf_path=%s\n", bus->dhd->conf_path); + +} + +void +dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, + char *pnv_path, char *pclm_path, char *pconf_path) +{ + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (bus == NULL) { + DBUSERR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; + + dhd_set_path_params(bus); + +} + +/* + * hdrlen is space to reserve in pkt headroom for DBUS + */ +void * +dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen) +{ + osl_t *osh = NULL; + dhd_bus_t *bus = NULL; + dhd_pub_t *pub = NULL; + uint rxsz; + int dlneeded = 0; + wifi_adapter_info_t *adapter = NULL; + + DBUSTRACE(("%s: Enter\n", __FUNCTION__)); + + adapter = dhd_wifi_platform_get_adapter(bustype, bus_no, slot); + + if (!g_pub) { + /* Ask the OS interface part for an OSL handle */ + if (!(osh = osl_attach(NULL, bustype, TRUE))) { + DBUSERR(("%s: OSL attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Attach to the dhd/OS interface */ + if (!(pub = dhd_attach(osh, bus, hdrlen, adapter))) { + DBUSERR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + } else { + pub = g_pub; + osh = pub->osh; + } + + if (pub->bus) { + DBUSERR(("%s: wrong probe\n", __FUNCTION__)); + goto fail; + } + + rxsz = dhd_get_rxsz(pub); + bus = dbus_attach(osh, rxsz, DBUS_NRXQ, DBUS_NTXQ, pub, &dhd_dbus_cbs, NULL, NULL); + if (bus) { + pub->bus = bus; + bus->dhd = pub; + + dlneeded = dbus_dlneeded(bus); + if (dlneeded >= 0) { + if (!g_pub) { + dhd_conf_reset(pub); + dhd_conf_set_chiprev(pub, bus->pub.attrib.devid, bus->pub.attrib.chiprev); + dhd_conf_preinit(pub); + } + } + + if (g_pub || dhd_download_fw_on_driverload) { + if (dlneeded == 0) { + wifi_set_adapter_status(adapter, 
WIFI_STATUS_FW_READY); +#ifdef BCM_REQUEST_FW + } else if (dlneeded > 0) { + dhd_set_path(bus->dhd); + if (dbus_download_firmware(bus, bus->fw_path, bus->nv_path) != DBUS_OK) + goto fail; +#endif + } + } + } else { + DBUSERR(("%s: dbus_attach failed\n", __FUNCTION__)); + } + + if (!g_pub) { + /* Ok, finish the attach to the OS network interface */ + if (dhd_register_if(pub, 0, TRUE) != 0) { + DBUSERR(("%s: dhd_register_if failed\n", __FUNCTION__)); + goto fail; + } + pub->hang_report = TRUE; +#if defined(MULTIPLE_SUPPLICANT) + wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe +#endif + g_pub = pub; + } + + DBUSTRACE(("%s: Exit\n", __FUNCTION__)); + wifi_clr_adapter_status(adapter, WIFI_STATUS_DETTACH); + wifi_set_adapter_status(adapter, WIFI_STATUS_ATTACH); + wake_up_interruptible(&adapter->status_event); + /* This is passed to dhd_dbus_disconnect_cb */ + return bus; + +fail: + if (pub && pub->bus) { + dbus_detach(pub->bus); + pub->bus = NULL; + } + /* Release resources in reverse order */ + if (!g_pub) { + if (pub) { + dhd_detach(pub); + dhd_free(pub); + } + if (osh) { + osl_detach(osh); + } + } + + printf("%s: Failed\n", __FUNCTION__); + return NULL; +} + +void +dhd_dbus_disconnect_cb(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t *)arg; + dhd_pub_t *pub = g_pub; + osl_t *osh; + wifi_adapter_info_t *adapter = NULL; + + adapter = (wifi_adapter_info_t *)pub->adapter; + + if (pub && !pub->dhd_remove && bus == NULL) { + DBUSERR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + if (!adapter) { + DBUSERR(("%s: adapter is NULL\n", __FUNCTION__)); + return; + } + + printf("%s: Enter dhd_remove=%d on %s\n", __FUNCTION__, + pub->dhd_remove, adapter->name); + if (!pub->dhd_remove) { + /* Advertise bus remove during rmmod */ + dhd_dbus_advertise_bus_remove(bus->dhd); + dbus_detach(pub->bus); + pub->bus = NULL; + wifi_clr_adapter_status(adapter, WIFI_STATUS_ATTACH); + wifi_set_adapter_status(adapter, WIFI_STATUS_DETTACH); + 
wake_up_interruptible(&adapter->status_event); + } else { + osh = pub->osh; + dhd_detach(pub); + if (pub->bus) { + dbus_detach(pub->bus); + pub->bus = NULL; + } + dhd_free(pub); + g_pub = NULL; + if (MALLOCED(osh)) { + DBUSERR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh))); + } + osl_detach(osh); + } + + DBUSTRACE(("%s: Exit\n", __FUNCTION__)); +} + +#ifdef LINUX_EXTERNAL_MODULE_DBUS + +static int __init +bcm_dbus_module_init(void) +{ + printf("Inserting bcm_dbus module \n"); + return 0; +} + +static void __exit +bcm_dbus_module_exit(void) +{ + printf("Removing bcm_dbus module \n"); + return; +} + +EXPORT_SYMBOL(dbus_pnp_sleep); +EXPORT_SYMBOL(dbus_get_devinfo); +EXPORT_SYMBOL(dbus_detach); +EXPORT_SYMBOL(dbus_get_attrib); +EXPORT_SYMBOL(dbus_down); +EXPORT_SYMBOL(dbus_pnp_resume); +EXPORT_SYMBOL(dbus_set_config); +EXPORT_SYMBOL(dbus_flowctrl_rx); +EXPORT_SYMBOL(dbus_up); +EXPORT_SYMBOL(dbus_get_device_speed); +EXPORT_SYMBOL(dbus_send_pkt); +EXPORT_SYMBOL(dbus_recv_ctl); +EXPORT_SYMBOL(dbus_attach); + +MODULE_LICENSE("GPL"); + +module_init(bcm_dbus_module_init); +module_exit(bcm_dbus_module_exit); + +#endif /* #ifdef LINUX_EXTERNAL_MODULE_DBUS */ diff --git a/bcmdhd.100.10.315.x/dbus_usb.c b/bcmdhd.100.10.315.x/dbus_usb.c new file mode 100644 index 0000000..8a496dd --- /dev/null +++ b/bcmdhd.100.10.315.x/dbus_usb.c @@ -0,0 +1,1172 @@ +/* + * Dongle BUS interface for USB, OS independent + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the 
resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dbus_usb.c 565557 2015-06-22 19:29:44Z $ + */ + +/** + * @file @brief + * This file contains DBUS code that is USB, but not OS specific. DBUS is a Broadcom proprietary + * host specific abstraction layer. + */ + +#include +#include +#include +#include +#include +#include +#include + +uint dbus_msglevel = DBUS_ERROR_VAL; +module_param(dbus_msglevel, int, 0); + + +#define USB_DLIMAGE_RETRY_TIMEOUT 3000 /* retry Timeout */ +#define USB_SFLASH_DLIMAGE_SPINWAIT 150 /* in unit of ms */ +#define USB_SFLASH_DLIMAGE_LIMIT 2000 /* spinwait limit (ms) */ +#define POSTBOOT_ID 0xA123 /* ID to detect if dongle has boot up */ +#define USB_RESETCFG_SPINWAIT 1 /* wait after resetcfg (ms) */ +#define USB_DEV_ISBAD(u) (u->pub->attrib.devid == 0xDEAD) +#define USB_DLGO_SPINWAIT 100 /* wait after DL_GO (ms) */ +#define TEST_CHIP 0x4328 + +typedef struct { + dbus_pub_t *pub; + + void *cbarg; + dbus_intf_callbacks_t *cbs; /** callbacks into higher DBUS level (dbus.c) */ + dbus_intf_t *drvintf; + void *usbosl_info; + uint32 rdlram_base_addr; + uint32 rdlram_size; +} usb_info_t; + +/* + * Callbacks common to all USB + */ +static void dbus_usb_disconnect(void *handle); +static void dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb); +static void dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status); +static void dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t 
*rxirb, int status); +static void dbus_usb_errhandler(void *handle, int err); +static void dbus_usb_ctl_complete(void *handle, int type, int status); +static void dbus_usb_state_change(void *handle, int state); +static struct dbus_irb* dbus_usb_getirb(void *handle, bool send); +static void dbus_usb_rxerr_indicate(void *handle, bool on); +#if !defined(BCM_REQUEST_FW) +static int dbus_usb_resetcfg(usb_info_t *usbinfo); +#endif +static int dbus_usb_iovar_op(void *bus, const char *name, + void *params, int plen, void *arg, int len, bool set); +static int dbus_iovar_process(usb_info_t* usbinfo, const char *name, + void *params, int plen, void *arg, int len, bool set); +static int dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid, + const char *name, void *params, int plen, void *arg, int len, int val_size); +static int dhdusb_downloadvars(usb_info_t *bus, void *arg, int len); + +static int dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen); +static int dbus_usb_dlstart(void *bus, uint8 *fw, int len); +static int dbus_usb_dlneeded(void *bus); +static int dbus_usb_dlrun(void *bus); +static int dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo); + + +/* OS specific */ +extern bool dbus_usbos_dl_cmd(void *info, uint8 cmd, void *buffer, int buflen); +extern int dbus_usbos_wait(void *info, uint16 ms); +extern int dbus_write_membytes(usb_info_t *usbinfo, bool set, uint32 address, + uint8 *data, uint size); +extern bool dbus_usbos_dl_send_bulk(void *info, void *buffer, int len); +extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size); + +/** + * These functions are called by the lower DBUS level (dbus_usb_os.c) to notify this DBUS level + * (dbus_usb.c) of an event. 
+ */ +static dbus_intf_callbacks_t dbus_usb_intf_cbs = { + dbus_usb_send_irb_timeout, + dbus_usb_send_irb_complete, + dbus_usb_recv_irb_complete, + dbus_usb_errhandler, + dbus_usb_ctl_complete, + dbus_usb_state_change, + NULL, /* isr */ + NULL, /* dpc */ + NULL, /* watchdog */ + NULL, /* dbus_if_pktget */ + NULL, /* dbus_if_pktfree */ + dbus_usb_getirb, + dbus_usb_rxerr_indicate +}; + +/* IOVar table */ +enum { + IOV_SET_DOWNLOAD_STATE = 1, + IOV_DBUS_MSGLEVEL, + IOV_MEMBYTES, + IOV_VARS, + IOV_LOOPBACK_TX +}; + +const bcm_iovar_t dhdusb_iovars[] = { + {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, + {"dbus_msglevel", IOV_DBUS_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 }, + {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {"usb_lb_txfer", IOV_LOOPBACK_TX, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {NULL, 0, 0, 0, 0 } +}; + +/* + * Need global for probe() and disconnect() since + * attach() is not called at probe and detach() + * can be called inside disconnect() + */ +static probe_cb_t probe_cb = NULL; +static disconnect_cb_t disconnect_cb = NULL; +static void *probe_arg = NULL; +static void *disc_arg = NULL; +static dbus_intf_t *g_dbusintf = NULL; +static dbus_intf_t dbus_usb_intf; /** functions called by higher layer DBUS into lower layer */ + +/* + * dbus_intf_t common to all USB + * These functions override dbus_usb_.c. + */ +static void *dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs); +static void dbus_usb_detach(dbus_pub_t *pub, void *info); +static void * dbus_usb_probe(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen); + +/* functions */ + +/** + * As part of DBUS initialization/registration, the higher level DBUS (dbus.c) needs to know what + * lower level DBUS functions to call (in both dbus_usb.c and dbus_usb_os.c). 
+ */ +static void * +dbus_usb_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no, + uint16 slot, uint32 hdrlen) +{ + DBUSTRACE(("%s(): \n", __FUNCTION__)); + if (probe_cb) { + + if (g_dbusintf != NULL) { + /* First, initialize all lower-level functions as default + * so that dbus.c simply calls directly to dbus_usb_os.c. + */ + bcopy(g_dbusintf, &dbus_usb_intf, sizeof(dbus_intf_t)); + + /* Second, selectively override functions we need, if any. */ + dbus_usb_intf.attach = dbus_usb_attach; + dbus_usb_intf.detach = dbus_usb_detach; + dbus_usb_intf.iovar_op = dbus_usb_iovar_op; + dbus_usb_intf.dlstart = dbus_usb_dlstart; + dbus_usb_intf.dlneeded = dbus_usb_dlneeded; + dbus_usb_intf.dlrun = dbus_usb_dlrun; + } + + disc_arg = probe_cb(probe_arg, "DBUS USB", USB_BUS, bus_no, slot, hdrlen); + return disc_arg; + } + + return NULL; +} + +/** + * On return, *intf contains this or lower-level DBUS functions to be called by higher + * level (dbus.c) + */ +int +dbus_bus_register(int vid, int pid, probe_cb_t prcb, + disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2) +{ + int err; + + DBUSTRACE(("%s(): \n", __FUNCTION__)); + probe_cb = prcb; + disconnect_cb = discb; + probe_arg = prarg; + + *intf = &dbus_usb_intf; + + err = dbus_bus_osl_register(vid, pid, dbus_usb_probe, + dbus_usb_disconnect, NULL, &g_dbusintf, param1, param2); + + ASSERT(g_dbusintf); + return err; +} + +int +dbus_bus_deregister() +{ + DBUSTRACE(("%s(): \n", __FUNCTION__)); + return dbus_bus_osl_deregister(); +} + +/** initialization consists of registration followed by 'attach'. 
*/ +void * +dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs) +{ + usb_info_t *usb_info; + + DBUSTRACE(("%s(): \n", __FUNCTION__)); + + if ((g_dbusintf == NULL) || (g_dbusintf->attach == NULL)) + return NULL; + + /* Sanity check for BUS_INFO() */ + ASSERT(OFFSETOF(usb_info_t, pub) == 0); + + usb_info = MALLOC(pub->osh, sizeof(usb_info_t)); + if (usb_info == NULL) + return NULL; + + bzero(usb_info, sizeof(usb_info_t)); + + usb_info->pub = pub; + usb_info->cbarg = cbarg; + usb_info->cbs = cbs; + + usb_info->usbosl_info = (dbus_pub_t *)g_dbusintf->attach(pub, + usb_info, &dbus_usb_intf_cbs); + if (usb_info->usbosl_info == NULL) { + MFREE(pub->osh, usb_info, sizeof(usb_info_t)); + return NULL; + } + + /* Save USB OS-specific driver entry points */ + usb_info->drvintf = g_dbusintf; + + pub->bus = usb_info; +#if !defined(BCM_REQUEST_FW) + if (!dbus_usb_resetcfg(usb_info)) { + usb_info->pub->busstate = DBUS_STATE_DL_DONE; + } +#endif + /* Return Lower layer info */ + return (void *) usb_info->usbosl_info; +} + +void +dbus_usb_detach(dbus_pub_t *pub, void *info) +{ + usb_info_t *usb_info = (usb_info_t *) pub->bus; + osl_t *osh = pub->osh; + + if (usb_info == NULL) + return; + + if (usb_info->drvintf && usb_info->drvintf->detach) + usb_info->drvintf->detach(pub, usb_info->usbosl_info); + + MFREE(osh, usb_info, sizeof(usb_info_t)); +} + +void +dbus_usb_disconnect(void *handle) +{ + DBUSTRACE(("%s(): \n", __FUNCTION__)); + if (disconnect_cb) + disconnect_cb(disc_arg); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. 
+ */ +static void +dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->send_irb_timeout) + usb_info->cbs->send_irb_timeout(usb_info->cbarg, txirb); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->send_irb_complete) + usb_info->cbs->send_irb_complete(usb_info->cbarg, txirb, status); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->recv_irb_complete) + usb_info->cbs->recv_irb_complete(usb_info->cbarg, rxirb, status); +} + +/** Lower DBUS level (dbus_usb_os.c) requests a free IRB. Pass this on to the higher DBUS level. */ +static struct dbus_irb* +dbus_usb_getirb(void *handle, bool send) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return NULL; + + if (usb_info->cbs && usb_info->cbs->getirb) + return usb_info->cbs->getirb(usb_info->cbarg, send); + + return NULL; +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. 
+ */ +static void +dbus_usb_rxerr_indicate(void *handle, bool on) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->rxerr_indicate) + usb_info->cbs->rxerr_indicate(usb_info->cbarg, on); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_errhandler(void *handle, int err) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->errhandler) + usb_info->cbs->errhandler(usb_info->cbarg, err); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. + */ +static void +dbus_usb_ctl_complete(void *handle, int type, int status) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usb_info == NULL) { + DBUSERR(("%s: usb_info is NULL\n", __FUNCTION__)); + return; + } + + if (usb_info->cbs && usb_info->cbs->ctl_complete) + usb_info->cbs->ctl_complete(usb_info->cbarg, type, status); +} + +/** + * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be + * notified. 
+ */ +static void +dbus_usb_state_change(void *handle, int state) +{ + usb_info_t *usb_info = (usb_info_t *) handle; + + if (usb_info == NULL) + return; + + if (usb_info->cbs && usb_info->cbs->state_change) + usb_info->cbs->state_change(usb_info->cbarg, state); +} + +/** called by higher DBUS level (dbus.c) */ +static int +dbus_usb_iovar_op(void *bus, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + int err = DBUS_OK; + + err = dbus_iovar_process((usb_info_t*)bus, name, params, plen, arg, len, set); + return err; +} + +/** process iovar request from higher DBUS level */ +static int +dbus_iovar_process(usb_info_t* usbinfo, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DBUSTRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdusb_iovars, name)) == NULL) { + /* Not Supported */ + bcmerror = BCME_UNSUPPORTED; + DBUSTRACE(("%s: IOVAR %s is not supported\n", name, __FUNCTION__)); + goto exit; + + } + + DBUSTRACE(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dbus_usb_doiovar(usbinfo, vi, actionid, + name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} /* dbus_iovar_process */ + +static int +dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + int32 int_val2 = 0; + bool bool_val = 0; + + DBUSTRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + if (plen >= (int)sizeof(int_val) * 2) + bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + switch (actionid) { + + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + BCM_REFERENCE(address); + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DBUSTRACE(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + DBUSTRACE(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__, + (set ? "write" : "read"), size, address)); + + /* Generate the actual data pointer */ + data = set ? 
(uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dbus_usb_dl_writeimage(BUS_INFO(bus, usb_info_t), data, size); + } + break; + + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + + if (bool_val == TRUE) { + bcmerror = dbus_usb_dlneeded(bus); + dbus_usb_rdl_dwnld_state(BUS_INFO(bus, usb_info_t)); + } else { + usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t); + bcmerror = dbus_usb_dlrun(bus); + usbinfo->pub->busstate = DBUS_STATE_DL_DONE; + } + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdusb_downloadvars(BUS_INFO(bus, usb_info_t), arg, len); + break; + + case IOV_GVAL(IOV_DBUS_MSGLEVEL): + int_val = (int32)dbus_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DBUS_MSGLEVEL): + dbus_msglevel = int_val; + break; + +#ifdef DBUS_USB_LOOPBACK + case IOV_SVAL(IOV_LOOPBACK_TX): + bcmerror = dbus_usbos_loopback_tx(BUS_INFO(bus, usb_info_t), int_val, + int_val2); + break; +#endif + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + return bcmerror; +} /* dbus_usb_doiovar */ + +/** higher DBUS level (dbus.c) wants to set NVRAM variables in dongle */ +static int +dhdusb_downloadvars(usb_info_t *bus, void *arg, int len) +{ + int bcmerror = 0; + uint32 varsize; + uint32 varaddr; + uint32 varsizew; + + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* RAM size is not set. Set it at dbus_usb_dlneeded */ + if (!bus->rdlram_size) + bcmerror = BCME_ERROR; + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = len ? 
ROUNDUP(len, 4) : 0; + varaddr = (bus->rdlram_size - 4) - varsize; + + /* Write the vars list */ + DBUSTRACE(("WriteVars: @%x varsize=%d\n", varaddr, varsize)); + bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, (varaddr + bus->rdlram_base_addr), + arg, varsize); + + /* adjust to the user specified RAM */ + DBUSTRACE(("Usable memory size: %d\n", bus->rdlram_size)); + DBUSTRACE(("Vars are at %d, orig varsize is %d\n", varaddr, varsize)); + + varsize = ((bus->rdlram_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DBUSTRACE(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, ((bus->rdlram_size - 4) + + bus->rdlram_base_addr), (uint8*)&varsizew, 4); +err: + return bcmerror; +} /* dbus_usb_doiovar */ + +#if !defined(BCM_REQUEST_FW) +/** + * After downloading firmware into dongle and starting it, we need to know if the firmware is + * indeed up and running. 
+ */ +static int +dbus_usb_resetcfg(usb_info_t *usbinfo) +{ + void *osinfo; + bootrom_id_t id; + uint16 waittime = 0; + + uint32 starttime = 0; + uint32 endtime = 0; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usbinfo == NULL) + return DBUS_ERR; + + osinfo = usbinfo->usbosl_info; + ASSERT(osinfo); + + /* Give dongle chance to boot */ + dbus_usbos_wait(osinfo, USB_SFLASH_DLIMAGE_SPINWAIT); + waittime = USB_SFLASH_DLIMAGE_SPINWAIT; + while (waittime < USB_DLIMAGE_RETRY_TIMEOUT) { + + starttime = OSL_SYSUPTIME(); + + id.chip = 0xDEAD; /* Get the ID */ + dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t)); + id.chip = ltoh32(id.chip); + + endtime = OSL_SYSUPTIME(); + waittime += (endtime - starttime); + + if (id.chip == POSTBOOT_ID) + break; + } + + if (id.chip == POSTBOOT_ID) { + DBUSERR(("%s: download done. Bootup time = %d ms postboot chip 0x%x/rev 0x%x\n", + __FUNCTION__, waittime, id.chip, id.chiprev)); + + dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t)); + + dbus_usbos_wait(osinfo, USB_RESETCFG_SPINWAIT); + return DBUS_OK; + } else { + DBUSERR(("%s: Cannot talk to Dongle. Wait time = %d ms. Firmware is not UP \n", + __FUNCTION__, waittime)); + return DBUS_ERR; + } + + return DBUS_OK; +} +#endif + +/** before firmware download, the dongle has to be prepared to receive the fw image */ +static int +dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo) +{ + void *osinfo = usbinfo->usbosl_info; + rdl_state_t state; + int err = DBUS_OK; + + /* 1) Prepare USB boot loader for runtime image */ + dbus_usbos_dl_cmd(osinfo, DL_START, &state, sizeof(rdl_state_t)); + + state.state = ltoh32(state.state); + state.bytes = ltoh32(state.bytes); + + /* 2) Check we are in the Waiting state */ + if (state.state != DL_WAITING) { + DBUSERR(("%s: Failed to DL_START\n", __FUNCTION__)); + err = DBUS_ERR; + goto fail; + } + +fail: + return err; +} + +/** + * Dongle contains bootcode in ROM but firmware is (partially) contained in dongle RAM. 
Therefore, + * firmware has to be downloaded into dongle RAM. + */ +static int +dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen) +{ + osl_t *osh = usbinfo->pub->osh; + void *osinfo = usbinfo->usbosl_info; + unsigned int sendlen, sent, dllen; + char *bulkchunk = NULL, *dlpos; + rdl_state_t state; + int err = DBUS_OK; + bootrom_id_t id; + uint16 wait, wait_time; + uint32 dl_trunk_size = RDL_CHUNK; + + if (BCM4350_CHIP(usbinfo->pub->attrib.devid)) + dl_trunk_size = RDL_CHUNK_MAX; + + while (!bulkchunk) { + bulkchunk = MALLOC(osh, dl_trunk_size); + if (dl_trunk_size == RDL_CHUNK) + break; + if (!bulkchunk) { + dl_trunk_size /= 2; + if (dl_trunk_size < RDL_CHUNK) + dl_trunk_size = RDL_CHUNK; + } + } + + if (bulkchunk == NULL) { + err = DBUS_ERR; + goto fail; + } + + sent = 0; + dlpos = fw; + dllen = fwlen; + + /* Get chip id and rev */ + id.chip = usbinfo->pub->attrib.devid; + id.chiprev = usbinfo->pub->attrib.chiprev; + + DBUSTRACE(("enter %s: fwlen=%d\n", __FUNCTION__, fwlen)); + + dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t)); + + /* 3) Load the image */ + while ((sent < dllen)) { + /* Wait until the usb device reports it received all the bytes we sent */ + + if (sent < dllen) { + if ((dllen-sent) < dl_trunk_size) + sendlen = dllen-sent; + else + sendlen = dl_trunk_size; + + /* simply avoid having to send a ZLP by ensuring we never have an even + * multiple of 64 + */ + if (!(sendlen % 64)) + sendlen -= 4; + + /* send data */ + memcpy(bulkchunk, dlpos, sendlen); + if (!dbus_usbos_dl_send_bulk(osinfo, bulkchunk, sendlen)) { + err = DBUS_ERR; + goto fail; + } + + dlpos += sendlen; + sent += sendlen; + DBUSTRACE(("%s: sendlen %d\n", __FUNCTION__, sendlen)); + } + + wait = 0; + wait_time = USB_SFLASH_DLIMAGE_SPINWAIT; + while (!dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, + sizeof(rdl_state_t))) { + if ((id.chip == 43236) && (id.chiprev == 0)) { + DBUSERR(("%s: 43236a0 SFlash delay, waiting for dongle crc check " + 
"completion!!!\n", __FUNCTION__)); + dbus_usbos_wait(osinfo, wait_time); + wait += wait_time; + if (wait >= USB_SFLASH_DLIMAGE_LIMIT) { + DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__)); + err = DBUS_ERR; + goto fail; + break; + } + } else { + DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__)); + err = DBUS_ERR; + goto fail; + } + } + + state.state = ltoh32(state.state); + state.bytes = ltoh32(state.bytes); + + /* restart if an error is reported */ + if ((state.state == DL_BAD_HDR) || (state.state == DL_BAD_CRC)) { + DBUSERR(("%s: Bad Hdr or Bad CRC\n", __FUNCTION__)); + err = DBUS_ERR; + goto fail; + } + + } +fail: + if (bulkchunk) + MFREE(osh, bulkchunk, dl_trunk_size); + + return err; +} /* dbus_usb_dl_writeimage */ + +/** Higher level DBUS layer (dbus.c) requests this layer to download image into dongle */ +static int +dbus_usb_dlstart(void *bus, uint8 *fw, int len) +{ + usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t); + int err; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usbinfo == NULL) + return DBUS_ERR; + + if (USB_DEV_ISBAD(usbinfo)) + return DBUS_ERR; + + err = dbus_usb_rdl_dwnld_state(usbinfo); + + if (DBUS_OK == err) { + err = dbus_usb_dl_writeimage(usbinfo, fw, len); + if (err == DBUS_OK) + usbinfo->pub->busstate = DBUS_STATE_DL_DONE; + else + usbinfo->pub->busstate = DBUS_STATE_DL_PENDING; + } else + usbinfo->pub->busstate = DBUS_STATE_DL_PENDING; + + return err; +} + +static bool +dbus_usb_update_chipinfo(usb_info_t *usbinfo, uint32 chip) +{ + bool retval = TRUE; + /* based on the CHIP Id, store the ram size which is needed for NVRAM download. 
*/ + switch (chip) { + + case 0x4319: + usbinfo->rdlram_size = RDL_RAM_SIZE_4319; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_4319; + break; + + case 0x4329: + usbinfo->rdlram_size = RDL_RAM_SIZE_4329; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_4329; + break; + + case 43234: + case 43235: + case 43236: + usbinfo->rdlram_size = RDL_RAM_SIZE_43236; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_43236; + break; + + case 0x4328: + usbinfo->rdlram_size = RDL_RAM_SIZE_4328; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_4328; + break; + + case 0x4322: + usbinfo->rdlram_size = RDL_RAM_SIZE_4322; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_4322; + break; + + case 0x4360: + case 0xAA06: + usbinfo->rdlram_size = RDL_RAM_SIZE_4360; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_4360; + break; + + case 43242: + case 43243: + usbinfo->rdlram_size = RDL_RAM_SIZE_43242; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_43242; + break; + + case 43143: + usbinfo->rdlram_size = RDL_RAM_SIZE_43143; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_43143; + break; + + case 0x4350: + case 43556: + case 43558: + case 43569: + usbinfo->rdlram_size = RDL_RAM_SIZE_4350; + usbinfo->rdlram_base_addr = RDL_RAM_BASE_4350; + break; + + case POSTBOOT_ID: + break; + + default: + DBUSERR(("%s: Chip 0x%x Ram size is not known\n", __FUNCTION__, chip)); + retval = FALSE; + break; + + } + + return retval; +} /* dbus_usb_update_chipinfo */ + +/** higher DBUS level (dbus.c) wants to know if firmware download is required. 
*/ +static int +dbus_usb_dlneeded(void *bus) +{ + usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t); + void *osinfo; + bootrom_id_t id; + int dl_needed = 1; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usbinfo == NULL) + return DBUS_ERR; + + osinfo = usbinfo->usbosl_info; + ASSERT(osinfo); + + /* Check if firmware downloaded already by querying runtime ID */ + id.chip = 0xDEAD; + dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t)); + + id.chip = ltoh32(id.chip); + id.chiprev = ltoh32(id.chiprev); + + if (FALSE == dbus_usb_update_chipinfo(usbinfo, id.chip)) { + dl_needed = DBUS_ERR; + goto exit; + } + + DBUSERR(("%s: chip 0x%x rev 0x%x\n", __FUNCTION__, id.chip, id.chiprev)); + if (id.chip == POSTBOOT_ID) { + /* This code is needed to support two enumerations on USB1.1 scenario */ + DBUSERR(("%s: Firmware already downloaded\n", __FUNCTION__)); + + dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t)); + dl_needed = DBUS_OK; + if (usbinfo->pub->busstate == DBUS_STATE_DL_PENDING) + usbinfo->pub->busstate = DBUS_STATE_DL_DONE; + } else { + usbinfo->pub->attrib.devid = id.chip; + usbinfo->pub->attrib.chiprev = id.chiprev; + } + +exit: + return dl_needed; +} + +/** After issuing firmware download, higher DBUS level (dbus.c) wants to start the firmware. 
*/ +static int +dbus_usb_dlrun(void *bus) +{ + usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t); + void *osinfo; + rdl_state_t state; + int err = DBUS_OK; + + DBUSTRACE(("%s\n", __FUNCTION__)); + + if (usbinfo == NULL) + return DBUS_ERR; + + if (USB_DEV_ISBAD(usbinfo)) + return DBUS_ERR; + + osinfo = usbinfo->usbosl_info; + ASSERT(osinfo); + + /* Check we are runnable */ + dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t)); + + state.state = ltoh32(state.state); + state.bytes = ltoh32(state.bytes); + + /* Start the image */ + if (state.state == DL_RUNNABLE) { + DBUSTRACE(("%s: Issue DL_GO\n", __FUNCTION__)); + dbus_usbos_dl_cmd(osinfo, DL_GO, &state, sizeof(rdl_state_t)); + + if (usbinfo->pub->attrib.devid == TEST_CHIP) + dbus_usbos_wait(osinfo, USB_DLGO_SPINWAIT); + +// dbus_usb_resetcfg(usbinfo); + /* The Donlge may go for re-enumeration. */ + } else { + DBUSERR(("%s: Dongle not runnable\n", __FUNCTION__)); + err = DBUS_ERR; + } + + return err; +} + +/** + * As preparation for firmware download, higher DBUS level (dbus.c) requests the firmware image + * to be used for the type of dongle detected. 
Directly called by dbus.c (so not via a callback + * construction) + */ +void +dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp) +{ + usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t); + unsigned int devid; + unsigned int crev; + + devid = usbinfo->pub->attrib.devid; + crev = usbinfo->pub->attrib.chiprev; + + *fw = NULL; + *fwlen = 0; + + switch (devid) { + case BCM43236_CHIP_ID: + case BCM43235_CHIP_ID: + case BCM43234_CHIP_ID: + case BCM43238_CHIP_ID: { + if (crev == 3 || crev == 2 || crev == 1) { +#ifdef EMBED_IMAGE_43236b + *fw = (uint8 *)dlarray_43236b; + *fwlen = sizeof(dlarray_43236b); + +#endif + } + } break; + case BCM4360_CHIP_ID: + case BCM4352_CHIP_ID: + case BCM43526_CHIP_ID: +#ifdef EMBED_IMAGE_43526a + if (crev <= 2) { + *fw = (uint8 *)dlarray_43526a; + *fwlen = sizeof(dlarray_43526a); + } +#endif +#ifdef EMBED_IMAGE_43526b + if (crev > 2) { + *fw = (uint8 *)dlarray_43526b; + *fwlen = sizeof(dlarray_43526b); + } +#endif + break; + + case BCM43242_CHIP_ID: +#ifdef EMBED_IMAGE_43242a0 + *fw = (uint8 *)dlarray_43242a0; + *fwlen = sizeof(dlarray_43242a0); +#endif + break; + + case BCM43143_CHIP_ID: +#ifdef EMBED_IMAGE_43143a0 + *fw = (uint8 *)dlarray_43143a0; + *fwlen = sizeof(dlarray_43143a0); +#endif +#ifdef EMBED_IMAGE_43143b0 + *fw = (uint8 *)dlarray_43143b0; + *fwlen = sizeof(dlarray_43143b0); +#endif + break; + + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: +#ifdef EMBED_IMAGE_4350a0 + if (crev == 0) { + *fw = (uint8 *)dlarray_4350a0; + *fwlen = sizeof(dlarray_4350a0); + } +#endif +#ifdef EMBED_IMAGE_4350b0 + if (crev == 1) { + *fw = (uint8 *)dlarray_4350b0; + *fwlen = sizeof(dlarray_4350b0); + } +#endif +#ifdef EMBED_IMAGE_4350b1 + if (crev == 2) { + *fw = (uint8 *)dlarray_4350b1; + *fwlen = sizeof(dlarray_4350b1); + } +#endif +#ifdef EMBED_IMAGE_43556b1 + if (crev == 2) { + *fw = (uint8 
*)dlarray_43556b1; + *fwlen = sizeof(dlarray_43556b1); + } +#endif +#ifdef EMBED_IMAGE_4350c0 + if (crev == 3) { + *fw = (uint8 *)dlarray_4350c0; + *fwlen = sizeof(dlarray_4350c0); + } +#endif /* EMBED_IMAGE_4350c0 */ +#ifdef EMBED_IMAGE_4350c1 + if (crev == 4) { + *fw = (uint8 *)dlarray_4350c1; + *fwlen = sizeof(dlarray_4350c1); + } +#endif /* EMBED_IMAGE_4350c1 */ + break; + case BCM43569_CHIP_ID: +#ifdef EMBED_IMAGE_43569a0 + if (crev == 0) { + *fw = (uint8 *)dlarray_43569a0; + *fwlen = sizeof(dlarray_43569a0); + } +#endif /* EMBED_IMAGE_43569a0 */ + break; + default: +#ifdef EMBED_IMAGE_GENERIC + *fw = (uint8 *)dlarray; + *fwlen = sizeof(dlarray); +#endif + break; + } +} /* dbus_bus_fw_get */ diff --git a/bcmdhd.100.10.315.x/dbus_usb_linux.c b/bcmdhd.100.10.315.x/dbus_usb_linux.c new file mode 100644 index 0000000..10927d8 --- /dev/null +++ b/bcmdhd.100.10.315.x/dbus_usb_linux.c @@ -0,0 +1,3403 @@ +/* + * Dongle BUS interface + * USB Linux Implementation + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dbus_usb_linux.c 564663 2015-06-18 02:34:42Z $ + */ + +/** + * @file @brief + * This file contains DBUS code that is USB *and* OS (Linux) specific. DBUS is a Broadcom + * proprietary host specific abstraction layer. + */ + +#include +#include + +/** + * DBUS_LINUX_RXDPC is created for router platform performance tuning. A separate thread is created + * to handle USB RX and avoid the call chain getting too long and enhance cache hit rate. + * + * DBUS_LINUX_RXDPC setting is in wlconfig file. + */ + +/* + * If DBUS_LINUX_RXDPC is off, spin_lock_bh() for CTFPOOL in + * linux_osl.c has to be changed to spin_lock_irqsave() because + * PKTGET/PKTFREE are no longer in bottom half. + * + * Right now we have another queue rpcq in wl_linux.c. Maybe we + * can eliminate that one to reduce the overhead. + * + * Enabling 2nd EP and DBUS_LINUX_RXDPC causing traffic from + * both EP's to be queued in the same rx queue. If we want + * RXDPC to work with 2nd EP. The EP for RPC call return + * should bypass the dpc and go directly up. + */ + +/* #define DBUS_LINUX_RXDPC */ + +/* Dbus histogram for ntxq, nrxq, dpc parameter tuning */ +/* #define DBUS_LINUX_HIST */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(USBOS_THREAD) || defined(USBOS_TX_THREAD) + +/** + * The usb-thread is designed to provide currency on multiprocessors and SMP linux kernels. On the + * dual cores platform, the WLAN driver, without threads, executed only on CPU0. The driver consumed + * almost of 100% on CPU0, while CPU1 remained idle. 
The behavior was observed on Broadcom's STB. + * + * The WLAN driver consumed most of CPU0 and not CPU1 because tasklets/queues, software irq, and + * hardware irq are executing from CPU0, only. CPU0 became the system's bottle-neck. TPUT is lower + * and system's responsiveness is slower. + * + * To improve system responsiveness and TPUT usb-thread was implemented. The system's threads could + * be scheduled to run on any core. One core could be processing data in the usb-layer and the other + * core could be processing data in the wl-layer. + * + * For further info see [WlThreadAndUsbThread] Twiki. + */ + +#include +#include +#include +#include +#include +#include +#endif /* USBOS_THREAD || USBOS_TX_THREAD */ + + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define KERNEL26 +#endif + +/** + * Starting with the 3.10 kernel release, dynamic PM support for USB is present whenever + * the kernel was built with CONFIG_PM_RUNTIME enabled. The CONFIG_USB_SUSPEND option has + * been eliminated. 
+ */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)) && defined(CONFIG_USB_SUSPEND)) \ + || ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) && defined(CONFIG_PM_RUNTIME)) +/* For USB power management support, see Linux kernel: Documentation/usb/power-management.txt */ +#define USB_SUSPEND_AVAILABLE +#endif + +/* Define alternate fw/nvram paths used in Android */ +#ifdef OEM_ANDROID +#define CONFIG_ANDROID_BCMDHD_FW_PATH "broadcom/dhd/firmware/fw.bin.trx" +#define CONFIG_ANDROID_BCMDHD_NVRAM_PATH "broadcom/dhd/nvrams/nvm.txt" +#endif /* OEM_ANDROID */ + +static inline int usb_submit_urb_linux(struct urb *urb) +{ + +#ifdef BCM_MAX_URB_LEN + if (urb && (urb->transfer_buffer_length > BCM_MAX_URB_LEN)) { + DBUSERR(("URB transfer length=%d exceeded %d ra=%p\n", urb->transfer_buffer_length, + BCM_MAX_URB_LEN, __builtin_return_address(0))); + return DBUS_ERR; + } +#endif + +#ifdef KERNEL26 + return usb_submit_urb(urb, GFP_ATOMIC); +#else + return usb_submit_urb(urb); +#endif + +} + +#define USB_SUBMIT_URB(urb) usb_submit_urb_linux(urb) + +#ifdef KERNEL26 + +#define USB_ALLOC_URB() usb_alloc_urb(0, GFP_ATOMIC) +#define USB_UNLINK_URB(urb) (usb_kill_urb(urb)) +#define USB_FREE_URB(urb) (usb_free_urb(urb)) +#define USB_REGISTER() usb_register(&dbus_usbdev) +#define USB_DEREGISTER() usb_deregister(&dbus_usbdev) + +#ifdef USB_SUSPEND_AVAILABLE + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) +#define USB_AUTOPM_SET_INTERFACE(intf) usb_autopm_set_interface(intf) +#else +#define USB_ENABLE_AUTOSUSPEND(udev) usb_enable_autosuspend(udev) +#define USB_DISABLE_AUTOSUSPEND(udev) usb_disable_autosuspend(udev) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ + +#define USB_AUTOPM_GET_INTERFACE(intf) usb_autopm_get_interface(intf) +#define USB_AUTOPM_PUT_INTERFACE(intf) usb_autopm_put_interface(intf) +#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) usb_autopm_get_interface_async(intf) +#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) 
usb_autopm_put_interface_async(intf) +#define USB_MARK_LAST_BUSY(dev) usb_mark_last_busy(dev) + +#else /* USB_SUSPEND_AVAILABLE */ + +#define USB_AUTOPM_GET_INTERFACE(intf) do {} while (0) +#define USB_AUTOPM_PUT_INTERFACE(intf) do {} while (0) +#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) do {} while (0) +#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) do {} while (0) +#define USB_MARK_LAST_BUSY(dev) do {} while (0) +#endif /* USB_SUSPEND_AVAILABLE */ + +#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \ + usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \ + (data), (size), (timeout)) +#define USB_BULK_MSG(dev, pipe, data, len, actual_length, timeout) \ + usb_bulk_msg((dev), (pipe), (data), (len), (actual_length), (timeout)) +#define USB_BUFFER_ALLOC(dev, size, mem, dma) usb_buffer_alloc(dev, size, mem, dma) +#define USB_BUFFER_FREE(dev, size, data, dma) usb_buffer_free(dev, size, data, dma) + +#ifdef WL_URB_ZPKT +#define URB_QUEUE_BULK URB_ZERO_PACKET +#else +#define URB_QUEUE_BULK 0 +#endif /* WL_URB_ZPKT */ + +#define CALLBACK_ARGS struct urb *urb, struct pt_regs *regs +#define CALLBACK_ARGS_DATA urb, regs +#define CONFIGDESC(usb) (&((usb)->actconfig)->desc) +#define IFPTR(usb, idx) ((usb)->actconfig->interface[idx]) +#define IFALTS(usb, idx) (IFPTR((usb), (idx))->altsetting[0]) +#define IFDESC(usb, idx) IFALTS((usb), (idx)).desc +#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[ep]).desc + +#else /* KERNEL26 */ + +#define USB_ALLOC_URB() usb_alloc_urb(0) +#define USB_UNLINK_URB(urb) usb_unlink_urb(urb) +#define USB_FREE_URB(urb) (usb_free_urb(urb)) +#define USB_REGISTER() usb_register(&dbus_usbdev) +#define USB_DEREGISTER() usb_deregister(&dbus_usbdev) +#define USB_AUTOPM_GET_INTERFACE(intf) do {} while (0) +#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf) do {} while (0) +#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf) do {} while (0) +#define USB_MARK_LAST_BUSY(dev) do {} while (0) + 
+#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \ + usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \ + (data), (size), (timeout)) +#define USB_BUFFER_ALLOC(dev, size, mem, dma) kmalloc(size, mem) +#define USB_BUFFER_FREE(dev, size, data, dma) kfree(data) + +#ifdef WL_URB_ZPKT +#define URB_QUEUE_BULK USB_QUEUE_BULK|URB_ZERO_PACKET +#else +#define URB_QUEUE_BULK 0 +#endif /* WL_URB_ZPKT */ + +#define CALLBACK_ARGS struct urb *urb +#define CALLBACK_ARGS_DATA urb +#define CONFIGDESC(usb) ((usb)->actconfig) +#define IFPTR(usb, idx) (&(usb)->actconfig->interface[idx]) +#define IFALTS(usb, idx) ((usb)->actconfig->interface[idx].altsetting[0]) +#define IFDESC(usb, idx) IFALTS((usb), (idx)) +#define IFEPDESC(usb, idx, ep) (IFALTS((usb), (idx)).endpoint[ep]) + + +#endif /* KERNEL26 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) +#define USB_SPEED_SUPER 5 +#endif /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) */ + +#define CONTROL_IF 0 +#define BULK_IF 0 + +#ifdef BCMUSBDEV_COMPOSITE +#define USB_COMPIF_MAX 4 + +#define USB_CLASS_WIRELESS 0xe0 +#define USB_CLASS_MISC 0xef +#define USB_SUBCLASS_COMMON 0x02 +#define USB_PROTO_IAD 0x01 +#define USB_PROTO_VENDOR 0xff + +#define USB_QUIRK_NO_SET_INTF 0x04 /* device does not support set_interface */ +#endif /* BCMUSBDEV_COMPOSITE */ + +#define USB_SYNC_WAIT_TIMEOUT 300 /* ms */ + +/* Private data kept in skb */ +#define SKB_PRIV(skb, idx) (&((void **)skb->cb)[idx]) +#define SKB_PRIV_URB(skb) (*(struct urb **)SKB_PRIV(skb, 0)) + +#ifndef DBUS_USB_RXQUEUE_BATCH_ADD +/* items to add each time within limit */ +#define DBUS_USB_RXQUEUE_BATCH_ADD 8 +#endif + +#ifndef DBUS_USB_RXQUEUE_LOWER_WATERMARK +/* add a new batch req to rx queue when waiting item count reduce to this number */ +#define DBUS_USB_RXQUEUE_LOWER_WATERMARK 4 +#endif + +enum usbos_suspend_state { + USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow suspend */ + 
USBOS_SUSPEND_STATE_SUSPEND_PENDING, /* Device is idle, can be suspended */ + /* Wating PM to suspend */ + USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */ +}; + +enum usbos_request_state { + USBOS_REQUEST_STATE_UNSCHEDULED = 0, /* USB TX request not scheduled */ + USBOS_REQUEST_STATE_SCHEDULED, /* USB TX request given to TX thread */ + USBOS_REQUEST_STATE_SUBMITTED /* USB TX request submitted */ +}; + +typedef struct { + uint32 notification; + uint32 reserved; +} intr_t; + +typedef struct { + dbus_pub_t *pub; + + void *cbarg; + dbus_intf_callbacks_t *cbs; + + /* Imported */ + struct usb_device *usb; /* USB device pointer from OS */ + struct urb *intr_urb; /* URB for interrupt endpoint */ + struct list_head req_rxfreeq; + struct list_head req_txfreeq; + struct list_head req_rxpostedq; /* Posted down to USB driver for RX */ + struct list_head req_txpostedq; /* Posted down to USB driver for TX */ + spinlock_t rxfree_lock; /* Lock for rx free list */ + spinlock_t txfree_lock; /* Lock for tx free list */ + spinlock_t rxposted_lock; /* Lock for rx posted list */ + spinlock_t txposted_lock; /* Lock for tx posted list */ + uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2; /* Pipe numbers for USB I/O */ + uint rxbuf_len; + + struct list_head req_rxpendingq; /* RXDPC: Pending for dpc to send up */ + spinlock_t rxpending_lock; /* RXDPC: Lock for rx pending list */ + long dpc_pid; + struct semaphore dpc_sem; + struct completion dpc_exited; + int rxpending; + + struct urb *ctl_urb; + int ctl_in_pipe, ctl_out_pipe; + struct usb_ctrlrequest ctl_write; + struct usb_ctrlrequest ctl_read; + struct semaphore ctl_lock; /* Lock for CTRL transfers via tx_thread */ +#ifdef USBOS_TX_THREAD + enum usbos_request_state ctl_state; +#endif /* USBOS_TX_THREAD */ + + spinlock_t rxlock; /* Lock for rxq management */ + spinlock_t txlock; /* Lock for txq management */ + + int intr_size; /* Size of interrupt message */ + int interval; /* Interrupt polling interval */ + intr_t intr; /* Data buffer for 
interrupt endpoint */ + + int maxps; + atomic_t txposted; + atomic_t rxposted; + atomic_t txallocated; + atomic_t rxallocated; + bool rxctl_deferrespok; /* Get a response for setup from dongle */ + + wait_queue_head_t wait; + bool waitdone; + int sync_urb_status; + + struct urb *blk_urb; /* Used for downloading embedded image */ + +#ifdef USBOS_THREAD + spinlock_t ctrl_lock; + spinlock_t usbos_list_lock; + struct list_head usbos_list; + struct list_head usbos_free_list; + atomic_t usbos_list_cnt; + wait_queue_head_t usbos_queue_head; + struct task_struct *usbos_kt; +#endif /* USBOS_THREAD */ + +#ifdef USBOS_TX_THREAD + spinlock_t usbos_tx_list_lock; + struct list_head usbos_tx_list; + wait_queue_head_t usbos_tx_queue_head; + struct task_struct *usbos_tx_kt; +#endif /* USBOS_TX_THREAD */ + + struct dma_pool *qtd_pool; /* QTD pool for USB optimization only */ + int tx_ep, rx_ep, rx2_ep; /* EPs for USB optimization */ + struct usb_device *usb_device; /* USB device for optimization */ +} usbos_info_t; + +typedef struct urb_req { + void *pkt; + int buf_len; + struct urb *urb; + void *arg; + usbos_info_t *usbinfo; + struct list_head urb_list; +} urb_req_t; + +#ifdef USBOS_THREAD +typedef struct usbos_list_entry { + struct list_head list; /* must be first */ + void *urb_context; + int urb_length; + int urb_status; +} usbos_list_entry_t; + +static void* dbus_usbos_thread_init(usbos_info_t *usbos_info); +static void dbus_usbos_thread_deinit(usbos_info_t *usbos_info); +static void dbus_usbos_dispatch_schedule(CALLBACK_ARGS); +static int dbus_usbos_thread_func(void *data); +#endif /* USBOS_THREAD */ + +#ifdef USBOS_TX_THREAD +void* dbus_usbos_tx_thread_init(usbos_info_t *usbos_info); +void dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info); +int dbus_usbos_tx_thread_func(void *data); +#endif /* USBOS_TX_THREAD */ + +/* Shared Function prototypes */ +bool dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen); +int dbus_usbos_wait(usbos_info_t 
*usbinfo, uint16 ms); +bool dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len); +int dbus_write_membytes(usbos_info_t *usbinfo, bool set, uint32 address, uint8 *data, uint size); + +/* Local function prototypes */ +static void dbus_usbos_send_complete(CALLBACK_ARGS); +static void dbus_usbos_recv_complete(CALLBACK_ARGS); +static int dbus_usbos_errhandler(void *bus, int err); +static int dbus_usbos_state_change(void *bus, int state); +static void dbusos_stop(usbos_info_t *usbos_info); + +#ifdef KERNEL26 +static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id); +static void dbus_usbos_disconnect(struct usb_interface *intf); +#if defined(USB_SUSPEND_AVAILABLE) +static int dbus_usbos_resume(struct usb_interface *intf); +static int dbus_usbos_suspend(struct usb_interface *intf, pm_message_t message); +/* at the moment, used for full dongle host driver only */ +static int dbus_usbos_reset_resume(struct usb_interface *intf); +#endif /* USB_SUSPEND_AVAILABLE */ +#else /* KERNEL26 */ +static void *dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum, + const struct usb_device_id *id); +static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr); +#endif /* KERNEL26 */ + + +/** + * have to disable missing-field-initializers warning as last element {} triggers it + * and different versions of kernel have different number of members so it is impossible + * to specify the initializer. 
BTW issuing the warning here is bug og GCC as universal + * zero {0} specified in C99 standard as correct way of initialization of struct to all zeros + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif + +static struct usb_device_id devid_table[] = { + { USB_DEVICE(BCM_DNGL_VID, 0x0000) }, /* Configurable via register() */ +#if defined(BCM_REQUEST_FW) + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4328) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4322) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4319) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43236) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43143) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43242) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4360) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4350) }, + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43569) }, +#endif +#ifdef EXTENDED_VID_PID + EXTENDED_VID_PID, +#endif /* EXTENDED_VID_PID */ + { USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BDC_PID) }, /* Default BDC */ + { } +}; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +#pragma GCC diagnostic pop +#endif + +MODULE_DEVICE_TABLE(usb, devid_table); + +/** functions called by the Linux kernel USB subsystem */ +static struct usb_driver dbus_usbdev = { + name: "dbus_usbdev", + probe: dbus_usbos_probe, + disconnect: dbus_usbos_disconnect, + id_table: devid_table, +#if defined(USB_SUSPEND_AVAILABLE) + suspend: dbus_usbos_suspend, + resume: dbus_usbos_resume, + reset_resume: dbus_usbos_reset_resume, + /* Linux USB core will allow autosuspend for devices bound to this driver */ + supports_autosuspend: 1 +#endif /* USB_SUSPEND_AVAILABLE */ +}; + +/** + * This stores USB info during Linux probe callback since attach() is not called yet at this point + */ +typedef 
struct { + void *usbos_info; + struct usb_device *usb; /* USB device pointer from OS */ + uint rx_pipe; /* Pipe numbers for USB I/O */ + uint tx_pipe; /* Pipe numbers for USB I/O */ + uint intr_pipe; /* Pipe numbers for USB I/O */ + uint rx_pipe2; /* Pipe numbers for USB I/O */ + int intr_size; /* Size of interrupt message */ + int interval; /* Interrupt polling interval */ + bool dldone; + int vid; + int pid; + bool dereged; + bool disc_cb_done; + DEVICE_SPEED device_speed; + enum usbos_suspend_state suspend_state; + struct usb_interface *intf; +} probe_info_t; + +/* + * USB Linux dbus_intf_t + */ +static void *dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs); +static void dbus_usbos_intf_detach(dbus_pub_t *pub, void *info); +static int dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb); +static int dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb); +static int dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx); +static int dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb); +static int dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len); +static int dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len); +static int dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib); +static int dbus_usbos_intf_up(void *bus); +static int dbus_usbos_intf_down(void *bus); +static int dbus_usbos_intf_stop(void *bus); +static int dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value); +extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size); +int dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data); +static int dbus_usbos_intf_set_config(void *bus, dbus_config_t *config); +static bool dbus_usbos_intf_recv_needed(void *bus); +static void *dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args); +static void *dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args); +#ifdef 
BCMUSBDEV_COMPOSITE +static int dbus_usbos_intf_wlan(struct usb_device *usb); +#endif /* BCMUSBDEV_COMPOSITE */ + +/** functions called by dbus_usb.c */ +static dbus_intf_t dbus_usbos_intf = { + .attach = dbus_usbos_intf_attach, + .detach = dbus_usbos_intf_detach, + .up = dbus_usbos_intf_up, + .down = dbus_usbos_intf_down, + .send_irb = dbus_usbos_intf_send_irb, + .recv_irb = dbus_usbos_intf_recv_irb, + .cancel_irb = dbus_usbos_intf_cancel_irb, + .send_ctl = dbus_usbos_intf_send_ctl, + .recv_ctl = dbus_usbos_intf_recv_ctl, + .get_stats = NULL, + .get_attrib = dbus_usbos_intf_get_attrib, + .remove = NULL, + .resume = NULL, + .suspend = NULL, + .stop = dbus_usbos_intf_stop, + .reset = NULL, + .pktget = NULL, + .pktfree = NULL, + .iovar_op = NULL, + .dump = NULL, + .set_config = dbus_usbos_intf_set_config, + .get_config = NULL, + .device_exists = NULL, + .dlneeded = NULL, + .dlstart = NULL, + .dlrun = NULL, + .recv_needed = dbus_usbos_intf_recv_needed, + .exec_rxlock = dbus_usbos_intf_exec_rxlock, + .exec_txlock = dbus_usbos_intf_exec_txlock, + + .tx_timer_init = NULL, + .tx_timer_start = NULL, + .tx_timer_stop = NULL, + + .sched_dpc = NULL, + .lock = NULL, + .unlock = NULL, + .sched_probe_cb = NULL, + + .shutdown = NULL, + + .recv_stop = NULL, + .recv_resume = NULL, + + .recv_irb_from_ep = dbus_usbos_intf_recv_irb_from_ep, + .readreg = dbus_usbos_readreg +}; + +static probe_info_t g_probe_info; +static probe_cb_t probe_cb = NULL; +static disconnect_cb_t disconnect_cb = NULL; +static void *probe_arg = NULL; +static void *disc_arg = NULL; + + + +static volatile int loopback_rx_cnt, loopback_tx_cnt; +int loopback_size; +bool is_loopback_pkt(void *buf); +int matches_loopback_pkt(void *buf); + +/** + * multiple code paths in this file dequeue a URB request, this function makes sure that it happens + * in a concurrency save manner. Don't call this from a sleepable process context. 
+ */ +static urb_req_t * BCMFASTPATH +dbus_usbos_qdeq(struct list_head *urbreq_q, spinlock_t *lock) +{ + unsigned long flags; + urb_req_t *req; + + ASSERT(urbreq_q != NULL); + + spin_lock_irqsave(lock, flags); + + if (list_empty(urbreq_q)) { + req = NULL; + } else { + ASSERT(urbreq_q->next != NULL); + ASSERT(urbreq_q->next != urbreq_q); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + req = list_entry(urbreq_q->next, urb_req_t, urb_list); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + list_del_init(&req->urb_list); + } + + spin_unlock_irqrestore(lock, flags); + + return req; +} + +static void BCMFASTPATH +dbus_usbos_qenq(struct list_head *urbreq_q, urb_req_t *req, spinlock_t *lock) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + list_add_tail(&req->urb_list, urbreq_q); + + spin_unlock_irqrestore(lock, flags); +} + +/** + * multiple code paths in this file remove a URB request from a list, this function makes sure that + * it happens in a concurrency save manner. Don't call this from a sleepable process context. + * Is quite similar to dbus_usbos_qdeq(), I wonder why this function is needed. + */ +static void +dbus_usbos_req_del(urb_req_t *req, spinlock_t *lock) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + list_del_init(&req->urb_list); + + spin_unlock_irqrestore(lock, flags); +} + + +/** + * Driver requires a pool of URBs to operate. 
This function is called during + * initialization (attach phase), allocates a number of URBs, and puts them + * on the free (req_rxfreeq and req_txfreeq) queue + */ +static int +dbus_usbos_urbreqs_alloc(usbos_info_t *usbos_info, uint32 count, bool is_rx) +{ + int i; + int allocated = 0; + int err = DBUS_OK; + + for (i = 0; i < count; i++) { + urb_req_t *req; + + req = MALLOC(usbos_info->pub->osh, sizeof(urb_req_t)); + if (req == NULL) { + DBUSERR(("%s: MALLOC req failed\n", __FUNCTION__)); + err = DBUS_ERR_NOMEM; + goto fail; + } + bzero(req, sizeof(urb_req_t)); + + req->urb = USB_ALLOC_URB(); + if (req->urb == NULL) { + DBUSERR(("%s: USB_ALLOC_URB req->urb failed\n", __FUNCTION__)); + err = DBUS_ERR_NOMEM; + goto fail; + } + + INIT_LIST_HEAD(&req->urb_list); + + if (is_rx) { +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* don't allocate now. Do it on demand */ + req->pkt = NULL; +#else + /* pre-allocate buffers never to be released */ + req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len); + if (req->pkt == NULL) { + DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__)); + err = DBUS_ERR_NOMEM; + goto fail; + } +#endif + req->buf_len = usbos_info->rxbuf_len; + dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock); + } else { + req->buf_len = 0; + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); + } + allocated++; + continue; + +fail: + if (req) { + if (is_rx && req->pkt) { +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* req->pkt is NULL in "NOCOPY" mode */ +#else + MFREE(usbos_info->pub->osh, req->pkt, req->buf_len); +#endif + } + if (req->urb) { + USB_FREE_URB(req->urb); + } + MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t)); + } + break; + } + + atomic_add(allocated, is_rx ? 
&usbos_info->rxallocated : &usbos_info->txallocated); + + if (is_rx) { + DBUSTRACE(("%s: add %d (total %d) rx buf, each has %d bytes\n", __FUNCTION__, + allocated, atomic_read(&usbos_info->rxallocated), usbos_info->rxbuf_len)); + } else { + DBUSTRACE(("%s: add %d (total %d) tx req\n", __FUNCTION__, + allocated, atomic_read(&usbos_info->txallocated))); + } + + return err; +} /* dbus_usbos_urbreqs_alloc */ + +/** Typically called during detach or when attach failed. Don't call until all URBs unlinked */ +static int +dbus_usbos_urbreqs_free(usbos_info_t *usbos_info, bool is_rx) +{ + int rtn = 0; + urb_req_t *req; + struct list_head *req_q; + spinlock_t *lock; + + if (is_rx) { + req_q = &usbos_info->req_rxfreeq; + lock = &usbos_info->rxfree_lock; + } else { + req_q = &usbos_info->req_txfreeq; + lock = &usbos_info->txfree_lock; + } + while ((req = dbus_usbos_qdeq(req_q, lock)) != NULL) { + + if (is_rx) { + if (req->pkt) { + /* We do MFREE instead of PKTFREE because the pkt has been + * converted to native already + */ + MFREE(usbos_info->pub->osh, req->pkt, req->buf_len); + req->pkt = NULL; + req->buf_len = 0; + } + } else { + /* sending req should not be assigned pkt buffer */ + ASSERT(req->pkt == NULL); + } + + if (req->urb) { + USB_FREE_URB(req->urb); + req->urb = NULL; + } + MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t)); + + rtn++; + } + return rtn; +} /* dbus_usbos_urbreqs_free */ + +/** + * called by Linux kernel on URB completion. Upper DBUS layer (dbus_usb.c) has to be notified of + * send completion. 
+ */ +void +dbus_usbos_send_complete(CALLBACK_ARGS) +{ + urb_req_t *req = urb->context; + dbus_irb_tx_t *txirb = req->arg; + usbos_info_t *usbos_info = req->usbinfo; + unsigned long flags; + int status = DBUS_OK; + int txposted; + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + + spin_lock_irqsave(&usbos_info->txlock, flags); + + dbus_usbos_req_del(req, &usbos_info->txposted_lock); + txposted = atomic_dec_return(&usbos_info->txposted); + if (unlikely (txposted < 0)) { + DBUSERR(("%s ERROR: txposted is negative (%d)!!\n", __FUNCTION__, txposted)); + } + spin_unlock_irqrestore(&usbos_info->txlock, flags); + + if (unlikely (urb->status)) { + status = DBUS_ERR_TXFAIL; + DBUSTRACE(("txfail status %d\n", urb->status)); + } + +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* sending req should not be assigned pkt buffer */ + ASSERT(req->pkt == NULL); +#endif + /* txirb should always be set, except for ZLP. ZLP is reusing this callback function. */ + if (txirb != NULL) { + if (txirb->send_buf != NULL) { + MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len); + txirb->send_buf = NULL; + req->buf_len = 0; + } + if (likely (usbos_info->cbarg && usbos_info->cbs)) { + if (likely (usbos_info->cbs->send_irb_complete != NULL)) + usbos_info->cbs->send_irb_complete(usbos_info->cbarg, txirb, status); + } + } + + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); +} /* dbus_usbos_send_complete */ + +/** + * In order to receive USB traffic from the dongle, we need to supply the Linux kernel with a free + * URB that is going to contain received data. 
+ */ +static int BCMFASTPATH +dbus_usbos_recv_urb_submit(usbos_info_t *usbos_info, dbus_irb_rx_t *rxirb, uint32 ep_idx) +{ + urb_req_t *req; + int ret = DBUS_OK; + unsigned long flags; + void *p; + uint rx_pipe; + int rxposted; + + BCM_REFERENCE(rxposted); + + if (!(req = dbus_usbos_qdeq(&usbos_info->req_rxfreeq, &usbos_info->rxfree_lock))) { + DBUSTRACE(("%s No free URB!\n", __FUNCTION__)); + return DBUS_ERR_RXDROP; + } + + spin_lock_irqsave(&usbos_info->rxlock, flags); + +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + req->pkt = rxirb->pkt = PKTGET(usbos_info->pub->osh, req->buf_len, FALSE); + if (!rxirb->pkt) { + DBUSERR(("%s: PKTGET failed\n", __FUNCTION__)); + dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock); + ret = DBUS_ERR_RXDROP; + goto fail; + } + /* consider the packet "native" so we don't count it as MALLOCED in the osl */ + PKTTONATIVE(usbos_info->pub->osh, req->pkt); + rxirb->buf = NULL; + p = PKTDATA(usbos_info->pub->osh, req->pkt); +#else + if (req->buf_len != usbos_info->rxbuf_len) { + ASSERT(req->pkt); + MFREE(usbos_info->pub->osh, req->pkt, req->buf_len); + DBUSTRACE(("%s: replace rx buff: old len %d, new len %d\n", __FUNCTION__, + req->buf_len, usbos_info->rxbuf_len)); + req->buf_len = 0; + req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len); + if (req->pkt == NULL) { + DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__)); + ret = DBUS_ERR_NOMEM; + goto fail; + } + req->buf_len = usbos_info->rxbuf_len; + } + rxirb->buf = req->pkt; + p = rxirb->buf; +#endif /* defined(BCM_RPC_NOCOPY) */ + rxirb->buf_len = req->buf_len; + req->usbinfo = usbos_info; + req->arg = rxirb; + if (ep_idx == 0) { + rx_pipe = usbos_info->rx_pipe; + } else { + rx_pipe = usbos_info->rx_pipe2; + ASSERT(usbos_info->rx_pipe2); + } + /* Prepare the URB */ + usb_fill_bulk_urb(req->urb, usbos_info->usb, rx_pipe, + p, + rxirb->buf_len, + (usb_complete_t)dbus_usbos_recv_complete, req); + req->urb->transfer_flags |= URB_QUEUE_BULK; + 
+ if ((ret = USB_SUBMIT_URB(req->urb))) {
+ DBUSERR(("%s USB_SUBMIT_URB failed. status %d\n", __FUNCTION__, ret));
+ dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+ ret = DBUS_ERR_RXFAIL;
+ goto fail;
+ }
+ rxposted = atomic_inc_return(&usbos_info->rxposted);
+
+ dbus_usbos_qenq(&usbos_info->req_rxpostedq, req, &usbos_info->rxposted_lock);
+fail:
+ spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+ return ret;
+} /* dbus_usbos_recv_urb_submit */
+
+
+/**
+ * Called by worker thread when a 'receive URB' completed or Linux kernel when it returns a URB to
+ * this driver.
+ */
+static void BCMFASTPATH
+dbus_usbos_recv_complete_handle(urb_req_t *req, int len, int status)
+{
+ dbus_irb_rx_t *rxirb = req->arg;
+ usbos_info_t *usbos_info = req->usbinfo;
+ unsigned long flags;
+ int rxallocated, rxposted;
+ int dbus_status = DBUS_OK;
+ bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 1 : 0;
+
+ spin_lock_irqsave(&usbos_info->rxlock, flags);
+ dbus_usbos_req_del(req, &usbos_info->rxposted_lock);
+ rxposted = atomic_dec_return(&usbos_info->rxposted);
+ rxallocated = atomic_read(&usbos_info->rxallocated);
+ spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+
+ if ((rxallocated < usbos_info->pub->nrxq) && (!status) &&
+ (rxposted == DBUS_USB_RXQUEUE_LOWER_WATERMARK)) {
+ DBUSTRACE(("%s: need more rx buf: rxallocated %d rxposted %d!\n",
+ __FUNCTION__, rxallocated, rxposted));
+ dbus_usbos_urbreqs_alloc(usbos_info,
+ MIN(DBUS_USB_RXQUEUE_BATCH_ADD,
+ usbos_info->pub->nrxq - rxallocated), TRUE);
+ }
+
+ /* Handle errors */
+ if (status) {
+ /*
+ * Linux 2.4 disconnect: -ENOENT or -EILSEQ for CRC error; rmmod: -ENOENT
+ * Linux 2.6 disconnect: -EPROTO, rmmod: -ESHUTDOWN
+ */
+ if ((status == -ENOENT && (!killed))|| status == -ESHUTDOWN) {
+ /* NOTE: unlink() can not be called from URB callback().
+ * Do not call dbusos_stop() here. 
+ */ + DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status)); + dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN); + } else if (status == -EPROTO) { + DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status)); + } else if (killed && (status == -EHOSTUNREACH || status == -ENOENT)) { + /* Device is suspended */ + } else { + DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status)); + dbus_usbos_errhandler(usbos_info, DBUS_ERR_RXFAIL); + } + + /* On error, don't submit more URBs yet */ + rxirb->buf = NULL; + rxirb->actual_len = 0; + dbus_status = DBUS_ERR_RXFAIL; + goto fail; + } + + /* Make the skb represent the received urb */ + rxirb->actual_len = len; + + if (rxirb->actual_len < sizeof(uint32)) { + DBUSTRACE(("small pkt len %d, process as ZLP\n", rxirb->actual_len)); + dbus_status = DBUS_ERR_RXZLP; + } + +fail: +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY) + /* detach the packet from the queue */ + req->pkt = NULL; +#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */ + + if (usbos_info->cbarg && usbos_info->cbs) { + if (usbos_info->cbs->recv_irb_complete) { + usbos_info->cbs->recv_irb_complete(usbos_info->cbarg, rxirb, dbus_status); + } + } + + dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock); + + /* Mark the interface as busy to reset USB autosuspend timer */ + USB_MARK_LAST_BUSY(usbos_info->usb); +} /* dbus_usbos_recv_complete_handle */ + +/** called by Linux kernel when it returns a URB to this driver */ +static void +dbus_usbos_recv_complete(CALLBACK_ARGS) +{ +#ifdef USBOS_THREAD + dbus_usbos_dispatch_schedule(CALLBACK_ARGS_DATA); +#else /* !USBOS_THREAD */ + dbus_usbos_recv_complete_handle(urb->context, urb->actual_length, urb->status); +#endif /* USBOS_THREAD */ +} + + +/** + * If Linux notifies our driver that a control read or write URB has completed, we should notify + * the DBUS layer above us (dbus_usb.c in this case). 
+ */ +static void +dbus_usbos_ctl_complete(usbos_info_t *usbos_info, int type, int urbstatus) +{ + int status = DBUS_ERR; + + if (usbos_info == NULL) + return; + + switch (urbstatus) { + case 0: + status = DBUS_OK; + break; + case -EINPROGRESS: + case -ENOENT: + default: +#ifdef INTR_EP_ENABLE + DBUSERR(("%s:%d fail status %d bus:%d susp:%d intr:%d ctli:%d ctlo:%d\n", + __FUNCTION__, type, urbstatus, + usbos_info->pub->busstate, g_probe_info.suspend_state, + usbos_info->intr_urb_submitted, usbos_info->ctlin_urb_submitted, + usbos_info->ctlout_urb_submitted)); +#else + DBUSERR(("%s: failed with status %d\n", __FUNCTION__, urbstatus)); + status = DBUS_ERR; + break; +#endif /* INTR_EP_ENABLE */ + } + + if (usbos_info->cbarg && usbos_info->cbs) { + if (usbos_info->cbs->ctl_complete) + usbos_info->cbs->ctl_complete(usbos_info->cbarg, type, status); + } +} + +/** called by Linux */ +static void +dbus_usbos_ctlread_complete(CALLBACK_ARGS) +{ + usbos_info_t *usbos_info = (usbos_info_t *)urb->context; + + ASSERT(urb); + usbos_info = (usbos_info_t *)urb->context; + + dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_READ, urb->status); + +#ifdef USBOS_THREAD + if (usbos_info->rxctl_deferrespok) { + usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS | + USB_RECIP_INTERFACE; + usbos_info->ctl_read.bRequest = 1; + } +#endif + + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); +} + +/** called by Linux */ +static void +dbus_usbos_ctlwrite_complete(CALLBACK_ARGS) +{ + usbos_info_t *usbos_info = (usbos_info_t *)urb->context; + + ASSERT(urb); + usbos_info = (usbos_info_t *)urb->context; + + dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_WRITE, urb->status); + +#ifdef USBOS_TX_THREAD + usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED; +#endif /* USBOS_TX_THREAD */ + + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); +} + +#ifdef INTR_EP_ENABLE +/** called by Linux */ +static void 
+dbus_usbos_intr_complete(CALLBACK_ARGS)
+{
+ usbos_info_t *usbos_info = (usbos_info_t *)urb->context;
+ bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 1 : 0;
+
+ if (usbos_info == NULL || usbos_info->pub == NULL)
+ return;
+ if ((urb->status == -ENOENT && (!killed)) || urb->status == -ESHUTDOWN ||
+ urb->status == -ENODEV) {
+ dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+ }
+
+ if (usbos_info->pub->busstate == DBUS_STATE_DOWN) {
+ DBUSERR(("%s: intr cb when DBUS down, ignoring\n", __FUNCTION__));
+ return;
+ }
+ dbus_usbos_ctl_complete(usbos_info, DBUS_CBINTR_POLL, urb->status);
+}
+#endif /* INTR_EP_ENABLE */
+
+/**
+ * when the bus is going to sleep or halt, the Linux kernel requires us to take ownership of our
+ * URBs again. Multiple code paths in this file require a list of URBs to be cancelled in a
+ * concurrency-safe manner.
+ */
+static void
+dbus_usbos_unlink(struct list_head *urbreq_q, spinlock_t *lock)
+{
+ urb_req_t *req;
+
+ /* dbus_usbos_recv_complete() adds req back to req_freeq */
+ while ((req = dbus_usbos_qdeq(urbreq_q, lock)) != NULL) {
+ ASSERT(req->urb != NULL);
+ USB_UNLINK_URB(req->urb);
+ }
+}
+
+/** multiple code paths in this file require the bus to stop */
+static void
+dbus_usbos_cancel_all_urbs(usbos_info_t *usbos_info)
+{
+ int rxposted, txposted;
+
+ DBUSTRACE(("%s: unlink all URBs\n", __FUNCTION__));
+
+#ifdef USBOS_TX_THREAD
+ usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+
+ /* Yield the CPU to TX thread so all pending requests are submitted */
+ while (!list_empty(&usbos_info->usbos_tx_list)) {
+ wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+ OSL_SLEEP(10);
+ }
+#endif /* USBOS_TX_THREAD */
+
+ /* tell Linux kernel to cancel a single intr, ctl and blk URB */
+ if (usbos_info->intr_urb)
+ USB_UNLINK_URB(usbos_info->intr_urb);
+ if (usbos_info->ctl_urb)
+ USB_UNLINK_URB(usbos_info->ctl_urb);
+ if (usbos_info->blk_urb)
+ USB_UNLINK_URB(usbos_info->blk_urb);
+
+ 
dbus_usbos_unlink(&usbos_info->req_txpostedq, &usbos_info->txposted_lock);
+ dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock);
+
+ /* Wait until the callbacks for all submitted URBs have been called, because the
+ * handler needs to know if a USB suspend is in progress.
+ */
+ SPINWAIT((atomic_read(&usbos_info->txposted) != 0 ||
+ atomic_read(&usbos_info->rxposted) != 0), 10000);
+
+ txposted = atomic_read(&usbos_info->txposted);
+ rxposted = atomic_read(&usbos_info->rxposted);
+ if (txposted != 0 || rxposted != 0) {
+ DBUSERR(("%s ERROR: REQs posted, rx=%d tx=%d!\n",
+ __FUNCTION__, rxposted, txposted));
+ }
+} /* dbus_usbos_cancel_all_urbs */
+
+/** multiple code paths require the bus to stop */
+static void
+dbusos_stop(usbos_info_t *usbos_info)
+{
+ urb_req_t *req;
+ int rxposted;
+ req = NULL;
+ BCM_REFERENCE(req);
+
+ ASSERT(usbos_info);
+
+ dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+
+ dbus_usbos_cancel_all_urbs(usbos_info);
+
+#ifdef USBOS_THREAD
+ /* yield the CPU to rx packet thread */
+ while (1) {
+ if (atomic_read(&usbos_info->usbos_list_cnt) <= 0) break;
+ wake_up_interruptible(&usbos_info->usbos_queue_head);
+ OSL_SLEEP(3);
+ }
+#endif /* USBOS_THREAD */
+
+ rxposted = atomic_read(&usbos_info->rxposted);
+ if (rxposted > 0) {
+ DBUSERR(("%s ERROR: rx REQs posted=%d in stop!\n", __FUNCTION__,
+ rxposted));
+ }
+
+ ASSERT(atomic_read(&usbos_info->txposted) == 0 && rxposted == 0);
+
+} /* dbusos_stop */
+
+#if defined(USB_SUSPEND_AVAILABLE)
+
+/**
+ * Linux kernel sports a 'USB auto suspend' feature. See: http://lwn.net/Articles/373550/
+ * The suspend method is called by the Linux kernel to warn the driver that the device is going to
+ * be suspended. If the driver returns a negative error code, the suspend will be aborted. If the
+ * driver returns 0, it must cancel all outstanding URBs (usb_kill_urb()) and not submit any more. 
+ */ +static int +dbus_usbos_suspend(struct usb_interface *intf, + pm_message_t message) +{ + DBUSERR(("%s suspend state: %d\n", __FUNCTION__, g_probe_info.suspend_state)); + /* DHD for full dongle model */ + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPEND_PENDING; + dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_SLEEP); + dbus_usbos_cancel_all_urbs((usbos_info_t*)g_probe_info.usbos_info); + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPENDED; + + return 0; +} + +/** + * The resume method is called to tell the driver that the device has been resumed and the driver + * can return to normal operation. URBs may once more be submitted. + */ +static int dbus_usbos_resume(struct usb_interface *intf) +{ + DBUSERR(("%s Device resumed\n", __FUNCTION__)); + + dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_UP); + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE; + return 0; +} + +/** +* This function is directly called by the Linux kernel, when the suspended device has been reset +* instead of being resumed +*/ +static int dbus_usbos_reset_resume(struct usb_interface *intf) +{ + DBUSERR(("%s Device reset resumed\n", __FUNCTION__)); + + /* The device may have lost power, so a firmware download may be required */ + dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_DL_NEEDED); + g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE; + return 0; +} + +#endif /* USB_SUSPEND_AVAILABLE */ + +/** + * Called by Linux kernel at initialization time, kernel wants to know if our driver will accept the + * caller supplied USB interface. Note that USB drivers are bound to interfaces, and not to USB + * devices. 
+ */ +#ifdef KERNEL26 +#define DBUS_USBOS_PROBE() static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id) +#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_interface *intf) +#else +#define DBUS_USBOS_PROBE() static void * dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum, const struct usb_device_id *id) +#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr) +#endif /* KERNEL26 */ + +DBUS_USBOS_PROBE() +{ + int ep; + struct usb_endpoint_descriptor *endpoint; + int ret = 0; +#ifdef KERNEL26 + struct usb_device *usb = interface_to_usbdev(intf); +#else + int claimed = 0; +#endif + int num_of_eps; +#ifdef BCMUSBDEV_COMPOSITE + int wlan_if = -1; + bool intr_ep = FALSE; +#endif /* BCMUSBDEV_COMPOSITE */ + wifi_adapter_info_t *adapter; + + DHD_MUTEX_LOCK(); + + DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__, + usb->bus->busnum, usb->portnum)); + adapter = dhd_wifi_platform_attach_adapter(USB_BUS, usb->bus->busnum, + usb->portnum, WIFI_STATUS_POWER_ON); + if (adapter == NULL) { + DBUSERR(("%s: can't find adapter info for this chip\n", __FUNCTION__)); + goto fail; + } + +#ifdef BCMUSBDEV_COMPOSITE + wlan_if = dbus_usbos_intf_wlan(usb); +#ifdef KERNEL26 + if ((wlan_if >= 0) && (IFPTR(usb, wlan_if) == intf)) +#else + if (wlan_if == ifnum) +#endif /* KERNEL26 */ + { +#endif /* BCMUSBDEV_COMPOSITE */ + g_probe_info.usb = usb; + g_probe_info.dldone = TRUE; +#ifdef BCMUSBDEV_COMPOSITE + } else { + DBUSTRACE(("dbus_usbos_probe: skip probe for non WLAN interface\n")); + ret = BCME_UNSUPPORTED; + goto fail; + } +#endif /* BCMUSBDEV_COMPOSITE */ + +#ifdef KERNEL26 + g_probe_info.intf = intf; +#endif /* KERNEL26 */ + +#ifdef BCMUSBDEV_COMPOSITE + if (IFDESC(usb, wlan_if).bInterfaceNumber > USB_COMPIF_MAX) +#else + if (IFDESC(usb, CONTROL_IF).bInterfaceNumber) +#endif /* BCMUSBDEV_COMPOSITE */ + { + ret = -1; + goto fail; + } + if (id != NULL) { 
+ g_probe_info.vid = id->idVendor; + g_probe_info.pid = id->idProduct; + } + +#ifdef KERNEL26 + usb_set_intfdata(intf, &g_probe_info); +#endif + + /* Check that the device supports only one configuration */ + if (usb->descriptor.bNumConfigurations != 1) { + ret = -1; + goto fail; + } + + if (usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) { +#ifdef BCMUSBDEV_COMPOSITE + if ((usb->descriptor.bDeviceClass != USB_CLASS_MISC) && + (usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS)) { +#endif /* BCMUSBDEV_COMPOSITE */ + ret = -1; + goto fail; +#ifdef BCMUSBDEV_COMPOSITE + } +#endif /* BCMUSBDEV_COMPOSITE */ + } + + /* + * Only the BDC interface configuration is supported: + * Device class: USB_CLASS_VENDOR_SPEC + * if0 class: USB_CLASS_VENDOR_SPEC + * if0/ep0: control + * if0/ep1: bulk in + * if0/ep2: bulk out (ok if swapped with bulk in) + */ + if (CONFIGDESC(usb)->bNumInterfaces != 1) { +#ifdef BCMUSBDEV_COMPOSITE + if (CONFIGDESC(usb)->bNumInterfaces > USB_COMPIF_MAX) { +#endif /* BCMUSBDEV_COMPOSITE */ + ret = -1; + goto fail; +#ifdef BCMUSBDEV_COMPOSITE + } +#endif /* BCMUSBDEV_COMPOSITE */ + } + + /* Check interface */ +#ifndef KERNEL26 +#ifdef BCMUSBDEV_COMPOSITE + if (usb_interface_claimed(IFPTR(usb, wlan_if))) +#else + if (usb_interface_claimed(IFPTR(usb, CONTROL_IF))) +#endif /* BCMUSBDEV_COMPOSITE */ + { + ret = -1; + goto fail; + } +#endif /* !KERNEL26 */ + +#ifdef BCMUSBDEV_COMPOSITE + if ((IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_VENDOR_SPEC || + IFDESC(usb, wlan_if).bInterfaceSubClass != 2 || + IFDESC(usb, wlan_if).bInterfaceProtocol != 0xff) && + (IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_MISC || + IFDESC(usb, wlan_if).bInterfaceSubClass != USB_SUBCLASS_COMMON || + IFDESC(usb, wlan_if).bInterfaceProtocol != USB_PROTO_IAD)) +#else + if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC || + IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 || + IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) +#endif /* 
BCMUSBDEV_COMPOSITE */ + { +#ifdef BCMUSBDEV_COMPOSITE + DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n", + __FUNCTION__, + IFDESC(usb, wlan_if).bInterfaceClass, + IFDESC(usb, wlan_if).bInterfaceSubClass, + IFDESC(usb, wlan_if).bInterfaceProtocol)); +#else + DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n", + __FUNCTION__, + IFDESC(usb, CONTROL_IF).bInterfaceClass, + IFDESC(usb, CONTROL_IF).bInterfaceSubClass, + IFDESC(usb, CONTROL_IF).bInterfaceProtocol)); +#endif /* BCMUSBDEV_COMPOSITE */ + ret = -1; + goto fail; + } + + /* Check control endpoint */ +#ifdef BCMUSBDEV_COMPOSITE + endpoint = &IFEPDESC(usb, wlan_if, 0); +#else + endpoint = &IFEPDESC(usb, CONTROL_IF, 0); +#endif /* BCMUSBDEV_COMPOSITE */ + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) { +#ifdef BCMUSBDEV_COMPOSITE + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != + USB_ENDPOINT_XFER_BULK) { +#endif /* BCMUSBDEV_COMPOSITE */ + DBUSERR(("%s: invalid control endpoint %d\n", + __FUNCTION__, endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)); + ret = -1; + goto fail; +#ifdef BCMUSBDEV_COMPOSITE + } +#endif /* BCMUSBDEV_COMPOSITE */ + } + +#ifdef BCMUSBDEV_COMPOSITE + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { +#endif /* BCMUSBDEV_COMPOSITE */ + g_probe_info.intr_pipe = + usb_rcvintpipe(usb, endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); +#ifdef BCMUSBDEV_COMPOSITE + intr_ep = TRUE; + } +#endif /* BCMUSBDEV_COMPOSITE */ + +#ifndef KERNEL26 + /* Claim interface */ +#ifdef BCMUSBDEV_COMPOSITE + usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, wlan_if), &g_probe_info); +#else + usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF), &g_probe_info); +#endif /* BCMUSBDEV_COMPOSITE */ + claimed = 1; +#endif /* !KERNEL26 */ + g_probe_info.rx_pipe = 0; + g_probe_info.rx_pipe2 = 0; + g_probe_info.tx_pipe = 0; +#ifdef BCMUSBDEV_COMPOSITE + if 
(intr_ep) + ep = 1; + else + ep = 0; + num_of_eps = IFDESC(usb, wlan_if).bNumEndpoints - 1; +#else + num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1; +#endif /* BCMUSBDEV_COMPOSITE */ + + if ((num_of_eps != 2) && (num_of_eps != 3)) { +#ifdef BCMUSBDEV_COMPOSITE + if (num_of_eps > 7) +#endif /* BCMUSBDEV_COMPOSITE */ + ASSERT(0); + } + /* Check data endpoints and get pipes */ +#ifdef BCMUSBDEV_COMPOSITE + for (; ep <= num_of_eps; ep++) +#else + for (ep = 1; ep <= num_of_eps; ep++) +#endif /* BCMUSBDEV_COMPOSITE */ + { +#ifdef BCMUSBDEV_COMPOSITE + endpoint = &IFEPDESC(usb, wlan_if, ep); +#else + endpoint = &IFEPDESC(usb, BULK_IF, ep); +#endif /* BCMUSBDEV_COMPOSITE */ + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != + USB_ENDPOINT_XFER_BULK) { + DBUSERR(("%s: invalid data endpoint %d\n", + __FUNCTION__, ep)); + ret = -1; + goto fail; + } + + if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) { + /* direction: dongle->host */ + if (!g_probe_info.rx_pipe) { + g_probe_info.rx_pipe = usb_rcvbulkpipe(usb, + (endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)); + } else { + g_probe_info.rx_pipe2 = usb_rcvbulkpipe(usb, + (endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)); + } + + } else + g_probe_info.tx_pipe = usb_sndbulkpipe(usb, (endpoint->bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK)); + } + + /* Allocate interrupt URB and data buffer */ + /* RNDIS says 8-byte intr, our old drivers used 4-byte */ +#ifdef BCMUSBDEV_COMPOSITE + g_probe_info.intr_size = (IFEPDESC(usb, wlan_if, 0).wMaxPacketSize == 16) ? 8 : 4; + g_probe_info.interval = IFEPDESC(usb, wlan_if, 0).bInterval; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)) + usb->quirks |= USB_QUIRK_NO_SET_INTF; +#endif +#else + g_probe_info.intr_size = (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == 16) ? 
8 : 4; + g_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval; +#endif /* BCMUSBDEV_COMPOSITE */ + +#ifndef KERNEL26 + /* usb_fill_int_urb does the interval decoding in 2.6 */ + if (usb->speed == USB_SPEED_HIGH) + g_probe_info.interval = 1 << (g_probe_info.interval - 1); +#endif + if (usb->speed == USB_SPEED_SUPER) { + g_probe_info.device_speed = SUPER_SPEED; + DBUSERR(("super speed device detected\n")); + } else if (usb->speed == USB_SPEED_HIGH) { + g_probe_info.device_speed = HIGH_SPEED; + DBUSERR(("high speed device detected\n")); + } else { + g_probe_info.device_speed = FULL_SPEED; + DBUSERR(("full speed device detected\n")); + } + if (g_probe_info.dereged == FALSE && probe_cb) { + disc_arg = probe_cb(probe_arg, "", USB_BUS, usb->bus->busnum, usb->portnum, 0); + } + + g_probe_info.disc_cb_done = FALSE; + +#ifdef KERNEL26 + intf->needs_remote_wakeup = 1; +#endif /* KERNEL26 */ + DHD_MUTEX_UNLOCK(); + + /* Success */ +#ifdef KERNEL26 + return DBUS_OK; +#else + usb_inc_dev_use(usb); + return &g_probe_info; +#endif + +fail: + printf("%s: Exit ret=%d\n", __FUNCTION__, ret); +#ifdef BCMUSBDEV_COMPOSITE + if (ret != BCME_UNSUPPORTED) +#endif /* BCMUSBDEV_COMPOSITE */ + DBUSERR(("%s: failed with errno %d\n", __FUNCTION__, ret)); +#ifndef KERNEL26 + if (claimed) +#ifdef BCMUSBDEV_COMPOSITE + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if)); +#else + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF)); +#endif /* BCMUSBDEV_COMPOSITE */ +#endif /* !KERNEL26 */ + + DHD_MUTEX_UNLOCK(); +#ifdef KERNEL26 + usb_set_intfdata(intf, NULL); + return ret; +#else + return NULL; +#endif +} /* dbus_usbos_probe */ + +/** Called by Linux kernel, is the counter part of dbus_usbos_probe() */ +DBUS_USBOS_DISCONNECT() +{ +#ifdef KERNEL26 + struct usb_device *usb = interface_to_usbdev(intf); + probe_info_t *probe_usb_init_data = usb_get_intfdata(intf); +#else + probe_info_t *probe_usb_init_data = (probe_info_t *) ptr; +#endif + usbos_info_t 
*usbos_info; + + DHD_MUTEX_LOCK(); + + DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__, + usb->bus->busnum, usb->portnum)); + + if (probe_usb_init_data) { + usbos_info = (usbos_info_t *) probe_usb_init_data->usbos_info; + if (usbos_info) { + if ((probe_usb_init_data->dereged == FALSE) && disconnect_cb && disc_arg) { + disconnect_cb(disc_arg); + disc_arg = NULL; + probe_usb_init_data->disc_cb_done = TRUE; + } + } + } + + if (usb) { +#ifndef KERNEL26 +#ifdef BCMUSBDEV_COMPOSITE + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if)); +#else + usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF)); +#endif /* BCMUSBDEV_COMPOSITE */ + usb_dec_dev_use(usb); +#endif /* !KERNEL26 */ + } + DHD_MUTEX_UNLOCK(); +} /* dbus_usbos_disconnect */ + +#define LOOPBACK_PKT_START 0xBABE1234 + +bool is_loopback_pkt(void *buf) +{ + + uint32 *buf_ptr = (uint32 *) buf; + + if (*buf_ptr == LOOPBACK_PKT_START) + return TRUE; + return FALSE; + +} + +int matches_loopback_pkt(void *buf) +{ + int i, j; + unsigned char *cbuf = (unsigned char *) buf; + + for (i = 4; i < loopback_size; i++) { + if (cbuf[i] != (i % 256)) { + printf("%s: mismatch at i=%d %d : ", __FUNCTION__, i, cbuf[i]); + for (j = i; ((j < i+ 16) && (j < loopback_size)); j++) { + printf("%d ", cbuf[j]); + } + printf("\n"); + return 0; + } + } + loopback_rx_cnt++; + return 1; +} + +int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size) +{ + usbos_info_t *usbos_info = (usbos_info_t *) usbos_info_ptr; + unsigned char *buf; + int j; + void* p = NULL; + int rc, last_rx_cnt; + int tx_failed_cnt; + int max_size = 1650; + int usb_packet_size = 512; + int min_packet_size = 10; + + if (size % usb_packet_size == 0) { + size = size - 1; + DBUSERR(("%s: overriding size=%d \n", __FUNCTION__, size)); + } + + if (size < min_packet_size) { + size = min_packet_size; + DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, min_packet_size)); + } + if (size > max_size) { + size = max_size; + 
DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, max_size)); + } + + loopback_tx_cnt = 0; + loopback_rx_cnt = 0; + tx_failed_cnt = 0; + loopback_size = size; + + while (loopback_tx_cnt < cnt) { + uint32 *x; + int pkt_size = loopback_size; + + p = PKTGET(usbos_info->pub->osh, pkt_size, TRUE); + if (p == NULL) { + DBUSERR(("%s:%d Failed to allocate packet sz=%d\n", + __FUNCTION__, __LINE__, pkt_size)); + return BCME_ERROR; + } + x = (uint32*) PKTDATA(usbos_info->pub->osh, p); + *x = LOOPBACK_PKT_START; + buf = (unsigned char*) x; + for (j = 4; j < pkt_size; j++) { + buf[j] = j % 256; + } + rc = dbus_send_buf(usbos_info->pub, buf, pkt_size, p); + if (rc != BCME_OK) { + DBUSERR(("%s:%d Freeing packet \n", __FUNCTION__, __LINE__)); + PKTFREE(usbos_info->pub->osh, p, TRUE); + dbus_usbos_wait(usbos_info, 1); + tx_failed_cnt++; + } else { + loopback_tx_cnt++; + tx_failed_cnt = 0; + } + if (tx_failed_cnt == 5) { + DBUSERR(("%s : Failed to send loopback packets cnt=%d loopback_tx_cnt=%d\n", + __FUNCTION__, cnt, loopback_tx_cnt)); + break; + } + } + printf("Transmitted %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size); + + last_rx_cnt = loopback_rx_cnt; + while (loopback_rx_cnt < loopback_tx_cnt) { + dbus_usbos_wait(usbos_info, 1); + if (loopback_rx_cnt <= last_rx_cnt) { + DBUSERR(("%s: Matched rx cnt stuck at %d \n", __FUNCTION__, last_rx_cnt)); + return BCME_ERROR; + } + last_rx_cnt = loopback_rx_cnt; + } + printf("Received %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size); + + return BCME_OK; +} /* dbus_usbos_loopback_tx */ + +/** + * Higher layer (dbus_usb.c) wants to transmit an I/O Request Block + * @param[in] txirb txirb->pkt, if non-zero, contains a single or a chain of packets + */ +static int +dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + urb_req_t *req, *req_zlp = NULL; + int ret = DBUS_OK; + unsigned long flags; + void *pkt; + uint32 buffer_length; + uint8 *buf; 
+ + if ((usbos_info == NULL) || !usbos_info->tx_pipe) { + return DBUS_ERR; + } + + if (txirb->pkt != NULL) { + buffer_length = pkttotlen(usbos_info->pub->osh, txirb->pkt); + /* In case of multiple packets the values below may be overwritten */ + txirb->send_buf = NULL; + buf = PKTDATA(usbos_info->pub->osh, txirb->pkt); + } else { /* txirb->buf != NULL */ + ASSERT(txirb->buf != NULL); + ASSERT(txirb->send_buf == NULL); + buffer_length = txirb->len; + buf = txirb->buf; + } + + if (!(req = dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) { + DBUSERR(("%s No free URB!\n", __FUNCTION__)); + return DBUS_ERR_TXDROP; + } + + /* If not using standard Linux kernel functionality for handling Zero Length Packet(ZLP), + * the dbus needs to generate ZLP when length is multiple of MaxPacketSize. + */ +#ifndef WL_URB_ZPKT + if (!(buffer_length % usbos_info->maxps)) { + if (!(req_zlp = + dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) { + DBUSERR(("%s No free URB for ZLP!\n", __FUNCTION__)); + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); + return DBUS_ERR_TXDROP; + } + + /* No txirb, so that dbus_usbos_send_complete can differentiate between + * DATA and ZLP. + */ + req_zlp->arg = NULL; + req_zlp->usbinfo = usbos_info; + req_zlp->buf_len = 0; + + usb_fill_bulk_urb(req_zlp->urb, usbos_info->usb, usbos_info->tx_pipe, NULL, + 0, (usb_complete_t)dbus_usbos_send_complete, req_zlp); + + req_zlp->urb->transfer_flags |= URB_QUEUE_BULK; + } +#endif /* !WL_URB_ZPKT */ + +#ifndef USBOS_TX_THREAD + /* Disable USB autosuspend until this request completes, request USB resume if needed. + * Because this call runs asynchronously, there is no guarantee the bus is resumed before + * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid + * this. 
+ */ + USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf); +#endif /* !USBOS_TX_THREAD */ + + spin_lock_irqsave(&usbos_info->txlock, flags); + + req->arg = txirb; + req->usbinfo = usbos_info; + req->buf_len = 0; + + /* Prepare the URB */ + if (txirb->pkt != NULL) { + uint32 pktlen; + uint8 *transfer_buf; + + /* For multiple packets, allocate contiguous buffer and copy packet data to it */ + if (PKTNEXT(usbos_info->pub->osh, txirb->pkt)) { + transfer_buf = MALLOC(usbos_info->pub->osh, buffer_length); + if (!transfer_buf) { + ret = DBUS_ERR_TXDROP; + DBUSERR(("fail to alloc to usb buffer\n")); + goto fail; + } + + pkt = txirb->pkt; + txirb->send_buf = transfer_buf; + req->buf_len = buffer_length; + + while (pkt) { + pktlen = PKTLEN(usbos_info->pub->osh, pkt); + bcopy(PKTDATA(usbos_info->pub->osh, pkt), transfer_buf, pktlen); + transfer_buf += pktlen; + pkt = PKTNEXT(usbos_info->pub->osh, pkt); + } + + ASSERT(((uint8 *) txirb->send_buf + buffer_length) == transfer_buf); + + /* Overwrite buf pointer with pointer to allocated contiguous transfer_buf + */ + buf = txirb->send_buf; + } + } + + usb_fill_bulk_urb(req->urb, usbos_info->usb, usbos_info->tx_pipe, buf, + buffer_length, (usb_complete_t)dbus_usbos_send_complete, req); + + req->urb->transfer_flags |= URB_QUEUE_BULK; + +#ifdef USBOS_TX_THREAD + /* Enqueue TX request, the TX thread will resume the bus if needed and submit + * it asynchronously + */ + dbus_usbos_qenq(&usbos_info->usbos_tx_list, req, &usbos_info->usbos_tx_list_lock); + if (req_zlp != NULL) { + dbus_usbos_qenq(&usbos_info->usbos_tx_list, req_zlp, + &usbos_info->usbos_tx_list_lock); + } + spin_unlock_irqrestore(&usbos_info->txlock, flags); + + wake_up_interruptible(&usbos_info->usbos_tx_queue_head); + return DBUS_OK; +#else + if ((ret = USB_SUBMIT_URB(req->urb))) { + ret = DBUS_ERR_TXDROP; + goto fail; + } + + dbus_usbos_qenq(&usbos_info->req_txpostedq, req, &usbos_info->txposted_lock); + atomic_inc(&usbos_info->txposted); + + if (req_zlp != NULL) { + if 
((ret = USB_SUBMIT_URB(req_zlp->urb))) { + DBUSERR(("failed to submit ZLP URB!\n")); + ASSERT(0); + ret = DBUS_ERR_TXDROP; + goto fail2; + } + + dbus_usbos_qenq(&usbos_info->req_txpostedq, req_zlp, &usbos_info->txposted_lock); + /* Also increment txposted for zlp packet, as it will be decremented in + * dbus_usbos_send_complete() + */ + atomic_inc(&usbos_info->txposted); + } + + spin_unlock_irqrestore(&usbos_info->txlock, flags); + return DBUS_OK; +#endif /* USBOS_TX_THREAD */ + +fail: + if (txirb->send_buf != NULL) { + MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len); + txirb->send_buf = NULL; + req->buf_len = 0; + } + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); +#ifndef USBOS_TX_THREAD +fail2: +#endif + if (req_zlp != NULL) { + dbus_usbos_qenq(&usbos_info->req_txfreeq, req_zlp, &usbos_info->txfree_lock); + } + + spin_unlock_irqrestore(&usbos_info->txlock, flags); + +#ifndef USBOS_TX_THREAD + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); +#endif /* !USBOS_TX_THREAD */ + + return ret; +} /* dbus_usbos_intf_send_irb */ + +/** Higher layer (dbus_usb.c) recycles a received (and used) packet. 
 */
static int
dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb)
{
	usbos_info_t *usbos_info = (usbos_info_t *) bus;
	int ret = DBUS_OK;

	if (usbos_info == NULL)
		return DBUS_ERR;

	/* Re-post the RX IRB on the default (primary) bulk-in endpoint (ep_idx 0) */
	ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, 0);
	return ret;
}

/**
 * Submit a receive IRB on a specific endpoint index. Return value is whatever
 * dbus_usbos_recv_urb_submit() (or the intr URB submission) reports.
 */
static int
dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx)
{
	usbos_info_t *usbos_info = (usbos_info_t *) bus;
	int ret = DBUS_OK;

	if (usbos_info == NULL)
		return DBUS_ERR;

#ifdef INTR_EP_ENABLE
	/* By specifying the ep_idx value of 0xff, the cdc layer is asking to
	 * submit an interrupt URB
	 */
	if (rxirb == NULL && ep_idx == 0xff) {
		/* submit intr URB */
		if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb)) < 0) {
			DBUSERR(("%s intr USB_SUBMIT_URB failed, status %d\n",
				__FUNCTION__, ret));
		}
		return ret;
	}
#else
	/* Without the interrupt EP, a NULL rxirb has no meaning here */
	if (rxirb == NULL) {
		return DBUS_ERR;
	}
#endif /* INTR_EP_ENABLE */

	ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, ep_idx);
	return ret;
}

/** Higher layer (dbus_usb.c) want to cancel an IRB */
static int
dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb)
{
	usbos_info_t *usbos_info = (usbos_info_t *) bus;

	if (usbos_info == NULL)
		return DBUS_ERR;

	/* NOTE(review): txirb is never used and DBUS_ERR is always returned —
	 * IRB cancellation appears to be unsupported in this implementation.
	 * Confirm callers treat DBUS_ERR from cancel as "not cancellable".
	 */
	return DBUS_ERR;
}

/** Only one CTL transfer can be pending at any time. This function may block.
*/ +static int +dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + uint16 size; +#ifndef USBOS_TX_THREAD + int status; +#endif /* USBOS_TX_THREAD */ + + if ((usbos_info == NULL) || (buf == NULL) || (len == 0)) + return DBUS_ERR; + + if (usbos_info->ctl_urb == NULL) + return DBUS_ERR; + + /* Block until a pending CTL transfer has completed */ + if (down_interruptible(&usbos_info->ctl_lock) != 0) { + return DBUS_ERR_TXCTLFAIL; + } + +#ifdef USBOS_TX_THREAD + ASSERT(usbos_info->ctl_state == USBOS_REQUEST_STATE_UNSCHEDULED); +#else + /* Disable USB autosuspend until this request completes, request USB resume if needed. + * Because this call runs asynchronously, there is no guarantee the bus is resumed before + * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid + * this. + */ + USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf); +#endif /* USBOS_TX_THREAD */ + + size = len; + usbos_info->ctl_write.wLength = cpu_to_le16p(&size); + usbos_info->ctl_urb->transfer_buffer_length = size; + + usb_fill_control_urb(usbos_info->ctl_urb, + usbos_info->usb, + usb_sndctrlpipe(usbos_info->usb, 0), + (unsigned char *) &usbos_info->ctl_write, + buf, size, (usb_complete_t)dbus_usbos_ctlwrite_complete, usbos_info); + +#ifdef USBOS_TX_THREAD + /* Enqueue CTRL request for transmission by the TX thread. The + * USB bus will first be resumed if needed. 
+ */ + usbos_info->ctl_state = USBOS_REQUEST_STATE_SCHEDULED; + wake_up_interruptible(&usbos_info->usbos_tx_queue_head); +#else + status = USB_SUBMIT_URB(usbos_info->ctl_urb); + if (status < 0) { + DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status)); + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + + return DBUS_ERR_TXCTLFAIL; + } +#endif /* USBOS_TX_THREAD */ + + return DBUS_OK; +} /* dbus_usbos_intf_send_ctl */ + +/** This function does not seem to be called by anyone, including dbus_usb.c */ +static int +dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + int status; + uint16 size; + + if ((usbos_info == NULL) || (buf == NULL) || (len == 0)) + return DBUS_ERR; + + if (usbos_info->ctl_urb == NULL) + return DBUS_ERR; + + /* Block until a pending CTRL transfer has completed */ + if (down_interruptible(&usbos_info->ctl_lock) != 0) { + return DBUS_ERR_TXCTLFAIL; + } + + /* Disable USB autosuspend until this request completes, request USB resume if needed. 
*/ + USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf); + + size = len; + usbos_info->ctl_read.wLength = cpu_to_le16p(&size); + usbos_info->ctl_urb->transfer_buffer_length = size; + + if (usbos_info->rxctl_deferrespok) { + /* BMAC model */ + usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE; + usbos_info->ctl_read.bRequest = DL_DEFER_RESP_OK; + } else { + /* full dongle model */ + usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS | + USB_RECIP_INTERFACE; + usbos_info->ctl_read.bRequest = 1; + } + + usb_fill_control_urb(usbos_info->ctl_urb, + usbos_info->usb, + usb_rcvctrlpipe(usbos_info->usb, 0), + (unsigned char *) &usbos_info->ctl_read, + buf, size, (usb_complete_t)dbus_usbos_ctlread_complete, usbos_info); + + status = USB_SUBMIT_URB(usbos_info->ctl_urb); + if (status < 0) { + DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status)); + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + + return DBUS_ERR_RXCTLFAIL; + } + + return DBUS_OK; +} + +static int +dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if ((usbos_info == NULL) || (attrib == NULL)) + return DBUS_ERR; + + attrib->bustype = DBUS_USB; + attrib->vid = g_probe_info.vid; + attrib->pid = g_probe_info.pid; + attrib->devid = 0x4322; + + attrib->nchan = 1; + + /* MaxPacketSize for USB hi-speed bulk out is 512 bytes + * and 64-bytes for full-speed. + * When sending pkt > MaxPacketSize, Host SW breaks it + * up into multiple packets. 
+ */ + attrib->mtu = usbos_info->maxps; + + return DBUS_OK; +} + +/** Called by higher layer (dbus_usb.c) when it wants to 'up' the USB interface to the dongle */ +static int +dbus_usbos_intf_up(void *bus) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + uint16 ifnum; +#ifdef BCMUSBDEV_COMPOSITE + int wlan_if = 0; +#endif + if (usbos_info == NULL) + return DBUS_ERR; + + if (usbos_info->usb == NULL) + return DBUS_ERR; + +#if defined(INTR_EP_ENABLE) + /* full dongle use intr EP, bmac doesn't use it */ + if (usbos_info->intr_urb) { + int ret; + + usb_fill_int_urb(usbos_info->intr_urb, usbos_info->usb, + usbos_info->intr_pipe, &usbos_info->intr, + usbos_info->intr_size, (usb_complete_t)dbus_usbos_intr_complete, + usbos_info, usbos_info->interval); + + if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb))) { + DBUSERR(("%s USB_SUBMIT_URB failed with status %d\n", __FUNCTION__, ret)); + return DBUS_ERR; + } + } +#endif + + if (usbos_info->ctl_urb) { + usbos_info->ctl_in_pipe = usb_rcvctrlpipe(usbos_info->usb, 0); + usbos_info->ctl_out_pipe = usb_sndctrlpipe(usbos_info->usb, 0); + +#ifdef BCMUSBDEV_COMPOSITE + wlan_if = dbus_usbos_intf_wlan(usbos_info->usb); + ifnum = cpu_to_le16(IFDESC(usbos_info->usb, wlan_if).bInterfaceNumber); +#else + ifnum = cpu_to_le16(IFDESC(usbos_info->usb, CONTROL_IF).bInterfaceNumber); +#endif /* BCMUSBDEV_COMPOSITE */ + /* CTL Write */ + usbos_info->ctl_write.bRequestType = + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + usbos_info->ctl_write.bRequest = 0; + usbos_info->ctl_write.wValue = cpu_to_le16(0); + usbos_info->ctl_write.wIndex = cpu_to_le16p(&ifnum); + + /* CTL Read */ + usbos_info->ctl_read.bRequestType = + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + usbos_info->ctl_read.bRequest = 1; + usbos_info->ctl_read.wValue = cpu_to_le16(0); + usbos_info->ctl_read.wIndex = cpu_to_le16p(&ifnum); + } + + /* Success, indicate usbos_info is fully up */ + dbus_usbos_state_change(usbos_info, DBUS_STATE_UP); + + return DBUS_OK; +} 
/* dbus_usbos_intf_up */ + +static int +dbus_usbos_intf_down(void *bus) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if (usbos_info == NULL) + return DBUS_ERR; + + dbusos_stop(usbos_info); + return DBUS_OK; +} + +static int +dbus_usbos_intf_stop(void *bus) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if (usbos_info == NULL) + return DBUS_ERR; + + dbusos_stop(usbos_info); + return DBUS_OK; +} + + +/** Called by higher layer (dbus_usb.c) */ +static int +dbus_usbos_intf_set_config(void *bus, dbus_config_t *config) +{ + int err = DBUS_ERR; + usbos_info_t* usbos_info = bus; + + if (config->config_id == DBUS_CONFIG_ID_RXCTL_DEFERRES) { + usbos_info->rxctl_deferrespok = config->rxctl_deferrespok; + err = DBUS_OK; + } else if (config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) { + /* DBUS_CONFIG_ID_AGGR_LIMIT shouldn't be called after probe stage */ + ASSERT(disc_arg == NULL); + ASSERT(config->aggr_param.maxrxsf > 0); + ASSERT(config->aggr_param.maxrxsize > 0); + if (config->aggr_param.maxrxsize > usbos_info->rxbuf_len) { + int state = usbos_info->pub->busstate; + dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock); + while (atomic_read(&usbos_info->rxposted)) { + DBUSTRACE(("%s rxposted is %d, delay 1 ms\n", __FUNCTION__, + atomic_read(&usbos_info->rxposted))); + dbus_usbos_wait(usbos_info, 1); + } + usbos_info->rxbuf_len = config->aggr_param.maxrxsize; + dbus_usbos_state_change(usbos_info, state); + } + err = DBUS_OK; + } + + return err; +} + + +/** Called by dbus_usb.c when it wants to download firmware into the dongle */ +bool +dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen) +{ + int transferred; + int index = 0; + char *tmpbuf; + + if ((usbinfo == NULL) || (buffer == NULL) || (buflen == 0)) + return FALSE; + + tmpbuf = (char *) MALLOC(usbinfo->pub->osh, buflen); + if (!tmpbuf) { + DBUSERR(("%s: Unable to allocate memory \n", __FUNCTION__)); + return FALSE; + } + +#ifdef BCM_REQUEST_FW + if (cmd 
== DL_GO) { + index = 1; + } +#endif + + /* Disable USB autosuspend until this request completes, request USB resume if needed. */ + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0), + cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE), + 0, index, + (void*) tmpbuf, buflen, USB_CTRL_EP_TIMEOUT); + if (transferred == buflen) { + memcpy(buffer, tmpbuf, buflen); + } else { + DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred)); + } + + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + MFREE(usbinfo->pub->osh, tmpbuf, buflen); + return (transferred == buflen); +} + +/** + * Called by dbus_usb.c when it wants to download a buffer into the dongle (e.g. as part of the + * download process, when writing nvram variables). + */ +int +dbus_write_membytes(usbos_info_t* usbinfo, bool set, uint32 address, uint8 *data, uint size) +{ + hwacc_t hwacc; + int write_bytes = 4; + int status; + int retval = 0; + + DBUSTRACE(("Enter:%s\n", __FUNCTION__)); + + /* Read is not supported */ + if (set == 0) { + DBUSERR(("Currently read is not supported!!\n")); + return -1; + } + + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + hwacc.cmd = DL_CMD_WRHW; + hwacc.addr = address; + + DBUSTRACE(("Address:%x size:%d", hwacc.addr, size)); + do { + if (size >= 4) { + write_bytes = 4; + } else if (size >= 2) { + write_bytes = 2; + } else { + write_bytes = 1; + } + + hwacc.len = write_bytes; + + while (size >= write_bytes) { + hwacc.data = *((unsigned int*)data); + + status = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0), + DL_WRHW, (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE), + 1, 0, (char *)&hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT); + + if (status < 0) { + retval = -1; + DBUSERR((" Ctrl write hwacc failed w/status %d @ address:%x \n", + status, hwacc.addr)); + goto err; + } + + hwacc.addr += write_bytes; + data += write_bytes; + size -= write_bytes; + } + } while (size 
> 0); + +err: + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + return retval; +} + +int +dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value) +{ + usbos_info_t *usbinfo = (usbos_info_t *) bus; + int ret = DBUS_OK; + int transferred; + uint32 cmd; + hwacc_t hwacc; + + if (usbinfo == NULL) + return DBUS_ERR; + + if (datalen == 1) + cmd = DL_RDHW8; + else if (datalen == 2) + cmd = DL_RDHW16; + else + cmd = DL_RDHW32; + + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0), + cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE), + (uint16)(regaddr), (uint16)(regaddr >> 16), + (void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT); + + if (transferred >= sizeof(hwacc_t)) { + *value = hwacc.data; + } else { + DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred)); + ret = DBUS_ERR; + } + + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + return ret; +} + +int +dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data) +{ + usbos_info_t *usbinfo = (usbos_info_t *) bus; + int ret = DBUS_OK; + int transferred; + uint32 cmd = DL_WRHW; + hwacc_t hwacc; + + if (usbinfo == NULL) + return DBUS_ERR; + + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + hwacc.cmd = DL_WRHW; + hwacc.addr = regaddr; + hwacc.data = data; + hwacc.len = datalen; + + transferred = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0), + cmd, (USB_DIR_OUT| USB_TYPE_VENDOR | USB_RECIP_INTERFACE), + 1, 0, + (void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT); + + if (transferred != sizeof(hwacc_t)) { + DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred)); + ret = DBUS_ERR; + } + + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + return ret; +} + +int +dbus_usbos_wait(usbos_info_t *usbinfo, uint16 ms) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + if (in_interrupt()) + mdelay(ms); + else + msleep_interruptible(ms); +#else + wait_ms(ms); 
+#endif + return DBUS_OK; +} + +/** Called by dbus_usb.c as part of the firmware download process */ +bool +dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len) +{ + bool ret = TRUE; + int status; + int transferred = 0; + + if (usbinfo == NULL) + return DBUS_ERR; + + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + status = USB_BULK_MSG(usbinfo->usb, usbinfo->tx_pipe, + buffer, len, + &transferred, USB_BULK_EP_TIMEOUT); + + if (status < 0) { + DBUSERR(("%s: usb_bulk_msg failed %d\n", __FUNCTION__, status)); + ret = FALSE; + } + + USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf); + + return ret; +} + +static bool +dbus_usbos_intf_recv_needed(void *bus) +{ + return FALSE; +} + +/** + * Higher layer (dbus_usb.c) wants to execute a function on the condition that the rx spin lock has + * been acquired. + */ +static void* +dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + void *ret; + unsigned long flags; + + if (usbos_info == NULL) + return NULL; + + spin_lock_irqsave(&usbos_info->rxlock, flags); + ret = cb(args); + spin_unlock_irqrestore(&usbos_info->rxlock, flags); + + return ret; +} + +static void* +dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + void *ret; + unsigned long flags; + + if (usbos_info == NULL) + return NULL; + + spin_lock_irqsave(&usbos_info->txlock, flags); + ret = cb(args); + spin_unlock_irqrestore(&usbos_info->txlock, flags); + + return ret; +} + +/** + * if an error condition was detected in this module, the higher DBUS layer (dbus_usb.c) has to + * be notified. 
+ */ +int +dbus_usbos_errhandler(void *bus, int err) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if (usbos_info == NULL) + return DBUS_ERR; + + if (usbos_info->cbarg && usbos_info->cbs) { + if (usbos_info->cbs->errhandler) + usbos_info->cbs->errhandler(usbos_info->cbarg, err); + } + + return DBUS_OK; +} + +/** + * if a change in bus state was detected in this module, the higher DBUS layer (dbus_usb.c) has to + * be notified. + */ +int +dbus_usbos_state_change(void *bus, int state) +{ + usbos_info_t *usbos_info = (usbos_info_t *) bus; + + if (usbos_info == NULL) + return DBUS_ERR; + + if (usbos_info->cbarg && usbos_info->cbs) { + if (usbos_info->cbs->state_change) + usbos_info->cbs->state_change(usbos_info->cbarg, state); + } + + usbos_info->pub->busstate = state; + return DBUS_OK; +} + +int +dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, + disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2) +{ + bzero(&g_probe_info, sizeof(probe_info_t)); + + probe_cb = prcb; + disconnect_cb = discb; + probe_arg = prarg; + + devid_table[0].idVendor = vid; + devid_table[0].idProduct = pid; + + *intf = &dbus_usbos_intf; + + USB_REGISTER(); + + return DBUS_ERR_NODEVICE; +} + +int +dbus_bus_osl_deregister() +{ + g_probe_info.dereged = TRUE; + + DHD_MUTEX_LOCK(); + if (disconnect_cb && disc_arg && (g_probe_info.disc_cb_done == FALSE)) { + disconnect_cb(disc_arg); + disc_arg = NULL; + } + DHD_MUTEX_UNLOCK(); + + USB_DEREGISTER(); + + return DBUS_OK; +} + +void * +dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs) +{ + usbos_info_t *usbos_info; + + if (g_probe_info.dldone == FALSE) { + DBUSERR(("%s: err device not downloaded!\n", __FUNCTION__)); + return NULL; + } + + /* Sanity check for BUS_INFO() */ + ASSERT(OFFSETOF(usbos_info_t, pub) == 0); + + usbos_info = MALLOC(pub->osh, sizeof(usbos_info_t)); + if (usbos_info == NULL) + return NULL; + + bzero(usbos_info, sizeof(usbos_info_t)); + + usbos_info->pub 
= pub; + usbos_info->cbarg = cbarg; + usbos_info->cbs = cbs; + + /* Needed for disconnect() */ + g_probe_info.usbos_info = usbos_info; + + /* Update USB Info */ + usbos_info->usb = g_probe_info.usb; + usbos_info->rx_pipe = g_probe_info.rx_pipe; + usbos_info->rx_pipe2 = g_probe_info.rx_pipe2; + usbos_info->tx_pipe = g_probe_info.tx_pipe; + usbos_info->intr_pipe = g_probe_info.intr_pipe; + usbos_info->intr_size = g_probe_info.intr_size; + usbos_info->interval = g_probe_info.interval; + usbos_info->pub->device_speed = g_probe_info.device_speed; + if (usbos_info->rx_pipe2) { + usbos_info->pub->attrib.has_2nd_bulk_in_ep = 1; + } else { + usbos_info->pub->attrib.has_2nd_bulk_in_ep = 0; + } + + if (usbos_info->tx_pipe) + usbos_info->maxps = usb_maxpacket(usbos_info->usb, + usbos_info->tx_pipe, usb_pipeout(usbos_info->tx_pipe)); + + INIT_LIST_HEAD(&usbos_info->req_rxfreeq); + INIT_LIST_HEAD(&usbos_info->req_txfreeq); + INIT_LIST_HEAD(&usbos_info->req_rxpostedq); + INIT_LIST_HEAD(&usbos_info->req_txpostedq); + spin_lock_init(&usbos_info->rxfree_lock); + spin_lock_init(&usbos_info->txfree_lock); + spin_lock_init(&usbos_info->rxposted_lock); + spin_lock_init(&usbos_info->txposted_lock); + spin_lock_init(&usbos_info->rxlock); + spin_lock_init(&usbos_info->txlock); + + atomic_set(&usbos_info->rxposted, 0); + atomic_set(&usbos_info->txposted, 0); + + +#ifdef USB_DISABLE_INT_EP + usbos_info->intr_urb = NULL; +#else + if (!(usbos_info->intr_urb = USB_ALLOC_URB())) { + DBUSERR(("%s: usb_alloc_urb (tx) failed\n", __FUNCTION__)); + goto fail; + } +#endif + + if (!(usbos_info->ctl_urb = USB_ALLOC_URB())) { + DBUSERR(("%s: usb_alloc_urb (tx) failed\n", __FUNCTION__)); + goto fail; + } + + init_waitqueue_head(&usbos_info->wait); + + if (!(usbos_info->blk_urb = USB_ALLOC_URB())) { /* for embedded image downloading */ + DBUSERR(("%s: usb_alloc_urb (tx) failed\n", __FUNCTION__)); + goto fail; + } + + usbos_info->rxbuf_len = (uint)usbos_info->pub->rxsize; + + + + 
atomic_set(&usbos_info->txallocated, 0); + if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info, + usbos_info->pub->ntxq, FALSE)) { + goto fail; + } + + atomic_set(&usbos_info->rxallocated, 0); + if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info, + MIN(DBUS_USB_RXQUEUE_BATCH_ADD, usbos_info->pub->nrxq), + TRUE)) { + goto fail; + } + + sema_init(&usbos_info->ctl_lock, 1); + +#ifdef USBOS_THREAD + if (dbus_usbos_thread_init(usbos_info) == NULL) + goto fail; +#endif /* USBOS_THREAD */ + +#ifdef USBOS_TX_THREAD + if (dbus_usbos_tx_thread_init(usbos_info) == NULL) + goto fail; +#endif /* USBOS_TX_THREAD */ + + pub->dev_info = g_probe_info.usb; + + + return (void *) usbos_info; +fail: + if (usbos_info->intr_urb) { + USB_FREE_URB(usbos_info->intr_urb); + usbos_info->intr_urb = NULL; + } + + if (usbos_info->ctl_urb) { + USB_FREE_URB(usbos_info->ctl_urb); + usbos_info->ctl_urb = NULL; + } + +#if defined(BCM_REQUEST_FW) + if (usbos_info->blk_urb) { + USB_FREE_URB(usbos_info->blk_urb); + usbos_info->blk_urb = NULL; + } +#endif + + dbus_usbos_urbreqs_free(usbos_info, TRUE); + atomic_set(&usbos_info->rxallocated, 0); + dbus_usbos_urbreqs_free(usbos_info, FALSE); + atomic_set(&usbos_info->txallocated, 0); + + g_probe_info.usbos_info = NULL; + + MFREE(pub->osh, usbos_info, sizeof(usbos_info_t)); + return NULL; +} /* dbus_usbos_intf_attach */ + +void +dbus_usbos_intf_detach(dbus_pub_t *pub, void *info) +{ + usbos_info_t *usbos_info = (usbos_info_t *) info; + osl_t *osh = pub->osh; + + if (usbos_info == NULL) { + return; + } + +#ifdef USBOS_TX_THREAD + dbus_usbos_tx_thread_deinit(usbos_info); +#endif /* USBOS_TX_THREAD */ + + /* Must unlink all URBs prior to driver unload; + * otherwise an URB callback can occur after driver + * has been de-allocated and rmmod'd + */ + dbusos_stop(usbos_info); + + if (usbos_info->intr_urb) { + USB_FREE_URB(usbos_info->intr_urb); + usbos_info->intr_urb = NULL; + } + + if (usbos_info->ctl_urb) { + USB_FREE_URB(usbos_info->ctl_urb); + 
usbos_info->ctl_urb = NULL; + } + + if (usbos_info->blk_urb) { + USB_FREE_URB(usbos_info->blk_urb); + usbos_info->blk_urb = NULL; + } + + dbus_usbos_urbreqs_free(usbos_info, TRUE); + atomic_set(&usbos_info->rxallocated, 0); + dbus_usbos_urbreqs_free(usbos_info, FALSE); + atomic_set(&usbos_info->txallocated, 0); + +#ifdef USBOS_THREAD + dbus_usbos_thread_deinit(usbos_info); +#endif /* USBOS_THREAD */ + + g_probe_info.usbos_info = NULL; + MFREE(osh, usbos_info, sizeof(usbos_info_t)); +} /* dbus_usbos_intf_detach */ + + +#ifdef USBOS_TX_THREAD + +void* +dbus_usbos_tx_thread_init(usbos_info_t *usbos_info) +{ + spin_lock_init(&usbos_info->usbos_tx_list_lock); + INIT_LIST_HEAD(&usbos_info->usbos_tx_list); + init_waitqueue_head(&usbos_info->usbos_tx_queue_head); + + usbos_info->usbos_tx_kt = kthread_create(dbus_usbos_tx_thread_func, + usbos_info, "usb-tx-thread"); + + if (IS_ERR(usbos_info->usbos_tx_kt)) { + DBUSERR(("Thread Creation failed\n")); + return (NULL); + } + + usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED; + wake_up_process(usbos_info->usbos_tx_kt); + + return (usbos_info->usbos_tx_kt); +} + +void +dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info) +{ + urb_req_t *req; + + if (usbos_info->usbos_tx_kt) { + wake_up_interruptible(&usbos_info->usbos_tx_queue_head); + kthread_stop(usbos_info->usbos_tx_kt); + } + + /* Move pending requests to free queue so they can be freed */ + while ((req = dbus_usbos_qdeq( + &usbos_info->usbos_tx_list, &usbos_info->usbos_tx_list_lock)) != NULL) { + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock); + } +} + +/** + * Allow USB in-band resume to block by submitting CTRL and DATA URBs on a separate thread. 
+ */ +int +dbus_usbos_tx_thread_func(void *data) +{ + usbos_info_t *usbos_info = (usbos_info_t *)data; + urb_req_t *req; + dbus_irb_tx_t *txirb; + int ret; + unsigned long flags; + +#ifdef WL_THREADNICE + set_user_nice(current, WL_THREADNICE); +#endif + + while (1) { + /* Wait until there are URBs to submit */ + wait_event_interruptible_timeout( + usbos_info->usbos_tx_queue_head, + !list_empty(&usbos_info->usbos_tx_list) || + usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED, + 100); + + if (kthread_should_stop()) + break; + + /* Submit CTRL URB if needed */ + if (usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED) { + + /* Disable USB autosuspend until this request completes. If the + * interface was suspended, this call blocks until it has been resumed. + */ + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + usbos_info->ctl_state = USBOS_REQUEST_STATE_SUBMITTED; + + ret = USB_SUBMIT_URB(usbos_info->ctl_urb); + if (ret != 0) { + DBUSERR(("%s CTRL USB_SUBMIT_URB failed, status %d\n", + __FUNCTION__, ret)); + + usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED; + up(&usbos_info->ctl_lock); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + } + } + + /* Submit all available TX URBs */ + while ((req = dbus_usbos_qdeq(&usbos_info->usbos_tx_list, + &usbos_info->usbos_tx_list_lock)) != NULL) { + + /* Disable USB autosuspend until this request completes. If the + * interface was suspended, this call blocks until it has been resumed. + */ + USB_AUTOPM_GET_INTERFACE(g_probe_info.intf); + + spin_lock_irqsave(&usbos_info->txlock, flags); + + ret = USB_SUBMIT_URB(req->urb); + if (ret == 0) { + /* URB submitted successfully */ + dbus_usbos_qenq(&usbos_info->req_txpostedq, req, + &usbos_info->txposted_lock); + atomic_inc(&usbos_info->txposted); + } else { + /* Submitting the URB failed. 
*/ + DBUSERR(("%s TX USB_SUBMIT_URB failed, status %d\n", + __FUNCTION__, ret)); + + USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf); + } + + spin_unlock_irqrestore(&usbos_info->txlock, flags); + + if (ret != 0) { + /* Cleanup and notify higher layers */ + dbus_usbos_qenq(&usbos_info->req_txfreeq, req, + &usbos_info->txfree_lock); + + txirb = req->arg; + if (txirb->send_buf) { + MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len); + txirb->send_buf = NULL; + req->buf_len = 0; + } + + if (likely (usbos_info->cbarg && usbos_info->cbs)) { + if (likely (usbos_info->cbs->send_irb_complete != NULL)) + usbos_info->cbs->send_irb_complete( + usbos_info->cbarg, txirb, DBUS_ERR_TXDROP); + } + } + } + } + + return 0; +} /* dbus_usbos_tx_thread_func */ + +#endif /* USBOS_TX_THREAD */ + +#ifdef USBOS_THREAD + +/** + * Increase system performance by creating a USB thread that runs parallel to other system + * activity. + */ +static void* +dbus_usbos_thread_init(usbos_info_t *usbos_info) +{ + usbos_list_entry_t *entry; + unsigned long flags, ii; + + spin_lock_init(&usbos_info->usbos_list_lock); + spin_lock_init(&usbos_info->ctrl_lock); + INIT_LIST_HEAD(&usbos_info->usbos_list); + INIT_LIST_HEAD(&usbos_info->usbos_free_list); + init_waitqueue_head(&usbos_info->usbos_queue_head); + atomic_set(&usbos_info->usbos_list_cnt, 0); + + + for (ii = 0; ii < (usbos_info->pub->nrxq + usbos_info->pub->ntxq); ii++) { + entry = MALLOC(usbos_info->pub->osh, sizeof(usbos_list_entry_t)); + if (entry) { + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + list_add_tail((struct list_head*) entry, &usbos_info->usbos_free_list); + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + } else { + DBUSERR(("Failed to create list\n")); + } + } + + usbos_info->usbos_kt = kthread_create(dbus_usbos_thread_func, + usbos_info, "usb-thread"); + + if (IS_ERR(usbos_info->usbos_kt)) { + DBUSERR(("Thread Creation failed\n")); + return (NULL); + } + + wake_up_process(usbos_info->usbos_kt); + + 
return (usbos_info->usbos_kt); +} + +static void +dbus_usbos_thread_deinit(usbos_info_t *usbos_info) +{ + struct list_head *cur, *next; + usbos_list_entry_t *entry; + unsigned long flags; + + if (usbos_info->usbos_kt) { + wake_up_interruptible(&usbos_info->usbos_queue_head); + kthread_stop(usbos_info->usbos_kt); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_safe(cur, next, &usbos_info->usbos_list) + { + entry = list_entry(cur, struct usbos_list_entry, list); + /* detach this entry from the list and then free the entry */ + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + list_del(cur); + MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t)); + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + } + + list_for_each_safe(cur, next, &usbos_info->usbos_free_list) + { + entry = list_entry(cur, struct usbos_list_entry, list); + /* detach this entry from the list and then free the entry */ + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + list_del(cur); + MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t)); + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif +} + +/** Process completed URBs in a worker thread */ +static int +dbus_usbos_thread_func(void *data) +{ + usbos_info_t *usbos_info = (usbos_info_t *)data; + usbos_list_entry_t *entry; + struct list_head *cur, *next; + unsigned long flags; + +#ifdef WL_THREADNICE + set_user_nice(current, WL_THREADNICE); +#endif + + while (1) { + /* If the list is empty, then go to sleep */ + wait_event_interruptible_timeout + (usbos_info->usbos_queue_head, + atomic_read(&usbos_info->usbos_list_cnt) > 0, + 100); + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + + /* For each entry on the list, process it. 
Remove the entry from + * the list when done. + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_safe(cur, next, &usbos_info->usbos_list) + { + urb_req_t *req; + int len; + int stat; + usbos_info_t *usbos_info_local; + + entry = list_entry(cur, struct usbos_list_entry, list); + if (entry == NULL) + break; + + req = entry->urb_context; + len = entry->urb_length; + stat = entry->urb_status; + usbos_info_local = req->usbinfo; + + /* detach this entry from the list and attach it to the free list */ + list_del_init(cur); + spin_unlock_irqrestore(&usbos_info_local->usbos_list_lock, flags); + + dbus_usbos_recv_complete_handle(req, len, stat); + + spin_lock_irqsave(&usbos_info_local->usbos_list_lock, flags); + + list_add_tail(cur, &usbos_info_local->usbos_free_list); + + atomic_dec(&usbos_info_local->usbos_list_cnt); + } + + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + + return 0; +} /* dbus_usbos_thread_func */ + +/** Called on Linux calling URB callback, see dbus_usbos_recv_complete() */ +static void +dbus_usbos_dispatch_schedule(CALLBACK_ARGS) +{ + urb_req_t *req = urb->context; + usbos_info_t *usbos_info = req->usbinfo; + usbos_list_entry_t *entry; + unsigned long flags; + struct list_head *cur; + + spin_lock_irqsave(&usbos_info->usbos_list_lock, flags); + + cur = usbos_info->usbos_free_list.next; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + entry = list_entry(cur, struct usbos_list_entry, list); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + + /* detach this entry from the free list and prepare it insert it to use list */ + list_del_init(cur); + + if (entry) { + entry->urb_context = urb->context; + 
entry->urb_length = urb->actual_length; + entry->urb_status = urb->status; + + atomic_inc(&usbos_info->usbos_list_cnt); + list_add_tail(cur, &usbos_info->usbos_list); + } else { + DBUSERR(("!!!!!!OUT OF MEMORY!!!!!!!\n")); + } + + spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags); + + /* thread */ + wake_up_interruptible(&usbos_info->usbos_queue_head); +} /* dbus_usbos_dispatch_schedule */ + +#endif /* USBOS_THREAD */ + + + + +#ifdef BCM_REQUEST_FW + +struct request_fw_context { + const struct firmware *firmware; + struct semaphore lock; +}; + +/* + * Callback for dbus_request_firmware(). + */ +static void +dbus_request_firmware_done(const struct firmware *firmware, void *ctx) +{ + struct request_fw_context *context = (struct request_fw_context*)ctx; + + /* Store the received firmware handle in the context and wake requester */ + context->firmware = firmware; + up(&context->lock); +} + +/* + * Send a firmware request and wait for completion. + * + * The use of the asynchronous version of request_firmware() is needed to avoid + * kernel oopses when we just come out of system hibernate. + */ +static int +dbus_request_firmware(const char *name, const struct firmware **firmware) +{ + struct request_fw_context *context; + int ret; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + sema_init(&context->lock, 0); + + ret = request_firmware_nowait(THIS_MODULE, true, name, &g_probe_info.usb->dev, + GFP_KERNEL, context, dbus_request_firmware_done); + if (ret) { + kfree(context); + return ret; + } + + /* Wait for completion */ + if (down_interruptible(&context->lock) != 0) { + kfree(context); + return -ERESTARTSYS; + } + + *firmware = context->firmware; + kfree(context); + + return *firmware != NULL ? 
0 : -ENOENT; +} + +static void * +dbus_get_fwfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev) +{ + const struct firmware *firmware = NULL; +#ifndef OEM_ANDROID + s8 *device_id = NULL; + s8 *chip_rev = ""; +#endif /* OEM_ANDROID */ + s8 file_name[64]; + int ret; + +#ifndef OEM_ANDROID + switch (devid) { + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + device_id = "4350"; + break; + case BCM43143_CHIP_ID: + device_id = "43143"; + break; + case BCM43234_CHIP_ID: + case BCM43235_CHIP_ID: + case BCM43236_CHIP_ID: + device_id = "43236"; + break; + case BCM43242_CHIP_ID: + device_id = "43242"; + break; + case BCM43238_CHIP_ID: + device_id = "43238"; + break; + case BCM43526_CHIP_ID: + device_id = "43526"; + break; + case BCM43569_CHIP_ID: + device_id = "43569"; + switch (chiprev) { + case 0: + chip_rev = "a0"; + break; + case 2: + chip_rev = "a2"; + break; + default: + break; + } + break; + default: + DBUSERR(("unsupported device %x\n", devid)); + return NULL; + } + + /* Load firmware */ + snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-firmware.bin", device_id, chip_rev); +#else + snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_FW_PATH); +#endif /* OEM_ANDROID */ + + ret = dbus_request_firmware(file_name, &firmware); + if (ret) { + DBUSERR(("fail to request firmware %s\n", file_name)); + return NULL; + } + + *fwlen = firmware->size; + *fw = (uint8 *)firmware->data; + return (void *)firmware; + +} + +static void * +dbus_get_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev) +{ + const struct firmware *firmware = NULL; +#ifndef OEM_ANDROID + s8 *device_id = NULL; + s8 *chip_rev = ""; +#endif /* OEM_ANDROID */ + s8 file_name[64]; + int ret; + +#ifndef OEM_ANDROID + switch (devid) { + case BCM4350_CHIP_ID: + case 
BCM4354_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + device_id = "4350"; + break; + case BCM43143_CHIP_ID: + device_id = "43143"; + break; + case BCM43234_CHIP_ID: + device_id = "43234"; + break; + case BCM43235_CHIP_ID: + device_id = "43235"; + break; + case BCM43236_CHIP_ID: + device_id = "43236"; + break; + case BCM43238_CHIP_ID: + device_id = "43238"; + break; + case BCM43242_CHIP_ID: + device_id = "43242"; + break; + case BCM43526_CHIP_ID: + device_id = "43526"; + break; + case BCM43569_CHIP_ID: + device_id = "43569"; + switch (chiprev) { + case 0: + chip_rev = "a0"; + break; + case 2: + chip_rev = "a2"; + break; + default: + break; + } + break; + default: + DBUSERR(("unsupported device %x\n", devid)); + return NULL; + } + + /* Load board specific nvram file */ + snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-%2x-%2x.nvm", + device_id, chip_rev, boardtype, boardrev); +#else + snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_NVRAM_PATH); +#endif /* OEM_ANDROID */ + + ret = dbus_request_firmware(file_name, &firmware); + if (ret) { + DBUSERR(("fail to request nvram %s\n", file_name)); + +#ifndef OEM_ANDROID + /* Load generic nvram file */ + snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s.nvm", + device_id, chip_rev); + + ret = dbus_request_firmware(file_name, &firmware); +#endif /* OEM_ANDROID */ + + if (ret) { + DBUSERR(("fail to request nvram %s\n", file_name)); + return NULL; + } + } + + *fwlen = firmware->size; + *fw = (uint8 *)firmware->data; + return (void *)firmware; +} + +void * +dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type, uint16 boardtype, + uint16 boardrev) +{ + switch (type) { + case DBUS_FIRMWARE: + return dbus_get_fwfile(devid, chiprev, fw, fwlen, boardtype, boardrev); + case DBUS_NVFILE: + return dbus_get_nvfile(devid, chiprev, fw, fwlen, boardtype, boardrev); + default: + 
return NULL; + } +} + +void +dbus_release_fw_nvfile(void *firmware) +{ + release_firmware((struct firmware *)firmware); +} +#endif /* BCM_REQUEST_FW */ + +#ifdef BCMUSBDEV_COMPOSITE +/** + * For a composite device the interface order is not guaranteed, scan the device struct for the WLAN + * interface. + */ +static int +dbus_usbos_intf_wlan(struct usb_device *usb) +{ + int i, num_of_eps, ep, intf_wlan = -1; + int num_intf = CONFIGDESC(usb)->bNumInterfaces; + struct usb_endpoint_descriptor *endpoint; + + for (i = 0; i < num_intf; i++) { + if (IFDESC(usb, i).bInterfaceClass != USB_CLASS_VENDOR_SPEC) + continue; + num_of_eps = IFDESC(usb, i).bNumEndpoints; + + for (ep = 0; ep < num_of_eps; ep++) { + endpoint = &IFEPDESC(usb, i, ep); + if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK) { + intf_wlan = i; + break; + } + } + if (ep < num_of_eps) + break; + } + + return intf_wlan; +} +#endif /* BCMUSBDEV_COMPOSITE */ diff --git a/bcmdhd.100.10.315.x/dhd.h b/bcmdhd.100.10.315.x/dhd.h new file mode 100644 index 0000000..425043d --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd.h @@ -0,0 +1,3069 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd.h 771671 2018-07-11 06:58:25Z $ + */ + +/**************** + * Common types * + */ + +#ifndef _dhd_h_ +#define _dhd_h_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK) +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */ +/* The kernel threading is sdio-specific */ +struct task_struct; +struct sched_param; +#if defined(BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ +int setScheduler(struct task_struct *p, int policy, struct sched_param *param); +int get_scheduler_policy(struct task_struct *p); +#define MAX_EVENT 16 + +#define ALL_INTERFACES 0xff + +/* H2D and D2H ring dump is enabled by default */ +#ifdef PCIE_FULL_DONGLE +#define DHD_DUMP_PCIE_RINGS +#endif /* PCIE_FULL_DONGLE */ + +#include +#include +#include +#include +#if defined(DUMP_IOCTL_IOV_LIST) || defined(DHD_DEBUG) +#include +#endif /* DUMP_IOCTL_IOV_LIST || DHD_DEBUG */ + +#if defined(BCMWDF) +#include +#include +#endif /* (BCMWDF) */ + +#ifdef DHD_ERPOM +#include +#endif /* DHD_ERPOM */ + +#ifdef DEBUG_DPC_THREAD_WATCHDOG +#define MAX_RESCHED_CNT 600 +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + +#if defined(KEEP_ALIVE) +/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ +#define KEEP_ALIVE_PERIOD 55000 +#define NULL_PKT_STR "null_pkt" +#endif /* KEEP_ALIVE */ + +/* By default enabled from here, later the WQ code will be removed */ +#define DHD_USE_KTHREAD_FOR_LOGTRACE + +/* + * Earlier DHD used to have it own time stamp for printk and + * Dongle used to have its own time stamp for console messages + * With this flag, DHD and Dongle console messges will have same time zone + */ 
+#define DHD_H2D_LOG_TIME_SYNC +/* Forward decls */ +struct dhd_bus; +struct dhd_prot; +struct dhd_info; +struct dhd_ioctl; +struct dhd_dbg; +struct dhd_ts; + +/* The level of bus communication with the dongle */ +enum dhd_bus_state { + DHD_BUS_DOWN, /* Not ready for frame transfers */ + DHD_BUS_LOAD, /* Download access only (CPU reset) */ + DHD_BUS_DATA, /* Ready for frame transfers */ + DHD_BUS_SUSPEND, /* Bus has been suspended */ + DHD_BUS_DOWN_IN_PROGRESS, /* Bus going Down */ + DHD_BUS_REMOVE, /* Bus has been removed */ +}; + +/* The level of bus communication with the dongle */ +enum dhd_bus_devreset_type { + DHD_BUS_DEVRESET_ON = 0, /* ON */ + DHD_BUS_DEVRESET_OFF = 1, /* OFF */ + DHD_BUS_DEVRESET_FLR = 2, /* FLR */ + DHD_BUS_DEVRESET_FLR_FORCE_FAIL = 3, /* FLR FORCE FAIL */ + DHD_BUS_DEVRESET_QUIESCE = 4, /* FLR */ +}; + +/* + * Bit fields to Indicate clean up process that wait till they are finished. + * Future synchronizable processes can add their bit filed below and update + * their functionalities accordingly + */ +#define DHD_BUS_BUSY_IN_TX 0x01 +#define DHD_BUS_BUSY_IN_SEND_PKT 0x02 +#define DHD_BUS_BUSY_IN_DPC 0x04 +#define DHD_BUS_BUSY_IN_WD 0x08 +#define DHD_BUS_BUSY_IN_IOVAR 0x10 +#define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20 +#define DHD_BUS_BUSY_SUSPEND_IN_PROGRESS 0x40 +#define DHD_BUS_BUSY_RESUME_IN_PROGRESS 0x80 +#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100 +#define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200 +#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400 +#define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \ + DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \ + DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_IN_CHECKDIED 0x800 +#define DHD_BUS_BUSY_IN_MEMDUMP 0x1000 +#define DHD_BUS_BUSY_IN_SSSRDUMP 0x2000 +#define DHD_BUS_BUSY_IN_LOGDUMP 0x4000 + +#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX +#define DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp) \ + (dhdp)->dhd_bus_busy_state |= 
DHD_BUS_BUSY_IN_SEND_PKT +#define DHD_BUS_BUSY_SET_IN_DPC(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC +#define DHD_BUS_BUSY_SET_IN_WD(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD +#define DHD_BUS_BUSY_SET_IN_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR +#define DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR +#define DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE +#define DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED +#define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP +#define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP +#define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP + +#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX +#define DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT +#define DHD_BUS_BUSY_CLEAR_IN_DPC(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC +#define DHD_BUS_BUSY_CLEAR_IN_WD(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD +#define DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR +#define DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state &= 
~DHD_BUS_BUSY_IN_DHD_IOVAR +#define DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE +#define DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_CHECKDIED +#define DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_MEMDUMP +#define DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP +#define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP + +#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) +#define DHD_BUS_BUSY_CHECK_IN_SEND_PKT(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SEND_PKT) +#define DHD_BUS_BUSY_CHECK_IN_DPC(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DPC) +#define DHD_BUS_BUSY_CHECK_IN_WD(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_WD) +#define DHD_BUS_BUSY_CHECK_IN_IOVAR(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_IOVAR) +#define DHD_BUS_BUSY_CHECK_IN_DHD_IOVAR(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DHD_IOVAR) +#define DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_SUSPEND_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RESUME_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & 
DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) +#define DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_ALL(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) +#define DHD_BUS_BUSY_CHECK_IN_CHECKDIED(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_CHECKDIED) +#define DHD_BUS_BUSY_CHECK_IN_MEMDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_MEMDUMP) +#define DHD_BUS_BUSY_CHECK_IN_SSSRDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP) +#define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP) +#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \ + ((dhdp)->dhd_bus_busy_state == 0) + +#define DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp)) + +#define DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp) \ + (DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \ + DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp)) + +#define DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp)) + +#define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS || \ + (dhdp)->busstate == DHD_BUS_REMOVE) + +#define DHD_BUS_CHECK_REMOVE(dhdp) \ + ((dhdp)->busstate == DHD_BUS_REMOVE) + +#define MAX_MTU_SZ (1600u) + +/* (u64)result = (u64)dividend / (u64)divisor */ +#define DIV_U64_BY_U64(dividend, divisor) div64_u64(dividend, divisor) + +/* (u64)result = (u64)dividend / (u32)divisor */ +#define DIV_U64_BY_U32(dividend, divisor) div_u64(dividend, divisor) + +/* Be careful while using this, as it divides dividend also + * (u32)remainder = (u64)dividend % 
(u32)divisor + * (u64)dividend = (u64)dividend / (u32)divisor + */ +#define DIV_AND_MOD_U64_BY_U32(dividend, divisor) do_div(dividend, divisor) + +/* (u32)remainder = (u64)dividend % (u32)divisor */ +#define MOD_U64_BY_U32(dividend, divisor) ({ \ + uint64 temp_dividend = (dividend); \ + uint32 rem = DIV_AND_MOD_U64_BY_U32(temp_dividend, (divisor)); \ + rem; \ +}) + +#define SEC_USEC_FMT \ + "%015llu.%06u" + +#define GET_SEC_USEC(t) \ + DIV_U64_BY_U32(t, USEC_PER_SEC), MOD_U64_BY_U32(t, USEC_PER_SEC) + +/* Download Types */ +typedef enum download_type { + FW, + NVRAM, + CLM_BLOB, + TXCAP_BLOB +} download_type_t; + +/* For supporting multiple interfaces */ +#define DHD_MAX_IFS 16 +#define DHD_MAX_STATIC_IFS 1 +#define DHD_DEL_IF -0xE +#define DHD_BAD_IF -0xF +#define DHD_DUMMY_INFO_IF 0xDEAF /* Hack i/f to handle events from INFO Ring */ +#define DHD_EVENT_IF DHD_DUMMY_INFO_IF + +enum dhd_op_flags { +/* Firmware requested operation mode */ + DHD_FLAG_STA_MODE = (1 << (0)), /* STA only */ + DHD_FLAG_HOSTAP_MODE = (1 << (1)), /* SOFTAP only */ + DHD_FLAG_P2P_MODE = (1 << (2)), /* P2P Only */ + /* STA + P2P */ + DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE), + /* STA + SoftAP */ + DHD_FLAG_CONCURR_STA_HOSTAP_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_HOSTAP_MODE), + DHD_FLAG_CONCURR_MULTI_CHAN_MODE = (1 << (4)), /* STA + P2P */ + /* Current P2P mode for P2P connection */ + DHD_FLAG_P2P_GC_MODE = (1 << (5)), + DHD_FLAG_P2P_GO_MODE = (1 << (6)), + DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */ + DHD_FLAG_IBSS_MODE = (1 << (8)), + DHD_FLAG_MFG_MODE = (1 << (9)), + DHD_FLAG_RSDB_MODE = (1 << (10)), + DHD_FLAG_MP2P_MODE = (1 << (11)) +}; + +#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \ + (dhd ? ((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1) +#define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) \ + (dhd ? 
(((dhd->op_mode) & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) == \ + DHD_FLAG_CONCURR_STA_HOSTAP_MODE) : 0) + +/* Max sequential TX/RX Control timeouts to set HANG event */ +#ifndef MAX_CNTL_TX_TIMEOUT +#define MAX_CNTL_TX_TIMEOUT 2 +#endif /* MAX_CNTL_TX_TIMEOUT */ +#ifndef MAX_CNTL_RX_TIMEOUT +#define MAX_CNTL_RX_TIMEOUT 1 +#endif /* MAX_CNTL_RX_TIMEOUT */ + +#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */ +#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */ +#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */ +#define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */ +#define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */ + +#ifndef POWERUP_MAX_RETRY +#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */ +#endif // endif +#ifndef POWERUP_WAIT_MS +#define POWERUP_WAIT_MS 2000 /* ms: time out in waiting wifi to come up */ +#endif // endif +/* + * MAX_NVRAMBUF_SIZE determines the size of the Buffer in the DHD that holds + * the NVRAM data. 
That is the size of the buffer pointed by bus->vars + * This also needs to be increased to 16K to support NVRAM size higher than 8K + */ +#define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */ +#define MAX_CLM_BUF_SIZE (48 * 1024) /* max clm blob size */ +#define MAX_TXCAP_BUF_SIZE (16 * 1024) /* max txcap blob size */ +#ifdef DHD_DEBUG +#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */ +#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */ +#endif // endif + +#ifndef CONFIG_BCMDHD_CLM_PATH +#define CONFIG_BCMDHD_CLM_PATH "/etc/wifi/bcmdhd_clm.blob" +#endif /* CONFIG_BCMDHD_CLM_PATH */ +#define WL_CCODE_NULL_COUNTRY "#n" + +#define FW_VER_STR_LEN 128 +#define FWID_STR_LEN 256 +#define CLM_VER_STR_LEN 128 +#define BUS_API_REV_STR_LEN 128 +#define FW_VER_STR "Version" +#define FWID_STR_1 "FWID: 01-" +#define FWID_STR_2 "FWID=01-" +extern char bus_api_revision[]; + +enum dhd_bus_wake_state { + WAKE_LOCK_OFF, + WAKE_LOCK_PRIV, + WAKE_LOCK_DPC, + WAKE_LOCK_IOCTL, + WAKE_LOCK_DOWNLOAD, + WAKE_LOCK_TMOUT, + WAKE_LOCK_WATCHDOG, + WAKE_LOCK_LINK_DOWN_TMOUT, + WAKE_LOCK_PNO_FIND_TMOUT, + WAKE_LOCK_SOFTAP_SET, + WAKE_LOCK_SOFTAP_STOP, + WAKE_LOCK_SOFTAP_START, + WAKE_LOCK_SOFTAP_THREAD +}; + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF, + DHD_PREALLOC_DATABUF, + DHD_PREALLOC_OSL_BUF, +#if defined(STATIC_WL_PRIV_STRUCT) + DHD_PREALLOC_WIPHY_ESCAN0 = 5, +#ifdef DUAL_ESCAN_RESULT_BUFFER + DHD_PREALLOC_WIPHY_ESCAN1, +#endif /* DUAL_ESCAN_RESULT_BUFFER */ +#endif /* STATIC_WL_PRIV_STRUCT */ + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, + DHD_PREALLOC_IF_FLOW_LKUP = 9, + /* 10 */ + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_STAT_REPORT_BUF = 18, + 
DHD_PREALLOC_WL_ESCAN_INFO = 19, + DHD_PREALLOC_FW_VERBOSE_RING = 20, + DHD_PREALLOC_FW_EVENT_RING = 21, + DHD_PREALLOC_DHD_EVENT_RING = 22, + DHD_PREALLOC_NAN_EVENT_RING = 23 +}; + +enum dhd_dongledump_mode { + DUMP_DISABLED = 0, + DUMP_MEMONLY, + DUMP_MEMFILE, + DUMP_MEMFILE_BUGON, + DUMP_MEMFILE_MAX +}; + +enum dhd_dongledump_type { + DUMP_TYPE_RESUMED_ON_TIMEOUT = 1, + DUMP_TYPE_D3_ACK_TIMEOUT, + DUMP_TYPE_DONGLE_TRAP, + DUMP_TYPE_MEMORY_CORRUPTION, + DUMP_TYPE_PKTID_AUDIT_FAILURE, + DUMP_TYPE_PKTID_INVALID, + DUMP_TYPE_SCAN_TIMEOUT, + DUMP_TYPE_SCAN_BUSY, + DUMP_TYPE_BY_SYSDUMP, + DUMP_TYPE_BY_LIVELOCK, + DUMP_TYPE_AP_LINKUP_FAILURE, + DUMP_TYPE_AP_ABNORMAL_ACCESS, + DUMP_TYPE_CFG_VENDOR_TRIGGERED, + DUMP_TYPE_RESUMED_ON_TIMEOUT_TX, + DUMP_TYPE_RESUMED_ON_TIMEOUT_RX, + DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR, + DUMP_TYPE_TRANS_ID_MISMATCH, + DUMP_TYPE_IFACE_OP_FAILURE, +#ifdef DEBUG_DNGL_INIT_FAIL + DUMP_TYPE_DONGLE_INIT_FAILURE, +#endif /* DEBUG_DNGL_INIT_FAIL */ + DUMP_TYPE_DONGLE_HOST_EVENT, + DUMP_TYPE_SMMU_FAULT, + DUMP_TYPE_RESUMED_UNKNOWN, +#ifdef DHD_ERPOM + DUMP_TYPE_DUE_TO_BT, +#endif /* DHD_ERPOM */ + DUMP_TYPE_LOGSET_BEYOND_RANGE, + DUMP_TYPE_BY_USER +}; + +enum dhd_hang_reason { + HANG_REASON_MASK = 0x8000, + HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001, + HANG_REASON_DONGLE_TRAP = 0x8002, + HANG_REASON_D3_ACK_TIMEOUT = 0x8003, + HANG_REASON_BUS_DOWN = 0x8004, + HANG_REASON_MSGBUF_LIVELOCK = 0x8006, + HANG_REASON_IFACE_DEL_FAILURE = 0x8007, + HANG_REASON_HT_AVAIL_ERROR = 0x8008, + HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, + HANG_REASON_PCIE_PKTID_ERROR = 0x800A, + HANG_REASON_IFACE_ADD_FAILURE = 0x800B, + HANG_REASON_PCIE_LINK_DOWN = 0x8805, + HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806, + HANG_REASON_UNKNOWN = 0x8807, + HANG_REASON_MAX = 0x8808 +}; + +#define WLC_E_DEAUTH_MAX_REASON 0x0FFF + +enum dhd_rsdb_scan_features { + /* Downgraded scan feature for AP active */ + RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01, + /* Downgraded scan feature for P2P Discovery 
*/ + RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02, + /* Enable channel pruning for ROAM SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10, + /* Enable channel pruning for any SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20 +}; + +#define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1) +#define VENDOR_SEND_HANG_EXT_INFO_VER 20170905 +#define HANG_INFO_TRAP_T_NAME_MAX 6 +#define HANG_INFO_TRAP_T_REASON_IDX 0 +#define HANG_INFO_TRAP_T_SUBTYPE_IDX 2 +#define HANG_INFO_TRAP_T_OFFSET_IDX 3 +#define HANG_INFO_TRAP_T_EPC_IDX 4 +#define HANG_FIELD_STR_MAX_LEN 9 +#define HANG_FIELD_CNT_MAX 69 +#define HANG_FIELD_IF_FAILURE_CNT 10 +#define HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT 8 +#define HANG_FIELD_TRAP_T_STACK_CNT_MAX 16 +#define HANG_FIELD_MISMATCH_CNT 10 +#define HANG_INFO_BIGDATA_KEY_STACK_CNT 4 + +#define DEBUG_DUMP_TIME_BUF_LEN (16 + 1) +/* delimiter between values */ +#define HANG_KEY_DEL ' ' +#define HANG_RAW_DEL '_' + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif // endif + +#define DHD_TX_CONTEXT_MASK 0xff +#define DHD_TX_START_XMIT 0x01 +#define DHD_TX_SEND_PKT 0x02 +#define DHD_IF_SET_TX_ACTIVE(ifp, context) \ + ifp->tx_paths_active |= context; +#define DHD_IF_CLR_TX_ACTIVE(ifp, context) \ + ifp->tx_paths_active &= ~context; +#define DHD_IF_IS_TX_ACTIVE(ifp) \ + (ifp->tx_paths_active) +/** + * DMA-able buffer parameters + * - dmaaddr_t is 32bits on a 32bit host. + * dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr + * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer. 
+ */ +typedef struct dhd_dma_buf { + void *va; /* virtual address of buffer */ + uint32 len; /* user requested buffer length */ + dmaaddr_t pa; /* physical address of buffer */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ + uint32 _alloced; /* actual size of buffer allocated with align and pad */ +} dhd_dma_buf_t; + +/* host reordering packts logic */ +/* followed the structure to hold the reorder buffers (void **p) */ +typedef struct reorder_info { + void **p; + uint8 flow_id; + uint8 cur_idx; + uint8 exp_idx; + uint8 max_idx; + uint8 pend_pkts; +} reorder_info_t; + +/* throughput test packet format */ +typedef struct tput_pkt { + /* header */ + uint8 mac_sta[ETHER_ADDR_LEN]; + uint8 mac_ap[ETHER_ADDR_LEN]; + uint16 pkt_type; + uint8 PAD[2]; + /* data */ + uint32 crc32; + uint32 pkt_id; + uint32 num_pkts; +} tput_pkt_t; + +typedef enum { + TPUT_PKT_TYPE_NORMAL, + TPUT_PKT_TYPE_STOP +} tput_pkt_type_t; + +#define TPUT_TEST_MAX_PAYLOAD 1500 +#define TPUT_TEST_WAIT_TIMEOUT_DEFAULT 5000 + +#ifdef DHDTCPACK_SUPPRESS + +enum { + /* TCPACK suppress off */ + TCPACK_SUP_OFF, + /* Replace TCPACK in txq when new coming one has higher ACK number. */ + TCPACK_SUP_REPLACE, + /* TCPACK_SUP_REPLACE + delayed TCPACK TX unless ACK to PSH DATA. + * This will give benefits to Half-Duplex bus interface(e.g. SDIO) that + * 1. we are able to read TCP DATA packets first from the bus + * 2. TCPACKs that don't need to hurry delivered remains longer in TXQ so can be suppressed. 
+ */ + TCPACK_SUP_DELAYTX, + TCPACK_SUP_HOLD, + TCPACK_SUP_LAST_MODE +}; +#endif /* DHDTCPACK_SUPPRESS */ + +#define DHD_NULL_CHK_AND_RET(cond) \ + if (!cond) { \ + DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \ + return; \ + } + +#define DHD_NULL_CHK_AND_RET_VAL(cond, value) \ + if (!cond) { \ + DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \ + return value; \ + } + +#define DHD_NULL_CHK_AND_GOTO(cond, label) \ + if (!cond) { \ + DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \ + goto label; \ + } + +/* + * Accumulating the queue lengths of all flowring queues in a parent object, + * to assert flow control, when the cummulative queue length crosses an upper + * threshold defined on a parent object. Upper threshold may be maintained + * at a station level, at an interface level, or at a dhd instance. + * + * cumm_ctr_t abstraction: + * cumm_ctr_t abstraction may be enhanced to use an object with a hysterisis + * pause on/off threshold callback. + * All macros use the address of the cummulative length in the parent objects. + * + * BCM_GMAC3 builds use a single perimeter lock, as opposed to a per queue lock. + * Cummulative counters in parent objects may be updated without spinlocks. + * + * In non BCM_GMAC3, if a cummulative queue length is desired across all flows + * belonging to either of (a station, or an interface or a dhd instance), then + * an atomic operation is required using an atomic_t cummulative counters or + * using a spinlock. BCM_ROUTER_DHD uses the Linux atomic_t construct. + */ + +/* Cummulative length not supported. 
*/ +typedef uint32 cumm_ctr_t; +#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen)) +#define DHD_CUMM_CTR(clen) *(DHD_CUMM_CTR_PTR(clen)) /* accessor */ +#define DHD_CUMM_CTR_READ(clen) DHD_CUMM_CTR(clen) /* read access */ +#define DHD_CUMM_CTR_INIT(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_INCR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_DECR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); + +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) +struct tdls_peer_node { + uint8 addr[ETHER_ADDR_LEN]; + struct tdls_peer_node *next; +}; +typedef struct tdls_peer_node tdls_peer_node_t; +typedef struct { + tdls_peer_node_t *node; + uint8 tdls_peer_count; +} tdls_peer_tbl_t; +#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ + +#ifdef DHD_LOG_DUMP +#define LOG_DUMP_MAGIC 0xDEB3DEB3 +#define HEALTH_CHK_BUF_SIZE 256 + +#ifdef EWP_ECNTRS_LOGGING +#define ECNTR_RING_ID 0xECDB +#define ECNTR_RING_NAME "ewp_ecntr_ring" +#endif /* EWP_ECNTRS_LOGGING */ + +#if defined(DEBUGABILITY) && defined(EWP_ECNTRS_LOGGING) +#error "Duplicate rings will be created since both the features are enabled" +#endif /* DEBUGABILITY && EWP_ECNTRS_LOGGING */ + +typedef enum { + LOG_DUMP_SECTION_GENERAL = 0, + LOG_DUMP_SECTION_ECNTRS, + LOG_DUMP_SECTION_SPECIAL, + LOG_DUMP_SECTION_DHD_DUMP, + LOG_DUMP_SECTION_EXT_TRAP, + LOG_DUMP_SECTION_HEALTH_CHK, + LOG_DUMP_SECTION_PRESERVE, + LOG_DUMP_SECTION_COOKIE, + LOG_DUMP_SECTION_FLOWRING +} log_dump_section_type_t; + +/* Each section in the debug_dump log file shall begin with a header */ +typedef struct { + uint32 magic; /* 0xDEB3DEB3 */ + uint32 type; /* of type log_dump_section_type_t */ + uint64 timestamp; + uint32 length; /* length of the section that follows */ + uint32 pad; +} log_dump_section_hdr_t; + +/* below structure describe ring buffer. 
*/ +struct dhd_log_dump_buf +{ + spinlock_t lock; + void *dhd_pub; + unsigned int enable; + unsigned int wraparound; + unsigned long max; + unsigned int remain; + char* present; + char* front; + char* buffer; +}; + +#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256 +#define DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE (80 * 1024) + +extern void dhd_log_dump_write(int type, char *binary_data, + int binary_len, const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +bool dhd_log_dump_ecntr_enabled(void); +#endif /* DHD_LOG_DUMP */ + +/* DEBUG_DUMP SUB COMMAND */ +enum { + CMD_DEFAULT, + CMD_UNWANTED, + CMD_DISCONNECTED, + CMD_MAX +}; + +#define DHD_LOG_DUMP_TS_MULTIPLIER_VALUE 60 +#define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS "%02d%02d%02d%02d%02d%02d%04d" +#define DHD_DEBUG_DUMP_TYPE "debug_dump_FORUSER" +#define DHD_DUMP_SUBSTR_UNWANTED "_unwanted" +#define DHD_DUMP_SUBSTR_DISCONNECTED "_disconnected" + +extern void get_debug_dump_time(char *str); +extern void clear_debug_dump_time(char *str); + +#define WL_MAX_PRESERVE_BUFFER (NUM_EVENT_LOG_SETS) +#define FW_LOGSET_MASK_ALL 0xFF + +#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/" + +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; + +int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size); + +#ifdef DMAMAP_STATS +typedef struct dmamap_stats { + uint64 txdata; + uint64 txdata_sz; + uint64 rxdata; + uint64 rxdata_sz; + uint64 ioctl_rx; + uint64 ioctl_rx_sz; + uint64 event_rx; + uint64 event_rx_sz; + uint64 info_rx; + uint64 info_rx_sz; + uint64 tsbuf_rx; + uint64 tsbuf_rx_sz; +} dma_stats_t; +#endif /* DMAMAP_STATS */ + +/* see wlfc_proto.h for tx status details */ +#define DHD_MAX_TX_STATUS_MSGS 9u + +#ifdef TX_STATUS_LATENCY_STATS +typedef struct dhd_if_tx_status_latency { + /* total number of tx_status received 
on this interface */ + uint64 num_tx_status; + /* cumulative tx_status latency for this interface */ + uint64 cum_tx_status_latency; +} dhd_if_tx_status_latency_t; +#endif /* TX_STATUS_LATENCY_STATS */ + +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) +/* Timestamps to trace dhd_logtrace_thread() */ +struct dhd_logtrace_thr_ts { + uint64 entry_time; + uint64 sem_down_time; + uint64 flush_time; + uint64 unexpected_break_time; + uint64 complete_time; +}; +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ + +/** + * Common structure for module and instance linkage. + * Instantiated once per hardware (dongle) instance that this DHD manages. + */ +typedef struct dhd_pub { + /* Linkage pointers */ + osl_t *osh; /* OSL handle */ + struct dhd_bus *bus; /* Bus module handle */ + struct dhd_prot *prot; /* Protocol module handle */ + struct dhd_info *info; /* Info module handle */ + struct dhd_dbg *dbg; /* Debuggability module handle */ +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) + struct dhd_logtrace_thr_ts logtrace_thr_ts; +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ + + /* to NDIS developer, the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!! 
+ */ + +#ifdef BCMDBUS + struct dbus_pub *dbus; +#endif /* BCMDBUS */ + + /* Internal dhd items */ + bool up; /* Driver up/down (to OS) */ +#ifdef WL_CFG80211 + spinlock_t up_lock; /* Synchronization with CFG80211 down */ +#endif /* WL_CFG80211 */ + bool txoff; /* Transmit flow-controlled */ + bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ + enum dhd_bus_state busstate; + uint dhd_bus_busy_state; /* Bus busy state */ + uint hdrlen; /* Total DHD header length (proto + bus) */ + uint maxctl; /* Max size rxctl request from proto to bus */ + uint rxsz; /* Rx buffer size bus module should use */ + uint8 wme_dp; /* wme discard priority */ + + /* Dongle media info */ + bool iswl; /* Dongle-resident driver is wl */ + ulong drv_version; /* Version of dongle-resident driver */ + struct ether_addr mac; /* MAC address obtained from dongle */ + dngl_stats_t dstats; /* Stats for dongle-based data */ + + /* Additional stats for the bus level */ + ulong tx_packets; /* Data packets sent to dongle */ + ulong tx_dropped; /* Data packets dropped in dhd */ + ulong tx_multicast; /* Multicast data packets sent to dongle */ + ulong tx_errors; /* Errors in sending data to dongle */ + ulong tx_ctlpkts; /* Control packets sent to dongle */ + ulong tx_ctlerrs; /* Errors sending control frames to dongle */ + ulong rx_packets; /* Packets sent up the network interface */ + ulong rx_multicast; /* Multicast packets sent up the network interface */ + ulong rx_errors; /* Errors processing rx data packets */ + ulong rx_ctlpkts; /* Control frames processed from dongle */ + ulong rx_ctlerrs; /* Errors in processing rx control frames */ + ulong rx_dropped; /* Packets dropped locally (no memory) */ + ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */ + ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */ + ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */ + ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */ + 
ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */ + ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ + ulong fc_packets; /* Number of flow control pkts recvd */ + ulong tx_big_packets; /* Dropped data packets that are larger than MAX_MTU_SZ */ +#ifdef DMAMAP_STATS + /* DMA Mapping statistics */ + dma_stats_t dma_stats; +#endif /* DMAMAP_STATS */ + + /* Last error return */ + int bcmerror; + uint tickcnt; + + /* Last error from dongle */ + int dongle_error; + + uint8 country_code[WLC_CNTRY_BUF_SZ]; + + /* Suspend disable flag and "in suspend" flag */ + int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */ + int in_suspend; /* flag set to 1 when early suspend called */ +#ifdef PNO_SUPPORT + int pno_enable; /* pno status : "1" is pno enable */ + int pno_suspend; /* pno suspend status : "1" is pno suspended */ +#endif /* PNO_SUPPORT */ + /* DTIM skip value, default 0(or 1) means wake each DTIM + * 3 means skip 2 DTIMs and wake up 3rd DTIM(9th beacon when AP DTIM is 3) + */ + int suspend_bcn_li_dtim; /* bcn_li_dtim value in suspend mode */ +#ifdef PKT_FILTER_SUPPORT + int early_suspended; /* Early suspend status */ + int dhcp_in_progress; /* DHCP period */ +#endif // endif + + /* Pkt filter definition */ + char * pktfilter[100]; + int pktfilter_count; + + wl_country_t dhd_cspec; /* Current Locale info */ +#ifdef CUSTOM_COUNTRY_CODE + uint dhd_cflags; +#endif /* CUSTOM_COUNTRY_CODE */ +#if defined(DHD_BLOB_EXISTENCE_CHECK) + bool is_blob; /* Checking for existence of Blob file */ +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + bool force_country_change; + char eventmask[WL_EVENTING_MASK_LEN]; + int op_mode; /* STA, HostAPD, WFD, SoftAP */ + +/* Set this to 1 to use a separate interface (p2p0) for p2p operations. 
+ * For ICS MR1 releases it should be disable to be compatable with ICS MR1 Framework + * see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile + */ +/* #define WL_ENABLE_P2P_IF 1 */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ + struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ +#endif // endif + +#ifdef PROP_TXSTATUS + bool wlfc_enabled; + int wlfc_mode; + void* wlfc_state; + /* + Mode in which the dhd flow control shall operate. Must be set before + traffic starts to the device. + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + 3 - Only AMPDU hostreorder used. no wlfc. + */ + uint8 proptxstatus_mode; + bool proptxstatus_txoff; + bool proptxstatus_module_ignore; + bool proptxstatus_credit_ignore; + bool proptxstatus_txstatus_ignore; + + bool wlfc_rxpkt_chk; +#ifdef LIMIT_BORROW + bool wlfc_borrow_allowed; +#endif /* LIMIT_BORROW */ + /* + * implement below functions in each platform if needed. 
+ */ + /* platform specific function whether to skip flow control */ + bool (*skip_fc)(void * dhdp, uint8 ifx); + /* platform specific function for wlfc_enable and wlfc_deinit */ + void (*plat_init)(void *dhd); + void (*plat_deinit)(void *dhd); +#ifdef DHD_WLFC_THREAD + bool wlfc_thread_go; + struct task_struct* wlfc_thread; + wait_queue_head_t wlfc_wqhead; +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + void *pno_state; +#endif // endif +#ifdef RTT_SUPPORT + void *rtt_state; + bool rtt_supported; +#endif // endif +#ifdef ROAM_AP_ENV_DETECTION + bool roam_env_detection; +#endif // endif + bool dongle_isolation; + bool is_pcie_watchdog_reset; + bool dongle_trap_occured; /* flag for sending HANG event to upper layer */ + bool iovar_timeout_occured; /* flag to indicate iovar resumed on timeout */ +#ifdef PCIE_FULL_DONGLE + bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */ + bool livelock_occured; /* flag to indicate livelock occured */ +#endif /* PCIE_FULL_DONGLE */ +#ifdef BT_OVER_SDIO + bool is_bt_recovery_required; +#endif // endif +#ifdef DHD_MAP_LOGGING + bool smmu_fault_occurred; /* flag to indicate SMMU Fault */ +#endif /* DHD_MAP_LOGGING */ + int hang_was_sent; + int rxcnt_timeout; /* counter rxcnt timeout to send HANG */ + int txcnt_timeout; /* counter txcnt timeout to send HANG */ +#ifdef BCMPCIE + int d3ackcnt_timeout; /* counter d3ack timeout to send HANG */ +#endif /* BCMPCIE */ + bool hang_report; /* enable hang report by default */ + uint16 hang_reason; /* reason codes for HANG event */ +#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG) + uint hang_counts; +#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ +#ifdef WLTDLS + bool tdls_enable; +#endif // endif + struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS]; + #define WLC_IOCTL_MAXBUF_FWCAP 1024 + char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP]; + #define MAXSKBPEND 1024 + void *skbbuf[MAXSKBPEND]; + uint32 store_idx; + uint32 sent_idx; 
+#ifdef DHDTCPACK_SUPPRESS + uint8 tcpack_sup_mode; /* TCPACK suppress mode */ + void *tcpack_sup_module; /* TCPACK suppress module */ + uint32 tcpack_sup_ratio; + uint32 tcpack_sup_delay; +#endif /* DHDTCPACK_SUPPRESS */ +#if defined(ARP_OFFLOAD_SUPPORT) + uint32 arp_version; +#endif // endif +#if defined(BCMSUP_4WAY_HANDSHAKE) + bool fw_4way_handshake; /* Whether firmware will to do the 4way handshake. */ +#endif // endif +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool dhd_bug_on; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ +#ifdef CUSTOM_SET_CPUCORE + struct task_struct * current_dpc; + struct task_struct * current_rxf; + int chan_isvht80; +#endif /* CUSTOM_SET_CPUCORE */ + + void *sta_pool; /* pre-allocated pool of sta objects */ + void *staid_allocator; /* allocator of sta indexes */ +#ifdef PCIE_FULL_DONGLE + bool flow_rings_inited; /* set this flag after initializing flow rings */ +#endif /* PCIE_FULL_DONGLE */ + void *flowid_allocator; /* unique flowid allocator */ + void *flow_ring_table; /* flow ring table, include prot and bus info */ + void *if_flow_lkup; /* per interface flowid lkup hash table */ + void *flowid_lock; /* per os lock for flowid info protection */ + void *flowring_list_lock; /* per os lock for flowring list protection */ + uint32 num_flow_rings; + cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */ + cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */ + uint32 d2h_sync_mode; /* D2H DMA completion sync mode */ + uint8 flow_prio_map[NUMPRIO]; + uint8 flow_prio_map_type; + char enable_log[MAX_EVENT]; + bool dma_d2h_ring_upd_support; + bool dma_h2d_ring_upd_support; + bool dma_ring_upd_overwrite; /* host overwrites support setting */ + + bool idma_enable; + uint idma_inited; + + bool ifrm_enable; /* implicit frm enable */ + uint ifrm_inited; /* implicit frm init */ + + bool dar_enable; /* use DAR registers */ + uint dar_inited; + + bool fast_delete_ring_support; /* fast delete ring supported */ + +#ifdef DHD_L2_FILTER + unsigned long 
l2_filter_cnt; /* for L2_FILTER ARP table timeout */ +#endif /* DHD_L2_FILTER */ +#ifdef DHD_SSSR_DUMP + bool sssr_inited; + sssr_reg_info_v1_t sssr_reg_info; + uint8 *sssr_mempool; + uint *sssr_d11_before[MAX_NUM_D11CORES]; + uint *sssr_d11_after[MAX_NUM_D11CORES]; + bool sssr_d11_outofreset[MAX_NUM_D11CORES]; + uint *sssr_dig_buf_before; + uint *sssr_dig_buf_after; + uint32 sssr_dump_mode; +#endif /* DHD_SSSR_DUMP */ + uint8 *soc_ram; + uint32 soc_ram_length; + uint32 memdump_type; +#ifdef DHD_FW_COREDUMP + uint32 memdump_enabled; +#ifdef DHD_DEBUG_UART + bool memdump_success; +#endif /* DHD_DEBUG_UART */ +#endif /* DHD_FW_COREDUMP */ +#ifdef PCIE_FULL_DONGLE +#ifdef WLTDLS + tdls_peer_tbl_t peer_tbl; +#endif /* WLTDLS */ + uint8 tx_in_progress; +#endif /* PCIE_FULL_DONGLE */ +#ifdef DHD_ULP + void *dhd_ulp; +#endif // endif +#ifdef WLTDLS + uint32 tdls_mode; +#endif // endif +#ifdef GSCAN_SUPPORT + bool lazy_roam_enable; +#endif // endif +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + bool apf_set; +#endif /* PKT_FILTER_SUPPORT && APF */ + void *macdbg_info; +#ifdef DHD_WET + void *wet_info; +#endif // endif + bool h2d_phase_supported; + bool force_dongletrap_on_bad_h2d_phase; + uint32 dongle_trap_data; + bool fw_download_done; + trap_t last_trap_info; /* trap info from the last trap */ + uint8 rand_mac_oui[DOT11_OUI_LEN]; +#ifdef DHD_LOSSLESS_ROAMING + uint8 dequeue_prec_map; + uint8 prio_8021x; +#endif // endif +#ifdef WL_NATOE + struct dhd_nfct_info *nfct; + spinlock_t nfct_lock; +#endif /* WL_NATOE */ + /* timesync link */ + struct dhd_ts *ts; + bool d2h_hostrdy_supported; +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_t block_bus; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#if defined(DBG_PKT_MON) + bool d11_tx_status; +#endif // endif + uint16 ndo_version; /* ND offload version supported */ +#ifdef NDO_CONFIG_SUPPORT + bool ndo_enable; /* ND offload feature enable */ + bool ndo_host_ip_overflow; /* # of host ip addr exceed FW capacity */ + uint32 
ndo_max_host_ip; /* # of host ip addr supported by FW */ +#endif /* NDO_CONFIG_SUPPORT */ +#if defined(DHD_LOG_DUMP) + /* buffer to hold 'dhd dump' data before dumping to file */ + uint8 *concise_dbg_buf; + uint64 last_file_posn; + int logdump_periodic_flush; + /* ecounter debug ring */ +#ifdef EWP_ECNTRS_LOGGING + void *ecntr_dbg_ring; +#endif // endif +#ifdef DNGL_EVENT_SUPPORT + uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE]; +#endif // endif + void *logdump_cookie; +#endif /* DHD_LOG_DUMP */ + uint32 dhd_console_ms; /** interval for polling the dongle for console (log) messages */ + bool ext_trap_data_supported; + uint32 *extended_trap_data; +#ifdef DUMP_IOCTL_IOV_LIST + /* dump iovar list */ + dll_t dump_iovlist_head; + uint8 dump_iovlist_len; +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef CUSTOM_SET_ANTNPM + uint32 mimo_ant_set; +#endif /* CUSTOM_SET_ANTNPM */ +#ifdef DHD_DEBUG + /* memwaste feature */ + dll_t mw_list_head; /* memwaste list head */ + uint32 mw_id; /* memwaste list unique id */ +#endif /* DHD_DEBUG */ +#ifdef WLTDLS + spinlock_t tdls_lock; +#endif /* WLTDLS */ + uint pcie_txs_metadata_enable; + uint wbtext_policy; /* wbtext policy of dongle */ + bool wbtext_support; /* for product policy only */ +#ifdef SHOW_LOGTRACE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + struct mutex dhd_trace_lock; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +#endif /* SHOW_LOGTRACE */ + + bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */ + tput_test_t tput_data; + uint64 tput_start_ts; + uint64 tput_stop_ts; +#ifdef WL_MONITOR + bool monitor_enable; +#endif // endif + uint dhd_watchdog_ms_backup; + void *event_log_filter; + char debug_dump_time_str[DEBUG_DUMP_TIME_BUF_LEN]; + uint32 logset_prsrv_mask; + bool wl_event_enabled; + bool logtrace_pkt_sendup; +#ifdef DHD_DUMP_MNGR + struct _dhd_dump_file_manage *dump_file_manage; +#endif /* DHD_DUMP_MNGR */ + int debug_dump_subcmd; + bool hscb_enable; + wait_queue_head_t 
tx_completion_wait; + uint32 batch_tx_pkts_cmpl; + uint32 batch_tx_num_pkts; +#ifdef DHD_ERPOM + bool enable_erpom; + pom_func_handler_t pom_wlan_handler; + int (*pom_func_register)(pom_func_handler_t *func); + int (*pom_func_deregister)(pom_func_handler_t *func); + int (*pom_toggle_reg_on)(uchar func_id, uchar reason); +#endif /* DHD_ERPOM */ +#ifdef EWP_EDL + bool dongle_edl_support; + dhd_dma_buf_t edl_ring_mem; +#endif /* EWP_EDL */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + struct mutex ndev_op_sync; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ + + bool debug_buf_dest_support; + uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX]; +#if defined(DHD_H2D_LOG_TIME_SYNC) +#define DHD_H2D_LOG_TIME_STAMP_MATCH (10000) /* 10 Seconds */ + /* + * Interval for updating the dongle console message time stamp with the Host (DHD) + * time stamp + */ + uint32 dhd_rte_time_sync_ms; +#endif /* DHD_H2D_LOG_TIME_SYNC */ + char *clm_path; /* module_param: path to clm vars file */ + char *conf_path; /* module_param: path to config vars file */ + struct dhd_conf *conf; /* Bus module handle */ + void *adapter; /* adapter information, interrupt, fw path etc. 
*/ +#ifdef BCMDBUS + bool dhd_remove; +#endif /* BCMDBUS */ +#if defined(WL_WIRELESS_EXT) +#if defined(WL_ESCAN) + void *escan; +#else + void *iscan; +#endif +#endif +#ifdef WL_EXT_IAPSTA + void *iapsta_params; +#endif +} dhd_pub_t; + +typedef struct { + uint rxwake; + uint rcwake; +#ifdef DHD_WAKE_RX_STATUS + uint rx_bcast; + uint rx_arp; + uint rx_mcast; + uint rx_multi_ipv6; + uint rx_icmpv6; + uint rx_icmpv6_ra; + uint rx_icmpv6_na; + uint rx_icmpv6_ns; + uint rx_multi_ipv4; + uint rx_multi_other; + uint rx_ucast; +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + uint rc_event[WLC_E_LAST]; +#endif /* DHD_WAKE_EVENT_STATUS */ +} wake_counts_t; + +#if defined(PCIE_FULL_DONGLE) + +/* Packet Tag for PCIE Full Dongle DHD */ +typedef struct dhd_pkttag_fd { + uint16 flowid; /* Flowring Id */ + uint16 ifid; +#ifndef DHD_PCIE_PKTID + uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */ + dmaaddr_t pa; /* physical address */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ +#endif /* !DHD_PCIE_PKTID */ +#ifdef TX_STATUS_LATENCY_STATS + uint64 q_time_us; /* time when tx pkt queued to flowring */ +#endif /* TX_STATUS_LATENCY_STATS */ +} dhd_pkttag_fd_t; + +/* Packet Tag for DHD PCIE Full Dongle */ +#define DHD_PKTTAG_FD(pkt) ((dhd_pkttag_fd_t *)(PKTTAG(pkt))) + +#define DHD_PKT_GET_FLOWID(pkt) ((DHD_PKTTAG_FD(pkt))->flowid) +#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \ + DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid) + +#define DHD_PKT_GET_DATAOFF(pkt) ((DHD_PKTTAG_FD(pkt))->dataoff) +#define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \ + DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff) + +#define DHD_PKT_GET_DMA_LEN(pkt) ((DHD_PKTTAG_FD(pkt))->dma_len) +#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \ + DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len) + +#define DHD_PKT_GET_PA(pkt) ((DHD_PKTTAG_FD(pkt))->pa) +#define DHD_PKT_SET_PA(pkt, pkt_pa) \ + DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa) + +#define 
DHD_PKT_GET_DMAH(pkt) ((DHD_PKTTAG_FD(pkt))->dmah) +#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \ + DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah) + +#define DHD_PKT_GET_SECDMA(pkt) ((DHD_PKTTAG_FD(pkt))->secdma) +#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \ + DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma) + +#ifdef TX_STATUS_LATENCY_STATS +#define DHD_PKT_GET_QTIME(pkt) ((DHD_PKTTAG_FD(pkt))->q_time_us) +#define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \ + DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us) +#endif /* TX_STATUS_LATENCY_STATS */ +#endif /* PCIE_FULL_DONGLE */ + +#if defined(BCMWDF) +typedef struct { + dhd_pub_t *dhd_pub; +} dhd_workitem_context_t; + +WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context) +#endif /* (BCMWDF) */ + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + + #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define _DHD_PM_RESUME_WAIT(a, b) do {\ + int retry = 0; \ + SMP_RD_BARRIER_DEPENDS(); \ + while (dhd_mmc_suspend && retry++ != b) { \ + SMP_RD_BARRIER_DEPENDS(); \ + wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \ + } \ + } while (0) + #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200) + #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0) + #define DHD_PM_RESUME_RETURN_ERROR(a) do { \ + if (dhd_mmc_suspend) { \ + printf("%s[%d]: mmc is still in suspend state!!!\n", \ + __FUNCTION__, __LINE__); \ + return a; \ + } \ + } while (0) + #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0) + + #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9999; \ + while ((exp) && (countdown >= 10000)) { \ + wait_event_interruptible_timeout(a, FALSE, 1); \ + countdown -= 10000; \ + } \ + } while (0) + + #else + + #define DHD_PM_RESUME_WAIT_INIT(a) + #define DHD_PM_RESUME_WAIT(a) + #define DHD_PM_RESUME_WAIT_FOREVER(a) + 
#define DHD_PM_RESUME_RETURN_ERROR(a) + #define DHD_PM_RESUME_RETURN + + #define DHD_SPINWAIT_SLEEP_INIT(a) + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) { \ + OSL_DELAY(10); \ + countdown -= 10; \ + } \ + } while (0) + + #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#ifndef OSL_SLEEP +#define OSL_SLEEP(ms) OSL_DELAY(ms*1000) +#endif /* OSL_SLEEP */ + +#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */ + +#ifdef PNO_SUPPORT +int dhd_pno_clean(dhd_pub_t *dhd); +#endif /* PNO_SUPPORT */ + +/* + * Wake locks are an Android power management concept. They are used by applications and services + * to request CPU resources. + */ +extern int dhd_os_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wake_unlock(dhd_pub_t *pub); +extern int dhd_os_wake_lock_waive(dhd_pub_t *pub); +extern int dhd_os_wake_lock_restore(dhd_pub_t *pub); +extern void dhd_event_wake_lock(dhd_pub_t *pub); +extern void dhd_event_wake_unlock(dhd_pub_t *pub); +extern void dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_pm_wake_unlock(dhd_pub_t *pub); +extern void dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_txfl_wake_unlock(dhd_pub_t *pub); +extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); +extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub); +extern int dhd_os_wd_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub); +extern void dhd_os_wake_lock_init(struct dhd_info *dhd); +extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd); +#ifdef DHD_USE_SCAN_WAKELOCK +extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub); +#endif /* DHD_USE_SCAN_WAKELOCK */ + +inline static void 
MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +} + +inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +} + +inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +} + +#ifdef DHD_DEBUG_WAKE_LOCK +#define DHD_OS_WAKE_LOCK(pub) \ + do { \ + printf("call wake_lock: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_wake_lock(pub); \ + } while (0) +#define DHD_OS_WAKE_UNLOCK(pub) \ + do { \ + printf("call wake_unlock: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_wake_unlock(pub); \ + } while (0) +#define DHD_EVENT_WAKE_LOCK(pub) \ + do { \ + printf("call event wake_lock: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_event_wake_lock(pub); \ + } while (0) +#define DHD_EVENT_WAKE_UNLOCK(pub) \ + do { \ + printf("call event wake_unlock: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_event_wake_unlock(pub); \ + } while (0) +#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + printf("call pm_wake_timeout enable\n"); \ + dhd_pm_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_PM_WAKE_UNLOCK(pub) \ + do { \ + printf("call pm_wake unlock\n"); \ + dhd_pm_wake_unlock(pub); \ + } while (0) +#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + printf("call pm_wake_timeout enable\n"); \ + dhd_txfl_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_TXFL_WAKE_UNLOCK(pub) \ + do { \ + printf("call pm_wake unlock\n"); \ + dhd_txfl_wake_unlock(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) \ + do { \ + printf("call wake_lock_timeout: %s %d\n", \ + __FUNCTION__, 
__LINE__); \ + dhd_os_wake_lock_timeout(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \ + do { \ + printf("call wake_lock_rx_timeout_enable[%d]: %s %d\n", \ + val, __FUNCTION__, __LINE__); \ + dhd_os_wake_lock_rx_timeout_enable(pub, val); \ + } while (0) +#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \ + do { \ + printf("call wake_lock_ctrl_timeout_enable[%d]: %s %d\n", \ + val, __FUNCTION__, __LINE__); \ + dhd_os_wake_lock_ctrl_timeout_enable(pub, val); \ + } while (0) +#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \ + do { \ + printf("call wake_lock_ctrl_timeout_cancel: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_wake_lock_ctrl_timeout_cancel(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_WAIVE(pub) \ + do { \ + printf("call wake_lock_waive: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_wake_lock_waive(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_RESTORE(pub) \ + do { \ + printf("call wake_lock_restore: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_wake_lock_restore(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_INIT(dhd) \ + do { \ + printf("call wake_lock_init: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_wake_lock_init(dhd); \ + } while (0) +#define DHD_OS_WAKE_LOCK_DESTROY(dhd) \ + do { \ + printf("call wake_lock_destroy: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_wake_lock_destroy(dhd); \ + } while (0) +#else +#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) +#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) +#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub) +#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub) +#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) dhd_pm_wake_lock_timeout(pub, val) +#define DHD_PM_WAKE_UNLOCK(pub) dhd_pm_wake_unlock(pub) +#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) dhd_txfl_wake_lock_timeout(pub, val) +#define DHD_TXFL_WAKE_UNLOCK(pub) dhd_txfl_wake_unlock(pub) +#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub) 
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \ + dhd_os_wake_lock_rx_timeout_enable(pub, val) +#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \ + dhd_os_wake_lock_ctrl_timeout_enable(pub, val) +#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \ + dhd_os_wake_lock_ctrl_timeout_cancel(pub) +#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub) +#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub) +#define DHD_OS_WAKE_LOCK_INIT(dhd) dhd_os_wake_lock_init(dhd); +#define DHD_OS_WAKE_LOCK_DESTROY(dhd) dhd_os_wake_lock_destroy(dhd); +#endif /* DHD_DEBUG_WAKE_LOCK */ + +#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub) +#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub) + +#ifdef DHD_USE_SCAN_WAKELOCK +#ifdef DHD_DEBUG_SCAN_WAKELOCK +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + printf("call wake_lock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_scan_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \ + do { \ + printf("call wake_unlock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_scan_wake_unlock(pub); \ + } while (0) +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub) +#endif /* DHD_DEBUG_SCAN_WAKELOCK */ +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) +#endif /* DHD_USE_SCAN_WAKELOCK */ + +#ifdef BCMPCIE_OOB_HOST_WAKE +#define OOB_WAKE_LOCK_TIMEOUT 500 +extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); +extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus); +#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val) +#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#define DHD_PACKET_TIMEOUT_MS 500 +#define 
DHD_EVENT_TIMEOUT_MS 1500 +#define SCAN_WAKE_LOCK_TIMEOUT 10000 +#define MAX_TX_TIMEOUT 500 + +/* Enum for IOCTL recieved status */ +typedef enum dhd_ioctl_recieved_status +{ + IOCTL_WAIT = 0, + IOCTL_RETURN_ON_SUCCESS, + IOCTL_RETURN_ON_TRAP, + IOCTL_RETURN_ON_BUS_STOP, + IOCTL_RETURN_ON_ERROR +} dhd_ioctl_recieved_status_t; + +/* interface operations (register, remove) should be atomic, use this lock to prevent race + * condition among wifi on/off and interface operation functions + */ +void dhd_net_if_lock(struct net_device *dev); +void dhd_net_if_unlock(struct net_device *dev); + +#if defined(MULTIPLE_SUPPLICANT) +extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(MULTIPLE_SUPPLICANT) +extern struct mutex _dhd_mutex_lock_; +#define DHD_MUTEX_IS_LOCK_RETURN() \ + if (mutex_is_locked(&_dhd_mutex_lock_) != 0) { \ + printf("%s : probe is already running! return.\n", __FUNCTION__); \ + return 0; \ + } +#define DHD_MUTEX_LOCK() \ + do { \ + if (mutex_is_locked(&_dhd_mutex_lock_) == 0) { \ + printf("%s : no mutex held. set lock\n", __FUNCTION__); \ + } else { \ + printf("%s : mutex is locked!. 
wait for unlocking\n", __FUNCTION__); \ + } \ + mutex_lock(&_dhd_mutex_lock_); \ + } while (0) +#define DHD_MUTEX_UNLOCK() \ + do { \ + mutex_unlock(&_dhd_mutex_lock_); \ + printf("%s : the lock is released.\n", __FUNCTION__); \ + } while (0) +#else +#define DHD_MUTEX_IS_LOCK_RETURN(a) do {} while (0) +#define DHD_MUTEX_LOCK(a) do {} while (0) +#define DHD_MUTEX_UNLOCK(a) do {} while (0) +#endif + +typedef enum dhd_attach_states +{ + DHD_ATTACH_STATE_INIT = 0x0, + DHD_ATTACH_STATE_NET_ALLOC = 0x1, + DHD_ATTACH_STATE_DHD_ALLOC = 0x2, + DHD_ATTACH_STATE_ADD_IF = 0x4, + DHD_ATTACH_STATE_PROT_ATTACH = 0x8, + DHD_ATTACH_STATE_WL_ATTACH = 0x10, + DHD_ATTACH_STATE_THREADS_CREATED = 0x20, + DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40, + DHD_ATTACH_STATE_CFG80211 = 0x80, + DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100, + DHD_ATTACH_TIMESYNC_ATTACH_DONE = 0x200, + DHD_ATTACH_LOGTRACE_INIT = 0x400, + DHD_ATTACH_STATE_LB_ATTACH_DONE = 0x800, + DHD_ATTACH_STATE_DONE = 0x1000 +} dhd_attach_states_t; + +/* Value -1 means we are unsuccessful in creating the kthread. */ +#define DHD_PID_KT_INVALID -1 +/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */ +#define DHD_PID_KT_TL_INVALID -2 + +/* default reporting period */ +#define ECOUNTERS_DEFAULT_PERIOD 0 + +/* default number of reports. '0' indicates forever */ +#define ECOUNTERS_NUM_REPORTS 0 + +typedef struct ecounters_cfg { + uint16 type; + uint16 if_slice_idx; + uint16 stats_rep; +} ecounters_cfg_t; + +typedef struct event_ecounters_cfg { + uint16 event_id; + uint16 type; + uint16 if_slice_idx; + uint16 stats_rep; +} event_ecounters_cfg_t; + +typedef struct ecountersv2_xtlv_list_elt { + /* Not quite the exact bcm_xtlv_t type as data could be pointing to other pieces in + * memory at the time of parsing arguments. 
+ */ + uint16 id; + uint16 len; + uint8 *data; + struct ecountersv2_xtlv_list_elt *next; +} ecountersv2_xtlv_list_elt_t; + +typedef struct ecountersv2_processed_xtlv_list_elt { + uint8 *data; + struct ecountersv2_processed_xtlv_list_elt *next; +} ecountersv2_processed_xtlv_list_elt; + +/* + * Exported from dhd OS modules (dhd_linux/dhd_ndis) + */ + +/* Indication from bus module regarding presence/insertion of dongle. + * Return dhd_pub_t pointer, used as handle to OS module in later calls. + * Returned structure should have bus and prot pointers filled in. + * bus_hdrlen specifies required headroom for bus module header. + */ +extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen +#ifdef BCMDBUS + , void *adapter +#endif +); +#if defined(WLP2P) && defined(WL_CFG80211) +/* To allow attach/detach calls corresponding to p2p0 interface */ +extern int dhd_attach_p2p(dhd_pub_t *); +extern int dhd_detach_p2p(dhd_pub_t *); +#endif /* WLP2P && WL_CFG80211 */ +extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock); + +/* Indication from bus module regarding removal/absence of dongle */ +extern void dhd_detach(dhd_pub_t *dhdp); +extern void dhd_free(dhd_pub_t *dhdp); +extern void dhd_clear(dhd_pub_t *dhdp); + +/* Indication from bus module to change flow-control state */ +extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on); + +/* Store the status of a connection attempt for later retrieval by an iovar */ +extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason); + +extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec); + +extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan); + +/* Return pointer to interface name */ +extern char *dhd_ifname(dhd_pub_t *dhdp, int idx); + +#ifdef DHD_UCODE_DOWNLOAD +/* Returns the ucode path */ +extern char *dhd_get_ucode_path(dhd_pub_t *dhdp); +#endif /* DHD_UCODE_DOWNLOAD */ + +/* Request scheduling of the 
bus dpc */ +extern void dhd_sched_dpc(dhd_pub_t *dhdp); + +/* Notify tx completion */ +extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */ +#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */ +#define WIFI_FEATURE_HOTSPOT 0x0004 /* Support for GAS/ANQP */ +#define WIFI_FEATURE_P2P 0x0008 /* Wifi-Direct */ +#define WIFI_FEATURE_SOFT_AP 0x0010 /* Soft AP */ +#define WIFI_FEATURE_GSCAN 0x0020 /* Google-Scan APIs */ +#define WIFI_FEATURE_NAN 0x0040 /* Neighbor Awareness Networking */ +#define WIFI_FEATURE_D2D_RTT 0x0080 /* Device-to-device RTT */ +#define WIFI_FEATURE_D2AP_RTT 0x0100 /* Device-to-AP RTT */ +#define WIFI_FEATURE_BATCH_SCAN 0x0200 /* Batched Scan (legacy) */ +#define WIFI_FEATURE_PNO 0x0400 /* Preferred network offload */ +#define WIFI_FEATURE_ADDITIONAL_STA 0x0800 /* Support for two STAs */ +#define WIFI_FEATURE_TDLS 0x1000 /* Tunnel directed link setup */ +#define WIFI_FEATURE_TDLS_OFFCHANNEL 0x2000 /* Support for TDLS off channel */ +#define WIFI_FEATURE_EPR 0x4000 /* Enhanced power reporting */ +#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */ +#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */ +#define WIFI_FEATURE_LOGGER 0x20000 /* WiFi Logger */ +#define WIFI_FEATURE_HAL_EPNO 0x40000 /* WiFi PNO enhanced */ +#define WIFI_FEATURE_RSSI_MONITOR 0x80000 /* RSSI Monitor */ +#define WIFI_FEATURE_MKEEP_ALIVE 0x100000 /* WiFi mkeep_alive */ +#define WIFI_FEATURE_CONFIG_NDO 0x200000 /* ND offload configure */ +#define WIFI_FEATURE_TX_TRANSMIT_POWER 0x400000 /* Capture Tx transmit power levels */ +#define WIFI_FEATURE_CONTROL_ROAMING 0x800000 /* Enable/Disable firmware roaming */ +#define WIFI_FEATURE_FILTER_IE 0x1000000 /* Probe req ie filter */ +#define WIFI_FEATURE_SCAN_RAND 0x2000000 /* 
Support MAC & Prb SN randomization */ +#define WIFI_FEATURE_INVALID 0xFFFFFFFF /* Invalid Feature */ + +#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3 + +extern int dhd_dev_get_feature_set(struct net_device *dev); +extern int dhd_dev_get_feature_set_matrix(struct net_device *dev, int num); +extern int dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui); +#ifdef CUSTOM_FORCE_NODFS_FLAG +extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs); +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef NDO_CONFIG_SUPPORT +#ifndef NDO_MAX_HOST_IP_ENTRIES +#define NDO_MAX_HOST_IP_ENTRIES 10 +#endif /* NDO_MAX_HOST_IP_ENTRIES */ + +extern int dhd_dev_ndo_cfg(struct net_device *dev, u8 enable); +extern int dhd_dev_ndo_update_inet6addr(struct net_device * dev); +#endif /* NDO_CONFIG_SUPPORT */ +extern int dhd_set_rand_mac_oui(dhd_pub_t *dhd); +#ifdef GSCAN_SUPPORT +extern int dhd_dev_set_lazy_roam_cfg(struct net_device *dev, + wlc_roam_exp_params_t *roam_param); +extern int dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable); +extern int dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev, + wl_bssid_pref_cfg_t *bssid_pref, uint32 flush); +#endif /* GSCAN_SUPPORT */ +#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT) +extern int dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist, + uint32 len, uint32 flush); +extern int dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *whitelist, + uint32 len, uint32 flush); +#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */ + +/* OS independent layer functions */ +extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub); +extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub); +void dhd_os_logdump_lock(dhd_pub_t *pub); +void dhd_os_logdump_unlock(dhd_pub_t *pub); +extern int dhd_os_proto_block(dhd_pub_t * pub); +extern int dhd_os_proto_unblock(dhd_pub_t * pub); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool resched); +extern int dhd_os_ioctl_resp_wake(dhd_pub_t 
* pub); +extern unsigned int dhd_os_get_ioctl_resp_timeout(void); +extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); +extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub); +extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub); +#ifdef PCIE_FULL_DONGLE +extern void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason); +#else +static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason) +{ printf("%s is NOT implemented for SDIO", __FUNCTION__); return; } +#endif // endif +#ifdef SHOW_LOGTRACE +/* Bound and delay are fine tuned after several experiments and these + * are the best case values to handle bombarding of console logs. + */ +#define DHD_EVENT_LOGTRACE_BOUND 1 +#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 200 +extern int dhd_os_read_file(void *file, char *buf, uint32 size); +extern int dhd_os_seek_file(void *file, int64 offset); +void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg); +#endif /* SHOW_LOGTRACE */ +int dhd_os_write_file_posn(void *fp, unsigned long *posn, + void *buf, unsigned long buflen); + +#if defined(PCIE_FULL_DONGLE) +extern void dhd_pcie_backplane_access_lock(dhd_pub_t * pub); +extern void dhd_pcie_backplane_access_unlock(dhd_pub_t * pub); +#endif /* defined(PCIE_FULL_DONGLE) */ + +extern void +dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr); +extern void wl_dhdpcie_dump_regs(void * context); + +#define DHD_OS_IOCTL_RESP_LOCK(x) +#define DHD_OS_IOCTL_RESP_UNLOCK(x) + +extern int dhd_os_get_image_block(char * buf, int len, void * image); +extern int dhd_os_get_image_size(void * image); +#if defined(BT_OVER_SDIO) +extern int dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image); +extern void dhdsdio_bus_usr_cnt_inc(dhd_pub_t *pub); +extern void dhdsdio_bus_usr_cnt_dec(dhd_pub_t *pub); +#endif /* (BT_OVER_SDIO) */ +extern void *dhd_os_open_image1(dhd_pub_t *pub, char *filename); /* rev1 function signature */ 
+extern void dhd_os_close_image1(dhd_pub_t *pub, void *image); +extern void dhd_os_wd_timer(void *bus, uint wdtick); +extern void dhd_os_sdlock(dhd_pub_t * pub); +extern void dhd_os_sdunlock(dhd_pub_t * pub); +extern void dhd_os_sdlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_tracelog(const char *format, ...); +#ifdef DHDTCPACK_SUPPRESS +extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub); +extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags); +#endif /* DHDTCPACK_SUPPRESS */ + +extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr); +extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff); +extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf); +#if defined(CUSTOM_COUNTRY_CODE) +extern void get_customized_country_code(void *adapter, char *country_iso_code, + wl_country_t *cspec, u32 flags); +#else +extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec); +#endif /* CUSTOM_COUNTRY_CODE */ +extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); +extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret); +extern int dhd_os_send_hang_message(dhd_pub_t *dhdp); +extern void dhd_set_version_info(dhd_pub_t *pub, char *fw); +extern bool dhd_os_check_if_up(dhd_pub_t *pub); +extern int dhd_os_check_wakelock(dhd_pub_t *pub); +extern int dhd_os_check_wakelock_all(dhd_pub_t *pub); +extern int dhd_get_instance(dhd_pub_t *pub); +#ifdef CUSTOM_SET_CPUCORE +extern void dhd_set_cpucore(dhd_pub_t *dhd, int set); +#endif /* CUSTOM_SET_CPUCORE */ + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +#if 
defined(DHD_FW_COREDUMP) +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size); +#endif /* DHD_FW_COREDUMP */ + +void dhd_schedule_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode); + +#ifdef PKT_FILTER_SUPPORT +#define DHD_UNICAST_FILTER_NUM 0 +#define DHD_BROADCAST_FILTER_NUM 1 +#define DHD_MULTICAST4_FILTER_NUM 2 +#define DHD_MULTICAST6_FILTER_NUM 3 +#define DHD_MDNS_FILTER_NUM 4 +#define DHD_ARP_FILTER_NUM 5 +#define DHD_BROADCAST_ARP_FILTER_NUM 6 +#define DHD_IP4BCAST_DROP_FILTER_NUM 7 +#define DHD_LLC_STP_DROP_FILTER_NUM 8 +#define DHD_LLC_XID_DROP_FILTER_NUM 9 +#define DISCARD_IPV4_MCAST "102 1 6 IP4_H:16 0xf0 0xe0" +#define DISCARD_IPV6_MCAST "103 1 6 IP6_H:24 0xff 0xff" +#define DISCARD_IPV4_BCAST "107 1 6 IP4_H:16 0xffffffff 0xffffffff" +#define DISCARD_LLC_STP "108 1 6 ETH_H:14 0xFFFFFFFFFFFF 0xAAAA0300000C" +#define DISCARD_LLC_XID "109 1 6 ETH_H:14 0xFFFFFF 0x0001AF" +extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); +extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd); +extern int dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num); +extern int net_os_enable_packet_filter(struct net_device *dev, int val); +extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num); +extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val); + +#define MAX_PKTFLT_BUF_SIZE 2048 +#define MAX_PKTFLT_FIXED_PATTERN_SIZE 32 +#define MAX_PKTFLT_FIXED_BUF_SIZE \ + (WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2) +#endif /* PKT_FILTER_SUPPORT */ + +#if defined(BCMPCIE) +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval); +#else +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); +#endif /* OEM_ANDROID && BCMPCIE */ + +extern bool dhd_support_sta_mode(dhd_pub_t *dhd); +extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); + +#ifdef RSSI_MONITOR_SUPPORT +extern int dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, 
int start, + int8 max_rssi, int8 min_rssi); +#endif /* RSSI_MONITOR_SUPPORT */ + +#ifdef DHDTCPACK_SUPPRESS +int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable); +#endif /* DHDTCPACK_SUPPRESS */ + +#define DHD_RSSI_MONITOR_EVT_VERSION 1 +typedef struct { + uint8 version; + int8 cur_rssi; + struct ether_addr BSSID; +} dhd_rssi_monitor_evt_t; + +typedef struct { + uint32 limit; /* Expiration time (usec) */ + uint32 increment; /* Current expiration increment (usec) */ + uint32 elapsed; /* Current elapsed time (usec) */ + uint32 tick; /* O/S tick time (usec) */ +} dhd_timeout_t; + +#ifdef SHOW_LOGTRACE +typedef struct { + uint num_fmts; + char **fmts; + char *raw_fmts; + char *raw_sstr; + uint32 fmts_size; + uint32 raw_fmts_size; + uint32 raw_sstr_size; + uint32 ramstart; + uint32 rodata_start; + uint32 rodata_end; + char *rom_raw_sstr; + uint32 rom_raw_sstr_size; + uint32 rom_ramstart; + uint32 rom_rodata_start; + uint32 rom_rodata_end; +} dhd_event_log_t; +#endif /* SHOW_LOGTRACE */ + +#if defined(DHD_NON_DMA_M2M_CORRUPTION) +#define PCIE_DMAXFER_LPBK_LENGTH 4096 +typedef struct dhd_pcie_dmaxfer_lpbk { + union { + uint32 length; + uint32 status; + } u; + uint32 srcdelay; + uint32 destdelay; + uint32 lpbkmode; + uint32 wait; + uint32 core; +} dhd_pcie_dmaxfer_lpbk_t; +#endif /* DHD_NON_DMA_M2M_CORRUPTION */ +enum d11_lpbk_type { + M2M_DMA_LPBK = 0, + D11_LPBK = 1, + BMC_LPBK = 2, + M2M_NON_DMA_LPBK = 3, + D11_HOST_MEM_LPBK = 4, + BMC_HOST_MEM_LPBK = 5, + MAX_LPBK = 6 +}; + +#ifdef KEEP_ALIVE +extern int dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt, + uint16 ip_pkt_len, uint8* src_mac_addr, uint8* dst_mac_addr, uint32 period_msec); +extern int dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id); +#endif /* KEEP_ALIVE */ + +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +/* + * As per Google's current implementation, there will be only one APF filter. 
+ * Therefore, userspace doesn't bother about filter id and because of that + * DHD has to manage the filter id. + */ +#define PKT_FILTER_APF_ID 200 +#define DHD_APF_LOCK(ndev) dhd_apf_lock(ndev) +#define DHD_APF_UNLOCK(ndev) dhd_apf_unlock(ndev) + +extern void dhd_apf_lock(struct net_device *dev); +extern void dhd_apf_unlock(struct net_device *dev); +extern int dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version); +extern int dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len); +extern int dhd_dev_apf_add_filter(struct net_device *ndev, u8* program, + uint32 program_len); +extern int dhd_dev_apf_enable_filter(struct net_device *ndev); +extern int dhd_dev_apf_disable_filter(struct net_device *ndev); +extern int dhd_dev_apf_delete_filter(struct net_device *ndev); +#endif /* PKT_FILTER_SUPPORT && APF */ + +extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); +extern int dhd_timeout_expired(dhd_timeout_t *tmo); + +extern int dhd_ifname2idx(struct dhd_info *dhd, char *name); +extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net); +extern struct net_device * dhd_idx2net(void *pub, int ifidx); +extern int net_os_send_hang_message(struct net_device *dev); +extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num); +extern bool dhd_wowl_cap(void *bus); +extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen, + wl_event_msg_t *, void **data_ptr, void *); +extern int wl_process_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen, + wl_event_msg_t *, void **data_ptr, void *); +extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern int wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu); +extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len); +extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, + int ifindex); +extern int 
dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx); +extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx); +extern void dhd_common_init(osl_t *osh); + +extern int dhd_do_driver_init(struct net_device *net); +extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern int dhd_event_ifchange(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +#ifdef DHD_UPDATE_INTF_MAC +extern int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx); +#endif /* DHD_UPDATE_INTF_MAC */ +extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name); +extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock); +#ifdef WL_STATIC_IF +extern s32 dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx, + uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state); +#endif /* WL_STATIC_IF */ +extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); +extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); +extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx); +extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len); + +#ifdef WL_NATOE +extern int dhd_natoe_ct_event(dhd_pub_t *dhd, char *data); +#endif /* WL_NATOE */ + +/* Send packet to dongle via data channel */ +extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt); + +/* send up locally generated event */ +extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +/* Send event to host */ +extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +#ifdef LOG_INTO_TCPDUMP 
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len); +#endif /* LOG_INTO_TCPDUMP */ +#ifdef SHOW_LOGTRACE +void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg); +#endif // endif +extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag); +extern uint dhd_bus_status(dhd_pub_t *dhdp); +extern int dhd_bus_start(dhd_pub_t *dhdp); +extern int dhd_bus_suspend(dhd_pub_t *dhdpub); +extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage); +extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size); +extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line); +extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval); +#if defined(BCMSDIO) || defined(BCMPCIE) +extern uint dhd_bus_chip_id(dhd_pub_t *dhdp); +extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp); +extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp); +#endif /* defined(BCMSDIO) || defined(BCMPCIE) */ +int dhd_bus_get_fw_mode(dhd_pub_t *dhdp); + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +/* OS spin lock API */ +extern void *dhd_os_spin_lock_init(osl_t *osh); +extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock); +extern unsigned long dhd_os_spin_lock(void *lock); +void dhd_os_spin_unlock(void *lock, unsigned long flags); + +/* linux is defined for DHD EFI builds also, +* since its cross-compiled for EFI from linux. 
+* dbgring_lock apis are meant only for linux +* to use mutexes, other OSes will continue to +* use dhd_os_spin_lock +*/ +void *dhd_os_dbgring_lock_init(osl_t *osh); +void dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx); +unsigned long dhd_os_dbgring_lock(void *lock); +void dhd_os_dbgring_unlock(void *lock, unsigned long flags); + +static INLINE int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition, + uint timeout_ms) +{ return 0; } +static INLINE int dhd_os_tput_test_wake(dhd_pub_t * pub) +{ return 0; } + +extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition); +extern int dhd_os_busbusy_wake(dhd_pub_t * pub); +extern void dhd_os_tx_completion_wake(dhd_pub_t *dhd); +extern int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition); +int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var, + uint bitmask, uint condition); +extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_d3ack_wake(dhd_pub_t * pub); +extern int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition); +extern int dhd_os_dmaxfer_wake(dhd_pub_t *pub); + +/* + * Manage sta objects in an interface. Interface is identified by an ifindex and + * sta(s) within an interfaces are managed using a MacAddress of the sta. 
+ */ +struct dhd_sta; +extern bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac); +extern struct dhd_sta *dhd_find_sta(void *pub, int ifidx, void *ea); +extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea); +extern void dhd_del_all_sta(void *pub, int ifidx); +extern void dhd_del_sta(void *pub, int ifidx, void *ea); +extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx); +extern struct net_device *dhd_linux_get_primary_netdev(dhd_pub_t *dhdp); + +extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd); +int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, + char *res_buf, uint res_len, int set); +extern int dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, + uint cmd_len, char **resptr, uint resp_len); + +#ifdef DHD_MCAST_REGEN +extern int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val); +#endif // endif +typedef enum cust_gpio_modes { + WLAN_RESET_ON, + WLAN_RESET_OFF, + WLAN_POWER_ON, + WLAN_POWER_OFF +} cust_gpio_modes_t; + +typedef struct dmaxref_mem_map { + dhd_dma_buf_t *srcmem; + dhd_dma_buf_t *dstmem; +} dmaxref_mem_map_t; + +extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +extern void dhd_flush_rx_tx_wq(dhd_pub_t *dhdp); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +/* + * Insmod parameters for debug/test + */ + +/* Watchdog timer interval */ +extern uint dhd_watchdog_ms; +extern bool dhd_os_wd_timer_enabled(void *bus); + +/** Default console output poll interval */ +extern uint dhd_console_ms; + +extern uint android_msg_level; +extern uint config_msg_level; +extern uint sd_msglevel; +#ifdef BCMDBUS +extern uint 
dbus_msglevel; +#endif /* BCMDBUS */ +#ifdef WL_WIRELESS_EXT +extern uint iw_msg_level; +#endif +#ifdef WL_CFG80211 +extern uint wl_dbg_level; +#endif + +extern uint dhd_slpauto; + +/* Use interrupts */ +extern uint dhd_intr; + +/* Use polling */ +extern uint dhd_poll; + +/* ARP offload agent mode */ +extern uint dhd_arp_mode; + +/* ARP offload enable */ +extern uint dhd_arp_enable; + +/* Pkt filte enable control */ +extern uint dhd_pkt_filter_enable; + +/* Pkt filter init setup */ +extern uint dhd_pkt_filter_init; + +/* Pkt filter mode control */ +extern uint dhd_master_mode; + +/* Roaming mode control */ +extern uint dhd_roam_disable; + +/* Roaming mode control */ +extern uint dhd_radio_up; + +/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */ +extern int dhd_idletime; +#ifdef DHD_USE_IDLECOUNT +#define DHD_IDLETIME_TICKS 5 +#else +#define DHD_IDLETIME_TICKS 1 +#endif /* DHD_USE_IDLECOUNT */ + +/* SDIO Drive Strength */ +extern uint dhd_sdiod_drive_strength; + +/* triggers bcm_bprintf to print to kernel log */ +extern bool bcm_bprintf_bypass; + +/* Override to force tx queueing all the time */ +extern uint dhd_force_tx_queueing; + +/* Default bcn_timeout value is 4 */ +#define DEFAULT_BCN_TIMEOUT_VALUE 4 +#ifndef CUSTOM_BCN_TIMEOUT_SETTING +#define CUSTOM_BCN_TIMEOUT_SETTING DEFAULT_BCN_TIMEOUT_VALUE +#endif // endif + +/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ +#define DEFAULT_KEEP_ALIVE_VALUE 55000 /* msec */ +#ifndef CUSTOM_KEEP_ALIVE_SETTING +#define CUSTOM_KEEP_ALIVE_SETTING DEFAULT_KEEP_ALIVE_VALUE +#endif /* DEFAULT_KEEP_ALIVE_VALUE */ + +#define NULL_PKT_STR "null_pkt" + +/* hooks for custom glom setting option via Makefile */ +#define DEFAULT_GLOM_VALUE -1 +#ifndef CUSTOM_GLOM_SETTING +#define CUSTOM_GLOM_SETTING DEFAULT_GLOM_VALUE +#endif // endif +#define WL_AUTO_ROAM_TRIGGER -75 +/* hooks for custom Roaming Trigger setting via Makefile */ +#define DEFAULT_ROAM_TRIGGER_VALUE -75 
/* dBm default roam trigger all band */ +#define DEFAULT_ROAM_TRIGGER_SETTING -1 +#ifndef CUSTOM_ROAM_TRIGGER_SETTING +#define CUSTOM_ROAM_TRIGGER_SETTING DEFAULT_ROAM_TRIGGER_VALUE +#endif // endif + +/* hooks for custom Roaming Romaing setting via Makefile */ +#define DEFAULT_ROAM_DELTA_VALUE 10 /* dBm default roam delta all band */ +#define DEFAULT_ROAM_DELTA_SETTING -1 +#ifndef CUSTOM_ROAM_DELTA_SETTING +#define CUSTOM_ROAM_DELTA_SETTING DEFAULT_ROAM_DELTA_VALUE +#endif // endif + +/* hooks for custom PNO Event wake lock to guarantee enough time + for the Platform to detect Event before system suspended +*/ +#define DEFAULT_PNO_EVENT_LOCK_xTIME 2 /* multiplay of DHD_PACKET_TIMEOUT_MS */ +#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME +#define CUSTOM_PNO_EVENT_LOCK_xTIME DEFAULT_PNO_EVENT_LOCK_xTIME +#endif // endif +/* hooks for custom dhd_dpc_prio setting option via Makefile */ +#define DEFAULT_DHP_DPC_PRIO 1 +#ifndef CUSTOM_DPC_PRIO_SETTING +#define CUSTOM_DPC_PRIO_SETTING DEFAULT_DHP_DPC_PRIO +#endif // endif + +#ifndef CUSTOM_LISTEN_INTERVAL +#define CUSTOM_LISTEN_INTERVAL LISTEN_INTERVAL +#endif /* CUSTOM_LISTEN_INTERVAL */ + +#define DEFAULT_SUSPEND_BCN_LI_DTIM 3 +#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM +#define CUSTOM_SUSPEND_BCN_LI_DTIM DEFAULT_SUSPEND_BCN_LI_DTIM +#endif // endif + +#ifndef BCN_TIMEOUT_IN_SUSPEND +#define BCN_TIMEOUT_IN_SUSPEND 6 /* bcn timeout value in suspend mode */ +#endif // endif + +#ifndef CUSTOM_RXF_PRIO_SETTING +#define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1) +#endif // endif + +#define DEFAULT_WIFI_TURNOFF_DELAY 0 +#ifndef WIFI_TURNOFF_DELAY +#define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY +#endif /* WIFI_TURNOFF_DELAY */ + +#define DEFAULT_WIFI_TURNON_DELAY 200 +#ifndef WIFI_TURNON_DELAY +#define WIFI_TURNON_DELAY DEFAULT_WIFI_TURNON_DELAY +#endif /* WIFI_TURNON_DELAY */ + +#ifdef BCMSDIO +#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 10 /* msec */ +#else +#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 0 /* msec */ +#endif 
+#ifndef CUSTOM_DHD_WATCHDOG_MS +#define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS +#endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */ + +#define DEFAULT_ASSOC_RETRY_MAX 3 +#ifndef CUSTOM_ASSOC_RETRY_MAX +#define CUSTOM_ASSOC_RETRY_MAX DEFAULT_ASSOC_RETRY_MAX +#endif /* DEFAULT_ASSOC_RETRY_MAX */ + +#if defined(BCMSDIO) || defined(DISABLE_FRAMEBURST) +#define DEFAULT_FRAMEBURST_SET 0 +#else +#define DEFAULT_FRAMEBURST_SET 1 +#endif /* BCMSDIO */ + +#ifndef CUSTOM_FRAMEBURST_SET +#define CUSTOM_FRAMEBURST_SET DEFAULT_FRAMEBURST_SET +#endif /* CUSTOM_FRAMEBURST_SET */ + +#ifdef WLTDLS +#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING +#define CUSTOM_TDLS_IDLE_MODE_SETTING 60000 /* 60sec to tear down TDLS of not active */ +#endif // endif +#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH +#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */ +#endif // endif +#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW +#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */ +#endif // endif +#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH +#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH 100 /* pkt/sec threshold for establishing TDLS link */ +#endif // endif +#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW +#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW 10 /* pkt/sec threshold for tearing down TDLS link */ +#endif // endif +#endif /* WLTDLS */ + +#if defined(VSDB) || defined(ROAM_ENABLE) +#define DEFAULT_BCN_TIMEOUT 8 +#else +#define DEFAULT_BCN_TIMEOUT 4 +#endif // endif + +#ifndef CUSTOM_BCN_TIMEOUT +#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT +#endif // endif + +#define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated AP beacon for DTIM skip */ +#ifndef MAX_DTIM_ALLOWED_INTERVAL +#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */ +#endif // endif + +#ifndef MIN_DTIM_FOR_ROAM_THRES_EXTEND +#define MIN_DTIM_FOR_ROAM_THRES_EXTEND 600 /* minimum dtim interval to extend roam threshold */ +#endif 
// endif + +#define NO_DTIM_SKIP 1 +#ifdef SDTEST +/* Echo packet generator (SDIO), pkts/s */ +extern uint dhd_pktgen; + +/* Echo packet len (0 => sawtooth, max 1800) */ +extern uint dhd_pktgen_len; +#define MAX_PKTGEN_LEN 1800 +#endif // endif + +/* optionally set by a module_param_string() */ +#define MOD_PARAM_PATHLEN 2048 +#define MOD_PARAM_INFOLEN 512 +#define MOD_PARAM_SRLEN 64 + +#ifdef SOFTAP +extern char fw_path2[MOD_PARAM_PATHLEN]; +#endif // endif + +#if defined(ANDROID_PLATFORM_VERSION) +#if (ANDROID_PLATFORM_VERSION < 7) +#define DHD_LEGACY_FILE_PATH +#define VENDOR_PATH "/system" +#elif (ANDROID_PLATFORM_VERSION == 7) +#define VENDOR_PATH "/system" +#elif (ANDROID_PLATFORM_VERSION >= 8) +#define VENDOR_PATH "/vendor" +#endif /* ANDROID_PLATFORM_VERSION < 7 */ +#else +#define VENDOR_PATH "" +#endif /* ANDROID_PLATFORM_VERSION */ + +#if defined(DHD_LEGACY_FILE_PATH) +#define PLATFORM_PATH "/data/" +#elif defined(PLATFORM_SLP) +#define PLATFORM_PATH "/opt/etc/" +#else +#if defined(ANDROID_PLATFORM_VERSION) +#if (ANDROID_PLATFORM_VERSION >= 9) +#define PLATFORM_PATH "/data/vendor/conn/" +#else +#define PLATFORM_PATH "/data/misc/conn/" +#endif /* ANDROID_PLATFORM_VERSION >= 9 */ +#else +#define PLATFORM_PATH "/data/misc/conn/" +#endif /* ANDROID_PLATFORM_VERSION */ +#endif /* DHD_LEGACY_FILE_PATH */ + +/* Flag to indicate if we should download firmware on driver load */ +extern uint dhd_download_fw_on_driverload; +#ifndef BCMDBUS +extern int allow_delay_fwdl; +#endif /* !BCMDBUS */ + +extern int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); +extern int dhd_write_file(const char *filepath, char *buf, int buf_len); +extern int dhd_read_file(const char *filepath, char *buf, int buf_len); +extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len); + +#ifdef READ_MACADDR +extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp); +#else +static INLINE int dhd_set_macaddr_from_file(dhd_pub_t *dhdp) { return 0; } +#endif /* READ_MACADDR 
*/ +#ifdef WRITE_MACADDR +extern int dhd_write_macaddr(struct ether_addr *mac); +#else +static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; } +#endif /* WRITE_MACADDR */ +#ifdef USE_CID_CHECK +extern int dhd_check_module_cid(dhd_pub_t *dhdp); +extern char *dhd_get_cid_info(unsigned char *vid, int vid_length); +#else +static INLINE int dhd_check_module_cid(dhd_pub_t *dhdp) { return 0; } +#endif /* USE_CID_CHECK */ +#ifdef GET_MAC_FROM_OTP +extern int dhd_check_module_mac(dhd_pub_t *dhdp); +#else +static INLINE int dhd_check_module_mac(dhd_pub_t *dhdp) { return 0; } +#endif /* GET_MAC_FROM_OTP */ + +#if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(USE_CID_CHECK) || \ + defined(GET_MAC_FROM_OTP) +#define DHD_USE_CISINFO +#endif /* READ_MACADDR || WRITE_MACADDR || USE_CID_CHECK || GET_MAC_FROM_OTP */ + +#ifdef DHD_USE_CISINFO +int dhd_read_cis(dhd_pub_t *dhdp); +void dhd_clear_cis(dhd_pub_t *dhdp); +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) +extern int dhd_check_module_b85a(void); +extern int dhd_check_module_b90(void); +#define BCM4359_MODULE_TYPE_B90B 1 +#define BCM4359_MODULE_TYPE_B90S 2 +#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */ +#if defined(USE_CID_CHECK) +extern int dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem); +#endif /* defined(USE_CID_CHECK) */ +#else +static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; } +static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { } +#endif /* DHD_USE_CISINFO */ + +#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP) +/* Flags to indicate if we distingish power off policy when + * user set the memu "Keep Wi-Fi on during sleep" to "Never" + */ +extern int trigger_deep_sleep; +int dhd_deepsleep(struct net_device *dev, int flag); +#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */ + +extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); +extern void dhd_wait_event_wakeup(dhd_pub_t*dhd); + +#define 
IFLOCK_INIT(lock) *lock = 0 +#define IFLOCK(lock) while (InterlockedCompareExchange((lock), 1, 0)) \ + NdisStallExecution(1); +#define IFUNLOCK(lock) InterlockedExchange((lock), 0) +#define IFLOCK_FREE(lock) +#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL)) +#ifdef ARP_OFFLOAD_SUPPORT +#define MAX_IPV4_ENTRIES 8 +void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode); +void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable); + +/* dhd_commn arp offload wrapers */ +void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx); +void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx); +int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx); +void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx); +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef WLTDLS +int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac); +int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode); +#ifdef PCIE_FULL_DONGLE +int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event); +int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event); +int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub); +#endif /* PCIE_FULL_DONGLE */ +#endif /* WLTDLS */ + +/* Neighbor Discovery Offload Support */ +extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable); +int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx); +int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx); + +/* Enhanced ND offload support */ +uint16 dhd_ndo_get_version(dhd_pub_t *dhdp); +int dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx); +int dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx); +int dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx); +int dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable); + +/* ioctl processing for nl80211 */ +int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf); + +#if 
defined(SUPPORT_MULTIPLE_REVISION) +extern int +concate_revision(struct dhd_bus *bus, char *fwpath, char *nvpath); +#endif /* SUPPORT_MULTIPLE_REVISION */ +void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path); +void dhd_set_bus_state(void *bus, uint32 state); + +/* Remove proper pkts(either one no-frag pkt or whole fragmented pkts) */ +typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ); +extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn); + +#ifdef PROP_TXSTATUS +int dhd_os_wlfc_block(dhd_pub_t *pub); +int dhd_os_wlfc_unblock(dhd_pub_t *pub); +extern const uint8 prio2fifo[]; +#endif /* PROP_TXSTATUS */ + +int dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size); +int dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size); +int dhd_common_socram_dump(dhd_pub_t *dhdp); + +int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen); + +int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size); + +uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail); +void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) +#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE) +#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size) +#else +#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size) +#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size) +#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */ + +#ifdef USE_WFA_CERT_CONF +enum { + SET_PARAM_BUS_TXGLOM_MODE, + SET_PARAM_ROAMOFF, +#ifdef USE_WL_FRAMEBURST + SET_PARAM_FRAMEBURST, +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + SET_PARAM_TXBF, +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + SET_PARAM_PROPTX, + SET_PARAM_PROPTXMODE, +#endif /* PROP_TXSTATUS */ + 
PARAM_LAST_VALUE +}; +extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val); +#endif /* USE_WFA_CERT_CONF */ + +#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0) +#define dhd_del_flowid(pub, ifidx, flowid) do {} while (0) +bool dhd_wet_chainable(dhd_pub_t *dhdp); + +extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub); +extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); + +/** Miscellaenous DHD Spin Locks */ + +/* Disable router 3GMAC bypass path perimeter lock */ +#define DHD_PERIM_LOCK(dhdp) do {} while (0) +#define DHD_PERIM_UNLOCK(dhdp) do {} while (0) +#define DHD_PERIM_LOCK_ALL(processor_id) do {} while (0) +#define DHD_PERIM_UNLOCK_ALL(processor_id) do {} while (0) + +/* Enable DHD general spin lock/unlock */ +#define DHD_GENERAL_LOCK(dhdp, flags) \ + (flags) = dhd_os_general_spin_lock(dhdp) +#define DHD_GENERAL_UNLOCK(dhdp, flags) \ + dhd_os_general_spin_unlock((dhdp), (flags)) + +/* Enable DHD timer spin lock/unlock */ +#define DHD_TIMER_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_TIMER_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, (flags)) + +/* Enable DHD flowring spin lock/unlock */ +#define DHD_FLOWRING_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWRING_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD common flowring info spin lock/unlock */ +#define DHD_FLOWID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWID_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD common flowring list spin lock/unlock */ +#define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWRING_LIST_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +#define DHD_SPIN_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_SPIN_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +#define DHD_RING_LOCK(lock, flags) (flags) = 
dhd_os_spin_lock(lock) +#define DHD_RING_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +#define DHD_BUS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_BUS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD TDLS peer list spin lock/unlock */ +#ifdef WLTDLS +#define DHD_TDLS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_TDLS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) +#endif /* WLTDLS */ + +#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +#ifdef DBG_PKT_MON +/* Enable DHD PKT MON spin lock/unlock */ +#define DHD_PKT_MON_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_PKT_MON_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, (flags)) +#endif /* DBG_PKT_MON */ + +#define DHD_LINUX_GENERAL_LOCK(dhdp, flags) DHD_GENERAL_LOCK(dhdp, flags) +#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) DHD_GENERAL_UNLOCK(dhdp, flags) + +/* linux is defined for DHD EFI builds also, +* since its cross-compiled for EFI from linux +*/ +#define DHD_DBG_RING_LOCK_INIT(osh) dhd_os_dbgring_lock_init(osh) +#define DHD_DBG_RING_LOCK_DEINIT(osh, lock) dhd_os_dbgring_lock_deinit(osh, (lock)) +#define DHD_DBG_RING_LOCK(lock, flags) (flags) = dhd_os_dbgring_lock(lock) +#define DHD_DBG_RING_UNLOCK(lock, flags) dhd_os_dbgring_unlock((lock), flags) + +extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp); + +extern void dhd_print_tasklet_status(dhd_pub_t *dhd); + +#ifdef BCMDBUS +extern uint dhd_get_rxsz(dhd_pub_t *pub); +extern void dhd_set_path(dhd_pub_t *pub); +extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +extern void dhd_bus_clearcounts(dhd_pub_t *dhdp); +#endif /* BCMDBUS */ + +#ifdef DHD_L2_FILTER +extern int 
dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val); +#endif /* DHD_L2_FILTER */ + +typedef struct wl_io_pport { + dhd_pub_t *dhd_pub; + uint ifidx; +} wl_io_pport_t; + +typedef struct wl_evt_pport { + dhd_pub_t *dhd_pub; + int *ifidx; + void *pktdata; + uint data_len; + void **data_ptr; + void *raw_event; +} wl_evt_pport_t; + +extern void *dhd_pub_shim(dhd_pub_t *dhd_pub); +#ifdef DHD_FW_COREDUMP +void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length); +#endif /* DHD_FW_COREDUMP */ + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable); +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len); +void custom_rps_map_clear(struct netdev_rx_queue *queue); +#define PRIMARY_INF 0 +#define VIRTUAL_INF 1 +#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \ + defined(CONFIG_SOC_EXYNOS8890) +#define RPS_CPUS_MASK "10" +#define RPS_CPUS_MASK_P2P "10" +#define RPS_CPUS_MASK_IBSS "10" +#define RPS_CPUS_WLAN_CORE_ID 4 +#else +#define RPS_CPUS_MASK "6" +#define RPS_CPUS_MASK_P2P "6" +#define RPS_CPUS_MASK_IBSS "6" +#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */ +#endif // endif + +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length); + +void 
dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length); + +int dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf, + uint32 len, char *iovar); + +int dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path, + uint32 len, char *iovar); + +int dhd_apply_default_txcap(dhd_pub_t *dhd, char *txcap_path); +int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path); + +#ifdef SHOW_LOGTRACE +int dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, + dhd_event_log_t *event_log); +int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, + uint32 *rodata_start, uint32 *rodata_end); +#ifdef PCIE_FULL_DONGLE +int dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf, + dhd_event_log_t *event_data); +#endif /* PCIE_FULL_DONGLE */ +#endif /* SHOW_LOGTRACE */ + +#define dhd_is_device_removed(x) FALSE +#define dhd_os_ind_firmware_stall(x) + +#if defined(DHD_FW_COREDUMP) +extern void dhd_get_memdump_info(dhd_pub_t *dhd); +#endif /* defined(DHD_FW_COREDUMP) */ +#ifdef BCMASSERT_LOG +extern void dhd_get_assert_info(dhd_pub_t *dhd); +#else +static INLINE void dhd_get_assert_info(dhd_pub_t *dhd) { } +#endif /* BCMASSERT_LOG */ + +#define DMAXFER_FREE(dhdp, dmap) dhd_schedule_dmaxfer_free(dhdp, dmap); + +#if defined(PCIE_FULL_DONGLE) +extern void dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap); +void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap); +#endif /* PCIE_FULL_DONGLE */ + +#define DHD_LB_STATS_NOOP do { /* noop */ } while (0) +#if defined(DHD_LB_STATS) +#include +extern void dhd_lb_stats_init(dhd_pub_t *dhd); +extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp); +extern void 
dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); +#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_DEINIT(dhdp) dhd_lb_stats_deinit(dhdp) +/* Reset is called from common layer so it takes dhd_pub_t as argument */ +#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_CLR(x) (x) = 0U +#define DHD_LB_STATS_INCR(x) (x) = (x) + 1 +#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c) +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \ + { \ + int cpu = get_cpu(); put_cpu(); \ + DHD_LB_STATS_INCR(x[cpu]); \ + } +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x) dhd_lb_stats_update_txc_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x) dhd_lb_stats_update_rxc_histo(dhdp, x) +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_txc_percpu_cnt_incr(dhdp) +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_rxc_percpu_cnt_incr(dhdp) +#else /* !DHD_LB_STATS */ +#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_DEINIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#endif /* !DHD_LB_STATS */ +#ifdef DHD_LB_IRQSET +extern void dhd_irq_set_affinity(dhd_pub_t *dhdp); +#endif /* DHD_LB_IRQSET */ + +#ifdef DHD_SSSR_DUMP +#define DHD_SSSR_MEMPOOL_SIZE (1024 * 1024) /* 1MB size */ + +/* used in sssr_dump_mode */ +#define SSSR_DUMP_MODE_SSSR 0 /* dump both 
*before* and *after* files */ +#define SSSR_DUMP_MODE_FIS 1 /* dump *after* files only */ + +extern int dhd_sssr_mempool_init(dhd_pub_t *dhd); +extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd); +extern int dhd_sssr_dump_init(dhd_pub_t *dhd); +extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd); +#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp) +#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp) +#define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp) +#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp) +#else +#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0) +#endif /* DHD_SSSR_DUMP */ + +#ifdef SHOW_LOGTRACE +void dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *read_buf_info); +#endif /* SHOW_LOGTRACE */ + +#ifdef BCMPCIE +extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); +extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp); +#else +#define dhd_prot_debug_info_print(x) +static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub) +{ return 0; } +#endif /* BCMPCIE */ + +bool dhd_fw_download_status(dhd_pub_t * dhd_pub); +void dhd_show_kirqstats(dhd_pub_t *dhd); + +/* Bitmask used for Join Timeout */ +#define WLC_SSID_MASK 0x01 +#define WLC_WPA_MASK 0x02 + +extern int dhd_start_join_timer(dhd_pub_t *pub); +extern int dhd_stop_join_timer(dhd_pub_t *pub); +extern int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan); +extern int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id); +extern int dhd_start_cmd_timer(dhd_pub_t *pub); +extern int dhd_stop_cmd_timer(dhd_pub_t *pub); +extern int dhd_start_bus_timer(dhd_pub_t *pub); +extern int dhd_stop_bus_timer(dhd_pub_t *pub); +extern uint16 dhd_get_request_id(dhd_pub_t *pub); +extern int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd); +extern 
void dhd_set_join_error(dhd_pub_t *pub, uint32 mask); +extern void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask); +extern void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val); +extern void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val); +extern void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val); +extern void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val); +extern int dhd_start_timesync_timer(dhd_pub_t *pub); +extern int dhd_stop_timesync_timer(dhd_pub_t *pub); + +#ifdef DHD_PKTID_AUDIT_ENABLED +void dhd_pktid_error_handler(dhd_pub_t *dhdp); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +#define DHD_DISABLE_RUNTIME_PM(dhdp) +#define DHD_ENABLE_RUNTIME_PM(dhdp) + +extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info); +extern void dhd_prot_dump_ring_ptrs(void *prot_info); + +/* + * Enable this macro if you want to track the calls to wake lock + * This records can be printed using the following command + * cat /sys/bcm-dhd/wklock_trace + * DHD_TRACE_WAKE_LOCK supports over linux 2.6.0 version + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#undef DHD_TRACE_WAKE_LOCK +#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */ + +#if defined(DHD_TRACE_WAKE_LOCK) +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); +#endif // endif + +extern bool dhd_query_bus_erros(dhd_pub_t *dhdp); + +#if defined(CONFIG_64BIT) +#define DHD_SUPPORT_64BIT +#endif /* (linux || LINUX) && CONFIG_64BIT */ + +#if defined(DHD_ERPOM) +extern void dhd_schedule_reset(dhd_pub_t *dhdp); +#else +static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) {;} +#endif // endif + +extern void init_dhd_timeouts(dhd_pub_t *pub); +extern void deinit_dhd_timeouts(dhd_pub_t *pub); + +typedef enum timeout_resons { + 
DHD_REASON_COMMAND_TO, + DHD_REASON_JOIN_TO, + DHD_REASON_SCAN_TO, + DHD_REASON_OQS_TO +} timeout_reasons_t; + +extern void dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level); +int dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data); +void dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt); +static INLINE int dhd_get_max_txbufs(dhd_pub_t *dhdp) +{ return -1; } + +#ifdef FILTER_IE +int dhd_read_from_file(dhd_pub_t *dhd); +int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf); +int dhd_get_filter_ie_count(dhd_pub_t *dhd, uint8 *buf); +int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len); +int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8 *buf, int len); +#endif /* FILTER_IE */ + +uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp); + +#ifdef SET_PCIE_IRQ_CPU_CORE +extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set); +extern void set_irq_cpucore(unsigned int irq, int set); +#endif /* SET_PCIE_IRQ_CPU_CORE */ + +#ifdef DHD_WAKE_STATUS +wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp); +#endif /* DHD_WAKE_STATUS */ +extern int dhd_get_random_bytes(uint8 *buf, uint len); +#if defined(DHD_BLOB_EXISTENCE_CHECK) +extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + +/* configuration of ecounters. API's tp start/stop. 
currently supported only for linux */ +extern int dhd_start_ecounters(dhd_pub_t *dhd); +extern int dhd_stop_ecounters(dhd_pub_t *dhd); +extern int dhd_start_event_ecounters(dhd_pub_t *dhd); +extern int dhd_stop_event_ecounters(dhd_pub_t *dhd); + +int dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask); + +#ifdef DHD_LOG_DUMP +void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type); +void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd); +int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, + unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr); +int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos); +int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size); +void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp); +void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type); +int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size); +int dhd_logdump_cookie_count(dhd_pub_t *dhdp); +#endif /* DHD_LOG_DUMP */ + +#define DHD_PCIE_CONFIG_SAVE(bus) pci_save_state(bus->dev) +#define DHD_PCIE_CONFIG_RESTORE(bus) pci_restore_state(bus->dev) + +typedef struct dhd_pkt_parse { + uint32 proto; /* Network layer protocol */ + uint32 t1; /* n-tuple */ + uint32 t2; +} dhd_pkt_parse_t; + +/* ========= RING API functions : exposed to others ============= */ +#define DHD_RING_TYPE_FIXED 1 +uint32 dhd_ring_get_hdr_size(void); +void *dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt); +void dhd_ring_deinit(void *_ring); +void *dhd_ring_get_first(void *_ring); +void dhd_ring_free_first(void *_ring); +void *dhd_ring_get_last(void *_ring); +void *dhd_ring_get_next(void *_ring, void *cur); +void *dhd_ring_get_prev(void *_ring, void *cur); +void *dhd_ring_get_empty(void *_ring); +int dhd_ring_get_cur_size(void *_ring); +void dhd_ring_lock(void *ring, void *fist_ptr, void *last_ptr); +void dhd_ring_lock_free(void *ring); +void 
*dhd_ring_lock_get_first(void *_ring); +void *dhd_ring_lock_get_last(void *_ring); +int dhd_ring_lock_get_count(void *_ring); +void dhd_ring_lock_free_first(void *ring); + +#define DHD_DUMP_TYPE_NAME_SIZE 32 +#define DHD_DUMP_FILE_PATH_SIZE 256 +#define DHD_DUMP_FILE_COUNT_MAX 5 +#define DHD_DUMP_TYPE_COUNT_MAX 10 + +#ifdef DHD_DUMP_MNGR +typedef struct _DFM_elem { + char type_name[DHD_DUMP_TYPE_NAME_SIZE]; + char file_path[DHD_DUMP_FILE_COUNT_MAX][DHD_DUMP_FILE_PATH_SIZE]; + int file_idx; +} DFM_elem_t; + +typedef struct _dhd_dump_file_manage { + DFM_elem_t elems[DHD_DUMP_TYPE_COUNT_MAX]; +} dhd_dump_file_manage_t; + +extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname); +#endif /* DHD_DUMP_MNGR */ + +#ifdef DHD_DUMP_PCIE_RINGS +extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn); +#endif /* DHD_DUMP_PCIE_RINGS */ + +#ifdef EWP_EDL +#define DHD_EDL_RING_SIZE (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_ITEMSIZE) +int dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data, + void *evt_decode_data); +int dhd_edl_mem_init(dhd_pub_t *dhd); +void dhd_edl_mem_deinit(dhd_pub_t *dhd); +void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd); +#define DHD_EDL_MEM_INIT(dhdp) dhd_edl_mem_init(dhdp) +#define DHD_EDL_MEM_DEINIT(dhdp) dhd_edl_mem_deinit(dhdp) +#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) \ + dhd_prot_edl_ring_tcm_rd_update(dhdp) +#else +#define DHD_EDL_MEM_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_EDL_MEM_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) do { /* noop */ } while (0) +#endif /* EWP_EDL */ + +void dhd_schedule_logtrace(void *dhd_info); +int dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath); + +#define HD_PREFIX_SIZE 2 /* hexadecimal prefix size */ +#define HD_BYTE_SIZE 2 /* hexadecimal byte size */ + +#if defined(DHD_H2D_LOG_TIME_SYNC) +void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp); +void dhd_h2d_log_time_sync(dhd_pub_t 
*dhdp); +#endif /* DHD_H2D_LOG_TIME_SYNC */ +extern void dhd_cleanup_if(struct net_device *net); + +#endif /* _dhd_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_bus.h b/bcmdhd.100.10.315.x/dhd_bus.h new file mode 100644 index 0000000..c5f60bd --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_bus.h @@ -0,0 +1,321 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_bus.h 770457 2018-07-03 08:45:49Z $ + */ + +#ifndef _dhd_bus_h_ +#define _dhd_bus_h_ + +extern int dbus_up(struct dhd_bus *pub); +extern int dbus_stop(struct dhd_bus *pub); +extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len); +extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len); +/* + * Exported from dhd bus module (dhd_usb, dhd_sdio) + */ + +/* Indicate (dis)interest in finding dongles. */ +extern int dhd_bus_register(void); +extern void dhd_bus_unregister(void); + +/* Download firmware image and nvram image */ +extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *fw_path, char *nv_path, char *clm_path, char *conf_path); +#if defined(BT_OVER_SDIO) +extern int dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, char *btfw_path); +#endif /* defined (BT_OVER_SDIO) */ + +/* Stop bus module: clear pending frames, disable data flow */ +extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); + +/* Initialize bus module: prepare for communication w/dongle */ +extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex); + +/* Get the Bus Idle Time */ +extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime); + +/* Set the Bus Idle Time */ +extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time); + +/* Send a data frame to the dongle. Callee disposes of txp. */ +#ifdef BCMPCIE +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx); +#else +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); +#endif // endif + +/* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. 
+ */ +extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen); +extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen); + +/* Watchdog timer function */ +extern bool dhd_bus_watchdog(dhd_pub_t *dhd); + +extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable); +extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub); +extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub); +extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub); + +/* Device console input function */ +extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen); + +/* Deferred processing for the bus, return TRUE requests reschedule */ +extern bool dhd_bus_dpc(struct dhd_bus *bus); +extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add bus dump output to a buffer */ +extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Clear any bus counters */ +extern void dhd_bus_clearcounts(dhd_pub_t *dhdp); + +/* return the dongle chipid */ +extern uint dhd_bus_chip(struct dhd_bus *bus); + +/* return the dongle chiprev */ +extern uint dhd_bus_chiprev(struct dhd_bus *bus); + +/* Set user-specified nvram parameters. 
*/ +extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params); + +extern void *dhd_bus_pub(struct dhd_bus *bus); +extern void *dhd_bus_txq(struct dhd_bus *bus); +extern void *dhd_bus_sih(struct dhd_bus *bus); +extern uint dhd_bus_hdrlen(struct dhd_bus *bus); +#ifdef BCMSDIO +extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val); +/* return sdio io status */ +extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus); +#else +#define dhd_bus_set_dotxinrx(a, b) do {} while (0) +#endif // endif + +#define DHD_SET_BUS_STATE_DOWN(_bus) do { \ + (_bus)->dhd->busstate = DHD_BUS_DOWN; \ +} while (0) + +/* Register a dummy SDIO client driver in order to be notified of new SDIO device */ +extern int dhd_bus_reg_sdio_notify(void* semaphore); +extern void dhd_bus_unreg_sdio_notify(void); +extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable); +extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, + uint32 *slot_num); + +#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO)) +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#else +#define dhd_bus_mem_dump(x) +#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */ + +#ifdef BCMPCIE +enum { + /* Scratch buffer confiuguration update */ + D2H_DMA_SCRATCH_BUF, + D2H_DMA_SCRATCH_BUF_LEN, + + /* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */ + H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */ + H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */ + D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */ + D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */ + + /* DHD sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */ + H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */ + H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */ + D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma 
indices buf */ + D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */ + + /* DHD Indices array buffers and update for: H2D flow ring WR */ + H2D_IFRM_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */ + H2D_IFRM_INDX_WR_UPD, /* update H2D WR dma indices buf base addr to dongle */ + + /* H2D and D2H Mailbox data update */ + H2D_MB_DATA, + D2H_MB_DATA, + + /* (Common) MsgBuf Ring configuration update */ + RING_BUF_ADDR, /* update ring base address to dongle */ + RING_ITEM_LEN, /* update ring item size to dongle */ + RING_MAX_ITEMS, /* update ring max items to dongle */ + + /* Update of WR or RD index, for a MsgBuf Ring */ + RING_RD_UPD, /* update ring read index from/to dongle */ + RING_WR_UPD, /* update ring write index from/to dongle */ + + TOTAL_LFRAG_PACKET_CNT, + MAX_HOST_RXBUFS, + HOST_API_VERSION, + DNGL_TO_HOST_TRAP_ADDR, + HOST_SCB_ADDR, /* update host scb base address to dongle */ +}; + +typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32); +typedef void (*dhd_mb_ring_2_t) (struct dhd_bus *, uint32, bool); +extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type, + uint16 ringid); +extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value); +extern void dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake); +extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid); +extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus); +extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count); +extern void dhd_bus_start_queue(struct dhd_bus *bus); +extern void dhd_bus_stop_queue(struct dhd_bus *bus); +extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus); +extern dhd_mb_ring_2_t dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus); +extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus, + void * data, uint16 flowid); +extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus, + void * 
data, uint8 flowid); +extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status); +extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus); +extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs); +extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); + +#ifdef IDLE_TX_FLOW_MGMT +extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status); +#endif /* IDLE_TX_FLOW_MGMT */ + +extern int dhdpcie_bus_clock_start(struct dhd_bus *bus); +extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus); +extern int dhdpcie_bus_enable_device(struct dhd_bus *bus); +extern int dhdpcie_bus_disable_device(struct dhd_bus *bus); +extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus); +extern void dhdpcie_bus_free_resource(struct dhd_bus *bus); +extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus); +extern int dhd_bus_release_dongle(struct dhd_bus *bus); +extern int dhd_bus_request_irq(struct dhd_bus *bus); +extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq); + +extern struct device * dhd_bus_to_dev(struct dhd_bus *bus); + +extern void dhdpcie_cto_init(struct dhd_bus *bus, bool enable); +extern void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus); + +#ifdef DHD_FW_COREDUMP +extern struct dhd_bus *g_dhd_bus; +extern int dhd_dongle_mem_dump(void); +#endif /* DHD_FW_COREDUMP */ + +#ifdef IDLE_TX_FLOW_MGMT +extern void 
dhd_bus_idle_tx_ring_suspend(dhd_pub_t *dhd, uint16 flow_ring_id); +#endif /* IDLE_TX_FLOW_MGMT */ +extern void dhd_bus_handle_mb_data(struct dhd_bus *bus, uint32 d2h_mb_data); +#endif /* BCMPCIE */ + +/* dump the device trap informtation */ +extern void dhd_bus_dump_trap_info(struct dhd_bus *bus, struct bcmstrbuf *b); +extern void dhd_bus_copy_trap_sig(struct dhd_bus *bus, trap_t *tr); +/* Function to set default min res mask */ +extern bool dhd_bus_set_default_min_res_mask(struct dhd_bus *bus); + +/* Function to reset PMU registers */ +extern void dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp); + +extern void dhd_bus_ucode_download(struct dhd_bus *bus); + +#ifdef DHD_ULP +extern void dhd_bus_ulp_disable_console(dhd_pub_t *dhdp); +#endif /* DHD_ULP */ +extern int dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read); +extern int dhd_get_idletime(dhd_pub_t *dhd); +#ifdef BCMPCIE +extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus); +extern void dhd_bus_intr_count_dump(dhd_pub_t *dhdp); +#else +#define dhd_bus_dump_console_buffer(x) +static INLINE void dhd_bus_intr_count_dump(dhd_pub_t *dhdp) { UNUSED_PARAMETER(dhdp); } +#endif /* BCMPCIE */ + +#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS) +void dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, uint8 *ext_trap_data, + void *event_decode_data); +#endif // endif + +extern uint16 dhd_get_chipid(dhd_pub_t *dhd); + +#ifdef DHD_WAKE_STATUS +extern wake_counts_t* dhd_bus_get_wakecount(dhd_pub_t *dhd); +extern int dhd_bus_get_bus_wake(dhd_pub_t * dhd); +#endif /* DHD_WAKE_STATUS */ + +#ifdef BT_OVER_SDIO +/* + * SDIO layer clock control functions exposed to be called from other layers. + * This is required especially in the case where the BUS is shared between + * BT and SDIO and we have to control the clock. 
The callers of this function + * are expected to hold the sdlock + */ +int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait); +int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait); +void dhdsdio_reset_bt_use_count(struct dhd_bus *bus); +#endif /* BT_OVER_SDIO */ + +int dhd_bus_perform_flr(struct dhd_bus *bus, bool force_fail); +extern bool dhd_bus_get_flr_force_fail(struct dhd_bus *bus); + +extern bool dhd_bus_aspm_enable_rc_ep(struct dhd_bus *bus, bool enable); +extern void dhd_bus_l1ss_enable_rc_ep(struct dhd_bus *bus, bool enable); + +#ifdef BCMPCIE +extern void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp); +extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd); +#endif /* BCMPCIE */ + +extern bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus); + +#ifdef DHD_SSSR_DUMP +extern int dhd_bus_sssr_dump(dhd_pub_t *dhd); + +extern int dhd_bus_fis_trigger(dhd_pub_t *dhd); +extern int dhd_bus_fis_dump(dhd_pub_t *dhd); + +#endif /* DHD_SSSR_DUMP */ +#ifdef PCIE_FULL_DONGLE +extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_USE_BP_RESET +extern int dhd_bus_perform_bp_reset(struct dhd_bus *bus); +#endif /* DHD_USE_BP_RESET */ +#endif /* _dhd_bus_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_buzzz.h b/bcmdhd.100.10.315.x/dhd_buzzz.h new file mode 100644 index 0000000..67fec8e --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_buzzz.h @@ -0,0 +1,37 @@ +#ifndef _DHD_BUZZZ_H_INCLUDED_ +#define _DHD_BUZZZ_H_INCLUDED_ + +/* + * Broadcom logging system - Empty implementaiton + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id$ + */ + +#define dhd_buzzz_attach() do { /* noop */ } while (0) +#define dhd_buzzz_detach() do { /* noop */ } while (0) +#define dhd_buzzz_panic(x) do { /* noop */ } while (0) +#define BUZZZ_LOG(ID, N, ARG...) do { /* noop */ } while (0) + +#endif /* _DHD_BUZZZ_H_INCLUDED_ */ diff --git a/bcmdhd.100.10.315.x/dhd_cdc.c b/bcmdhd.100.10.315.x/dhd_cdc.c new file mode 100644 index 0000000..60cc35e --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_cdc.c @@ -0,0 +1,980 @@ +/* + * DHD Protocol Module for CDC and BDC. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_cdc.c 752794 2018-03-19 04:00:31Z $ + * + * BDC is like CDC, except it includes a header for data packets to convey + * packet priority over the bus, and flags (e.g. to indicate checksum status + * for dongle offload.) + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef PROP_TXSTATUS +#include +#include +#endif // endif +#ifdef BCMDBUS +#include +#endif /* BCMDBUS */ + +#ifdef DHD_ULP +#include +#endif /* DHD_ULP */ + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ +#define BUS_HEADER_LEN (24+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE + * defined in dhd_sdio.c (amount of header tha might be added) + * plus any space that might be needed for alignment padding. 
+ */ +#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for + * round off at the end of buffer + */ + +typedef struct dhd_prot { + uint16 reqid; + uint8 pending; + uint32 lastcmd; +#ifdef BCMDBUS + uint ctl_completed; +#endif /* BCMDBUS */ + uint8 bus_header[BUS_HEADER_LEN]; + cdc_ioctl_t msg; + unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN]; +} dhd_prot_t; + +uint16 +dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp) +{ + /* SDIO does not have ioctl_trans_id yet, so return -1 */ + return -1; +} + +static int +dhdcdc_msg(dhd_pub_t *dhd) +{ +#ifdef BCMDBUS + int timeout = 0; +#endif /* BCMDBUS */ + int err = 0; + dhd_prot_t *prot = dhd->prot; + int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t); + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_OS_WAKE_LOCK(dhd); + + /* NOTE : cdc->msg.len holds the desired length of the buffer to be + * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area + * is actually sent to the dongle + */ + if (len > CDC_MAX_MSG_SIZE) + len = CDC_MAX_MSG_SIZE; + + /* Send request */ +#ifdef BCMDBUS + DHD_OS_IOCTL_RESP_LOCK(dhd); + prot->ctl_completed = FALSE; + err = dbus_send_ctl(dhd->bus, (void *)&prot->msg, len); + if (err) { + DHD_ERROR(("dbus_send_ctl error=%d\n", err)); + DHD_OS_IOCTL_RESP_UNLOCK(dhd); + DHD_OS_WAKE_UNLOCK(dhd); + return err; + } +#else + err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len); +#endif /* BCMDBUS */ + +#ifdef BCMDBUS + timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false); + if ((!timeout) || (!prot->ctl_completed)) { + DHD_ERROR(("Txctl timeout %d ctl_completed %d\n", + timeout, prot->ctl_completed)); + DHD_ERROR(("Txctl wait timed out\n")); + err = -1; + } + DHD_OS_IOCTL_RESP_UNLOCK(dhd); +#endif /* BCMDBUS */ +#if defined(BCMDBUS) && defined(INTR_EP_ENABLE) + /* If the ctl write is successfully completed, wait for an acknowledgement + * that indicates that it is now ok to do ctl read from the dongle + */ + if (err != -1) { + DHD_OS_IOCTL_RESP_LOCK(dhd); + 
prot->ctl_completed = FALSE; + if (dbus_poll_intr(dhd->dbus)) { + DHD_ERROR(("dbus_poll_intr not submitted\n")); + } else { + /* interrupt polling is sucessfully submitted. Wait for dongle to send + * interrupt + */ + timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false); + if (!timeout) { + DHD_ERROR(("intr poll wait timed out\n")); + } + } + DHD_OS_IOCTL_RESP_UNLOCK(dhd); + } +#endif /* defined(BCMDBUS) && defined(INTR_EP_ENABLE) */ + DHD_OS_WAKE_UNLOCK(dhd); + return err; +} + +static int +dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len) +{ +#ifdef BCMDBUS + int timeout = 0; +#endif /* BCMDBUS */ + int ret; + int cdc_len = len + sizeof(cdc_ioctl_t); + dhd_prot_t *prot = dhd->prot; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + do { +#ifdef BCMDBUS + DHD_OS_IOCTL_RESP_LOCK(dhd); + prot->ctl_completed = FALSE; + ret = dbus_recv_ctl(dhd->bus, (uchar*)&prot->msg, cdc_len); + if (ret) { + DHD_ERROR(("dbus_recv_ctl error=0x%x(%d)\n", ret, ret)); + DHD_OS_IOCTL_RESP_UNLOCK(dhd); + goto done; + } + timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false); + if ((!timeout) || (!prot->ctl_completed)) { + DHD_ERROR(("Rxctl timeout %d ctl_completed %d\n", + timeout, prot->ctl_completed)); + ret = -1; + DHD_OS_IOCTL_RESP_UNLOCK(dhd); + + goto done; + } + DHD_OS_IOCTL_RESP_UNLOCK(dhd); + + ret = cdc_len; +#else + ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len); +#endif /* BCMDBUS */ + if (ret < 0) + break; + } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id); + +#ifdef BCMDBUS +done: +#endif /* BCMDBUS */ + return ret; +} + +static int +dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0, retries = 0; + uint32 id, flags = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + if (cmd == WLC_GET_VAR 
&& buf) + { + if (!strcmp((char *)buf, "bcmerrorstr")) + { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + goto done; + } + else if (!strcmp((char *)buf, "bcmerror")) + { + *(int *)buf = dhd->dongle_error; + goto done; + } + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + +#ifdef BCMSPI + /* 11bit gSPI bus allows 2048bytes of max-data. We restrict 'len' + * value which is 8Kbytes for various 'get' commands to 2000. 48 bytes are + * left for sw headers and misc. + */ + if (len > 2000) { + DHD_ERROR(("dhdcdc_query_ioctl: len is truncated to 2000 bytes\n")); + len = 2000; + } +#endif /* BCMSPI */ + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT); + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + if (!dhd->hang_was_sent) + DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); + goto done; + } + +retry: + /* wait for interrupt and get first fragment */ + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if ((id < prot->reqid) && (++retries < RETRIES)) + goto retry; + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Copy info buffer */ + if (buf) + { + if (ret < (int)len) + len = ret; + memcpy(buf, (void*) prot->buf, len); + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + +#ifdef DHD_PM_CONTROL_FROM_FILE +extern bool g_pm_control; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +static int 
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0; + uint32 flags, id; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + if (cmd == WLC_SET_PM) { +#ifdef DHD_PM_CONTROL_FROM_FILE + if (g_pm_control == TRUE) { + DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n", + __FUNCTION__, buf ? *(char *)buf : 0)); + goto done; + } +#endif /* DHD_PM_CONTROL_FROM_FILE */ + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0)); + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET; + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + +#ifdef DHD_ULP + if (buf && (!strncmp(buf, "ulp", sizeof("ulp")))) { + /* force all the writes after this point to NOT to use cached sbwad value */ + dhd_ulp_disable_cached_sbwad(dhd); + } +#endif /* DHD_ULP */ + + if ((ret = dhdcdc_msg(dhd)) < 0) { + DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret)); + goto done; + } + + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, 
prot->reqid)); + ret = -EINVAL; + goto done; + } + +#ifdef DHD_ULP + /* For ulp prototyping temporary */ + if ((ret = dhd_ulp_check_ulp_request(dhd, buf)) < 0) + goto done; +#endif /* DHD_ULP */ + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + +#ifdef BCMDBUS +int +dhd_prot_ctl_complete(dhd_pub_t *dhd) +{ + dhd_prot_t *prot; + + if (dhd == NULL) + return BCME_ERROR; + + prot = dhd->prot; + + ASSERT(prot); + DHD_OS_IOCTL_RESP_LOCK(dhd); + prot->ctl_completed = TRUE; + dhd_os_ioctl_resp_wake(dhd); + DHD_OS_IOCTL_RESP_UNLOCK(dhd); + return 0; +} +#endif /* BCMDBUS */ + +int +dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + dhd_prot_t *prot = dhd->prot; + int ret = -1; + uint8 action; + static int error_cnt = 0; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n", + __FUNCTION__, dhd->busstate, dhd->hang_was_sent)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + if (prot->pending == TRUE) { + DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n", + ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, + (unsigned long)prot->lastcmd)); + if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { + DHD_TRACE(("iovar cmd=%s\n", buf ? 
(char*)buf : "\0")); + } + goto done; + } + + prot->pending = TRUE; + prot->lastcmd = ioc->cmd; + action = ioc->set; + if (action & WL_IOCTL_ACTION_SET) + ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + else { + ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) + ioc->used = ret - sizeof(cdc_ioctl_t); + } + // terence 20130805: send hang event to wpa_supplicant + if (ret == -EIO) { + error_cnt++; + if (error_cnt > 2) + ret = -ETIMEDOUT; + } else + error_cnt = 0; + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) + ret = 0; + else { + cdc_ioctl_t *msg = &prot->msg; + ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */ + } + + /* Intercept the wme_dp ioctl here */ + if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + + prot->pending = FALSE; + +done: + + return ret; +} + +int +dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +void +dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + if (!dhdp || !dhdp->prot) { + return; + } + + bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); +#ifdef PROP_TXSTATUS + dhd_wlfc_dump(dhdp, strbuf); +#endif // endif +} + +/* The FreeBSD PKTPUSH could change the packet buf pinter + so we need to make it changable +*/ +#define PKTBUF pktbuf +void +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ +#ifdef BDC + struct bdc_header *h; +#endif /* BDC */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Push BDC header used to convey priority for buses that don't */ + + PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN); + + h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF); + + h->flags = 
(BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(PKTBUF)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = 0; +#endif /* BDC */ + BDC_SET_IF_IDX(h, ifidx); +} +#undef PKTBUF /* Only defined in the above routine */ + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + uint hdrlen = 0; +#ifdef BDC + /* Length of BDC(+WLFC) headers pushed */ + hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4); +#endif // endif + return hdrlen; +} + +int +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info, + uint *reorder_info_len) +{ +#ifdef BDC + struct bdc_header *h; +#endif // endif + uint8 data_offset = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + if (reorder_info_len) + *reorder_info_len = 0; + /* Pop BDC header used to convey priority for buses that don't */ + + if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + if (!ifidx) { + /* for tx packet, skip the analysis */ + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); + goto exit; + } + + *ifidx = BDC_GET_IF_IDX(h); + + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { + DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1) + h->dataOffset = 0; + else + return BCME_ERROR; + } + + if (h->flags & BDC_FLAG_SUM_GOOD) { + DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + PKTSETSUMGOOD(pktbuf, TRUE); + } + + PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK)); + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); +#endif /* 
BDC */ + +#ifdef PROP_TXSTATUS + if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) { + /* + - parse txstatus only for packets that came from the firmware + */ + dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2), + reorder_buf_info, reorder_info_len); + +#ifdef BCMDBUS +#ifndef DHD_WLFC_THREAD + dhd_wlfc_commit_packets(dhd, + (f_commitpkt_t)dhd_bus_txdata, dhd->bus, NULL, FALSE); +#endif /* DHD_WLFC_THREAD */ +#endif /* BCMDBUS */ + } +#endif /* PROP_TXSTATUS */ + +exit: + PKTPULL(dhd->osh, pktbuf, (data_offset << 2)); + return 0; +} + +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + dhd_prot_t *cdc; + + if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(cdc, 0, sizeof(dhd_prot_t)); + + /* ensure that the msg buf directly follows the cdc msg struct */ + if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) { + DHD_ERROR(("dhd_prot_t is not correctly defined\n")); + goto fail; + } + + dhd->prot = cdc; +#ifdef BDC + dhd->hdrlen += BDC_HEADER_LEN; +#endif // endif + dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN; + return 0; + +fail: + if (cdc != NULL) + DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t)); + return BCME_NOMEM; +} + +/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? 
*/ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ +#ifdef PROP_TXSTATUS + dhd_wlfc_deinit(dhd); +#endif // endif + DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t)); + dhd->prot = NULL; +} + +void +dhd_prot_dstats(dhd_pub_t *dhd) +{ + /* copy bus stats */ + + dhd->dstats.tx_packets = dhd->tx_packets; + dhd->dstats.tx_errors = dhd->tx_errors; + dhd->dstats.rx_packets = dhd->rx_packets; + dhd->dstats.rx_errors = dhd->rx_errors; + dhd->dstats.rx_dropped = dhd->rx_dropped; + dhd->dstats.multicast = dhd->rx_multicast; + return; +} + +int +dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(dhd); +#endif /* DHD_FW_COREDUMP */ + +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) + goto done; +#if defined(BCMDBUS) + if (dhd_download_fw_on_driverload) { + dhd_conf_reset(dhd); + dhd_conf_set_chiprev(dhd, revinfo.chipnum, revinfo.chiprev); + dhd_conf_preinit(dhd); + dhd_conf_read_config(dhd, dhd->conf_path); + } +#endif /* BCMDBUS */ + + DHD_SSSR_DUMP_INIT(dhd); + + dhd_process_cid_mac(dhd, TRUE); + ret = dhd_preinit_ioctls(dhd); + dhd_process_cid_mac(dhd, FALSE); + + /* Always assumes wl for now */ + dhd->iswl = TRUE; + +done: + return ret; +} + +int dhd_prot_init(dhd_pub_t *dhd) +{ + return BCME_OK; +} + +void +dhd_prot_stop(dhd_pub_t *dhd) +{ +/* Nothing to do for CDC */ +} + +static void +dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt, + uint32 *pkt_count, void **pplast, uint8 start, uint8 end) +{ + void *plast = NULL, *p; + uint32 pkt_cnt = 0; + + if (ptr->pend_pkts == 0) { + DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__)); + *pplast = NULL; + *pkt_count = 0; + *pkt = NULL; + 
return; + } + do { + p = (void *)(ptr->p[start]); + ptr->p[start] = NULL; + + if (p != NULL) { + if (plast == NULL) + *pkt = p; + else + PKTSETNEXT(osh, plast, p); + + plast = p; + pkt_cnt++; + } + start++; + if (start > ptr->max_idx) + start = 0; + } while (start != end); + *pplast = plast; + *pkt_count = pkt_cnt; + ptr->pend_pkts -= (uint8)pkt_cnt; +} + +int +dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, + void **pkt, uint32 *pkt_count) +{ + uint8 flow_id, max_idx, cur_idx, exp_idx; + struct reorder_info *ptr; + uint8 flags; + void *cur_pkt, *plast = NULL; + uint32 cnt = 0; + + if (pkt == NULL) { + if (pkt_count != NULL) + *pkt_count = 0; + return 0; + } + + flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET]; + flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET]; + + DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags, + reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET], + reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET], + reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET])); + + /* validate flags and flow id */ + if (flags == 0xFF) { + DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__)); + *pkt_count = 1; + return 0; + } + + cur_pkt = *pkt; + *pkt = NULL; + + ptr = dhd->reorder_bufs[flow_id]; + if (flags & WLHOST_REORDERDATA_DEL_FLOW) { + uint32 buf_size = sizeof(struct reorder_info); + + DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n", + __FUNCTION__, flow_id)); + + if (ptr == NULL) { + DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n", + __FUNCTION__, flow_id)); + *pkt_count = 1; + *pkt = cur_pkt; + return 0; + } + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, ptr->exp_idx); + /* set it to the last packet */ + if (plast) { + PKTSETNEXT(dhd->osh, plast, cur_pkt); + cnt++; + } + else { + if (cnt != 0) { + DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n", + __FUNCTION__, 
cnt)); + } + *pkt = cur_pkt; + cnt = 1; + } + buf_size += ((ptr->max_idx + 1) * sizeof(void *)); + MFREE(dhd->osh, ptr, buf_size); + dhd->reorder_bufs[flow_id] = NULL; + *pkt_count = cnt; + return 0; + } + /* all the other cases depend on the existance of the reorder struct for that flow id */ + if (ptr == NULL) { + uint32 buf_size_alloc = sizeof(reorder_info_t); + max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]; + + buf_size_alloc += ((max_idx + 1) * sizeof(void*)); + /* allocate space to hold the buffers, index etc */ + + DHD_REORDER(("%s: alloc buffer of size %d size, reorder info id %d, maxidx %d\n", + __FUNCTION__, buf_size_alloc, flow_id, max_idx)); + ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc); + if (ptr == NULL) { + DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__)); + *pkt_count = 1; + return 0; + } + bzero(ptr, buf_size_alloc); + dhd->reorder_bufs[flow_id] = ptr; + ptr->p = (void *)(ptr+1); + ptr->max_idx = max_idx; + } + if (flags & WLHOST_REORDERDATA_NEW_HOLE) { + DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__)); + if (ptr->pend_pkts) { + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, ptr->exp_idx); + ptr->pend_pkts = 0; + } + ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]; + ptr->p[ptr->cur_idx] = cur_pkt; + ptr->pend_pkts++; + *pkt_count = cnt; + } + else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) { + cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) { + /* still in the current hole */ + /* enqueue the current on the buffer chain */ + if (ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, 
ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + ptr->cur_idx = cur_idx; + DHD_REORDER(("%s: fill up a hole..pending packets is %d\n", + __FUNCTION__, ptr->pend_pkts)); + *pkt_count = 0; + *pkt = NULL; + } + else if (ptr->exp_idx == cur_idx) { + /* got the right one ..flush from cur to exp and update exp */ + DHD_REORDER(("%s: got the right one now, cur_idx is %d\n", + __FUNCTION__, cur_idx)); + if (ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: Error buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + + ptr->cur_idx = cur_idx; + ptr->exp_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + cur_idx, exp_idx); + *pkt_count = cnt; + DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n", + __FUNCTION__, cnt, ptr->pend_pkts)); + } + else { + uint8 end_idx; + bool flush_current = FALSE; + /* both cur and exp are moved now .. 
*/ + DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n", + __FUNCTION__, flow_id, ptr->cur_idx, cur_idx, + ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + /* flush pkts first */ + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, end_idx); + + if (cur_idx == ptr->max_idx) { + if (exp_idx == 0) + flush_current = TRUE; + } else { + if (exp_idx == cur_idx + 1) + flush_current = TRUE; + } + if (flush_current) { + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + } + else { + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + } + ptr->exp_idx = exp_idx; + ptr->cur_idx = cur_idx; + *pkt_count = cnt; + } + } + else { + uint8 end_idx; + /* no real packet but update to exp_seq...that means explicit window move */ + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n", + __FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx); + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + *pkt_count = cnt; + /* set the new expected idx */ + ptr->exp_idx = exp_idx; + } + return 0; +} diff --git a/bcmdhd.100.10.315.x/dhd_cfg80211.c b/bcmdhd.100.10.315.x/dhd_cfg80211.c new file mode 100644 index 0000000..306a4e3 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_cfg80211.c @@ -0,0 +1,302 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_cfg80211.c 771186 2018-07-09 09:14:04Z $ + */ + +#include +#include + +#include +#include +#include +#include + +#ifdef PKT_FILTER_SUPPORT +#include +#include +#endif // endif + +#ifdef PKT_FILTER_SUPPORT +extern uint dhd_pkt_filter_enable; +extern uint dhd_master_mode; +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +#endif // endif + +static int dhd_dongle_up = FALSE; + +#include +#include +#include +#include +#include +#include + +static s32 wl_dongle_up(struct net_device *ndev); +static s32 wl_dongle_down(struct net_device *ndev); + +/** + * Function implementations + */ + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg) +{ + struct net_device *ndev; + s32 err = 0; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_TRACE(("In\n")); + if ((!dhd_dongle_up) || (!dhd->up)) { + WL_INFORM_MEM(("Dongle is already down\n")); + err = 0; + goto done; + } + ndev = bcmcfg_to_prmry_ndev(cfg); + wl_dongle_down(ndev); +done: + dhd_dongle_up = FALSE; + return err; +} + +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode |= val; + WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode)); +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arp_version == 1) { + /* IF P2P is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, false); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + return 0; +} + +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE); + WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode)); + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arp_version == 1) { + /* IF P2P is disabled, enable arpoe back for STA 
mode. */
		dhd_arp_offload_set(dhd, dhd_arp_mode);
		dhd_arp_offload_enable(dhd, true);
	}
#endif /* ARP_OFFLOAD_SUPPORT */

	return 0;
}

#ifdef WL_STATIC_IF
/* cfg80211 -> DHD bridge: refresh the DHD interface-list entry for
 * (ifidx, bssidx) with the given address, name and interface state.
 * Returns whatever dhd_update_iflist_info() returns.
 */
int32
wl_cfg80211_update_iflist_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
	int ifidx, uint8 *addr, int bssidx, char *name, int if_state)
{
	return dhd_update_iflist_info(cfg->pub, ndev, ifidx, addr, bssidx, name, if_state);
}
#endif /* WL_STATIC_IF */

/* Allocate a DHD network interface for the given index/BSS.
 * NOTE(review): FALSE is passed for a dhd_allocate_if() flag parameter
 * whose declaration is not visible in this file — confirm its meaning
 * against dhd_allocate_if() before relying on it.
 */
struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, const char *name,
	uint8 *mac, uint8 bssidx, const char *dngl_name)
{
	return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name);
}

/* Register interface ifidx with the network stack. The ndev parameter is
 * not used in this wrapper; DHD resolves the device from ifidx.
 */
int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg,
	int ifidx, struct net_device* ndev, bool rtnl_lock_reqd)
{
	return dhd_register_if(cfg->pub, ifidx, rtnl_lock_reqd);
}

/* Remove interface ifidx. As above, ndev is unused in this wrapper. */
int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
	int ifidx, struct net_device* ndev, bool rtnl_lock_reqd)
{
	return dhd_remove_if(cfg->pub, ifidx, rtnl_lock_reqd);
}

/* Per-netdev cleanup hook, forwarded straight to DHD. */
void wl_cfg80211_cleanup_if(struct net_device *net)
{
	dhd_cleanup_if(net);
}

/* Free a net_device together with its attached wireless_dev.
 * Returns NULL after freeing (so the caller can overwrite its pointer
 * with the result), or the input unchanged if it was already NULL.
 */
struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
{
	struct bcm_cfg80211 *cfg;

	if (ndev) {
		cfg = wl_get_cfg(ndev);
		if (ndev->ieee80211_ptr) {
			/* free the wireless_dev before the netdev that owns it */
			MFREE(cfg->osh, ndev->ieee80211_ptr, sizeof(struct wireless_dev));
			ndev->ieee80211_ptr = NULL;
		}
		free_netdev(ndev);
		return NULL;
	}

	return ndev;
}

/* Free a net_device. With WL_CFG80211 enabled, dhd_cfg80211_netdev_free()
 * frees it and returns NULL, so the trailing free_netdev() is skipped;
 * otherwise the device is freed directly here.
 */
void dhd_netdev_free(struct net_device *ndev)
{
#ifdef WL_CFG80211
	ndev = dhd_cfg80211_netdev_free(ndev);
#endif // endif
	if (ndev)
		free_netdev(ndev);
}

/* Send WLC_UP to the dongle to bring it up; logs on failure and returns
 * the ioctl result (0 on success).
 */
static s32
wl_dongle_up(struct net_device *ndev)
{
	s32 err = 0;
	u32 local_up = 0;

	err = wldev_ioctl_set(ndev, WLC_UP, &local_up, sizeof(local_up));
	if (unlikely(err)) {
		WL_ERR(("WLC_UP error (%d)\n", err));
	}
	return err;
}

/* Send WLC_DOWN to the dongle to take it down; logs on failure and
 * returns the ioctl result (0 on success).
 */
static s32
wl_dongle_down(struct net_device *ndev)
{
	s32 err = 0;
	u32 local_down = 0;

	err = wldev_ioctl_set(ndev,
WLC_DOWN, &local_down, sizeof(local_down)); + if (unlikely(err)) { + WL_ERR(("WLC_DOWN error (%d)\n", err)); + } + return err; +} + +s32 +wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout) +{ + s32 err = 0; + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + if (roamvar) { + err = wldev_iovar_setint(ndev, "bcn_timeout", bcn_timeout); + if (unlikely(err)) { + WL_ERR(("bcn_timeout error (%d)\n", err)); + goto dongle_rom_out; + } + } + /* Enable/Disable built-in roaming to allow supplicant to take care of roaming */ + err = wldev_iovar_setint(ndev, "roam_off", roamvar); + if (unlikely(err)) { + WL_ERR(("roam_off error (%d)\n", err)); + goto dongle_rom_out; + } +dongle_rom_out: + return err; +} + +s32 dhd_config_dongle(struct bcm_cfg80211 *cfg) +{ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif // endif + struct net_device *ndev; + s32 err = 0; + + WL_TRACE(("In\n")); + if (dhd_dongle_up) { + WL_ERR(("Dongle is already up\n")); + return err; + } + + ndev = bcmcfg_to_prmry_ndev(cfg); + + err = wl_dongle_up(ndev); + if (unlikely(err)) { + WL_ERR(("wl_dongle_up failed\n")); + goto default_conf_out; + } + dhd_dongle_up = true; + +default_conf_out: + + return err; + +} + +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, + const struct bcm_nlmsg_hdr *nlioc, void *buf) +{ + struct net_device *ndev = NULL; + dhd_pub_t *dhd; + dhd_ioctl_t ioc = { 0, NULL, 0, 0, 0, 0, 0}; + int ret = 0; + int8 index; + + WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); + + dhd = cfg->pub; + DHD_OS_WAKE_LOCK(dhd); + + ndev = wdev_to_wlc_ndev(wdev, cfg); + index = dhd_net2idx(dhd->info, ndev); + if (index == DHD_BAD_IF) { + WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); + ret = BCME_ERROR; + goto done; + } + + ioc.cmd = nlioc->cmd; + ioc.len = nlioc->len; + ioc.set = nlioc->set; + ioc.driver = nlioc->magic; + ioc.buf = buf; + ret = dhd_ioctl_process(dhd, index, &ioc, buf); + if (ret) { + 
WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); + ret = OSL_ERROR(ret); + goto done; + } + +done: + DHD_OS_WAKE_UNLOCK(dhd); + return ret; +} diff --git a/bcmdhd.100.10.315.x/dhd_cfg80211.h b/bcmdhd.100.10.315.x/dhd_cfg80211.h new file mode 100644 index 0000000..f533b59 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_cfg80211.h @@ -0,0 +1,54 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_cfg80211.h 763539 2018-05-19 06:39:21Z $ + */ + +#ifndef __DHD_CFG80211__ +#define __DHD_CFG80211__ + +#include +#include +#include + +#ifndef WL_ERR +#define WL_ERR CFG80211_ERR +#endif // endif +#ifndef WL_TRACE +#define WL_TRACE CFG80211_TRACE +#endif // endif + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val); +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg); +s32 dhd_config_dongle(struct bcm_cfg80211 *cfg); +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, + struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data); + +s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout); +#endif /* __DHD_CFG80211__ */ diff --git a/bcmdhd.100.10.315.x/dhd_common.c b/bcmdhd.100.10.315.x/dhd_common.c new file mode 100644 index 0000000..f106d78 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_common.c @@ -0,0 +1,6732 @@ +/* + * Broadcom Dongle Host Driver (DHD), common DHD core. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_common.c 771671 2018-07-11 06:58:25Z $ + */ +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef PCIE_FULL_DONGLE +#include +#endif /* PCIE_FULL_DONGLE */ + +#ifdef SHOW_LOGTRACE +#include +#endif /* SHOW_LOGTRACE */ + +#ifdef BCMPCIE +#include +#endif // endif + +#include +#include +#include +#include +#include +#include <802.1d.h> +#include +#include +#include +#include + +#ifdef WL_CFG80211 +#include +#endif // endif +#ifdef PNO_SUPPORT +#include +#endif // endif +#ifdef RTT_SUPPORT +#include +#endif // endif + +#ifdef DNGL_EVENT_SUPPORT +#include +#endif // endif + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#ifdef PROP_TXSTATUS +#include +#include +#endif // endif + +#ifdef DHD_L2_FILTER +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + +#ifdef DHD_WET +#include +#endif /* DHD_WET */ + +#ifdef DHD_LOG_DUMP +#include +#endif /* DHD_LOG_DUMP */ + +int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL; + +#if defined(WL_WIRELESS_EXT) +#include +#endif // endif + +#ifdef DHD_ULP +#include +#endif /* DHD_ULP */ + +#ifdef DHD_DEBUG +#include +#endif /* DHD_DEBUG */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#include +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef SOFTAP +char fw_path2[MOD_PARAM_PATHLEN]; +extern bool softap_enabled; +#endif // endif + +#ifdef SHOW_LOGTRACE +#define BYTES_AHEAD_NUM 10 /* address in map file is before these many bytes */ +#define READ_NUM_BYTES 1000 /* read map file each time this No. 
of bytes */ +#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */ +static char *ramstart_str = " text_start"; /* string in mapfile has addr ramstart */ +static char *rodata_start_str = " rodata_start"; /* string in mapfile has addr rodata start */ +static char *rodata_end_str = " rodata_end"; /* string in mapfile has addr rodata end */ +#define RAMSTART_BIT 0x01 +#define RDSTART_BIT 0x02 +#define RDEND_BIT 0x04 +#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT) +#endif /* SHOW_LOGTRACE */ + +#ifdef SHOW_LOGTRACE +/* the fw file path is taken from either the module parameter at + * insmod time or is defined as a constant of different values + * for different platforms + */ +extern char *st_str_file_path; +#endif /* SHOW_LOGTRACE */ + +#define DHD_TPUT_MAX_TX_PKTS_BATCH 1000 + +#ifdef EWP_EDL +typedef struct msg_hdr_edl { + uint32 infobuf_ver; + info_buf_payload_hdr_t pyld_hdr; + msgtrace_hdr_t trace_hdr; +} msg_hdr_edl_t; +#endif /* EWP_EDL */ + +/* Last connection success/failure status */ +uint32 dhd_conn_event; +uint32 dhd_conn_status; +uint32 dhd_conn_reason; + +extern int dhd_iscan_request(void * dhdp, uint16 action); +extern void dhd_ind_scan_confirm(void *h, bool status); +extern int dhd_iscan_in_progress(void *h); +void dhd_iscan_lock(void); +void dhd_iscan_unlock(void); +extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx); +#if !defined(AP) && defined(WLP2P) +extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd); +#endif // endif + +extern int dhd_socram_dump(struct dhd_bus *bus); +extern void dhd_set_packet_filter(dhd_pub_t *dhd); + +#ifdef DNGL_EVENT_SUPPORT +static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, + bcm_dngl_event_msg_t *dngl_event, size_t pktlen); +static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, + size_t pktlen); +#endif /* DNGL_EVENT_SUPPORT */ + +#define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */ + +bool ap_cfg_running = FALSE; 
+bool ap_fw_loaded = FALSE; + +/* Version string to report */ +#ifdef DHD_DEBUG +#ifndef SRCBASE +#define SRCBASE "drivers/net/wireless/bcmdhd" +#endif // endif +#define DHD_COMPILED "\nCompiled in " SRCBASE +#endif /* DHD_DEBUG */ + +#define CHIPID_MISMATCH 8 + +#if defined(DHD_DEBUG) +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR; +#else +const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR; +#endif // endif +char fw_version[FW_VER_STR_LEN] = "\0"; +char clm_version[CLM_VER_STR_LEN] = "\0"; + +char bus_api_revision[BUS_API_REV_STR_LEN] = "\0"; + +void dhd_set_timer(void *bus, uint wdtick); + +static char* ioctl2str(uint32 ioctl); + +/* IOVar table */ +enum { + IOV_VERSION = 1, + IOV_WLMSGLEVEL, + IOV_MSGLEVEL, + IOV_BCMERRORSTR, + IOV_BCMERROR, + IOV_WDTICK, + IOV_DUMP, + IOV_CLEARCOUNTS, + IOV_LOGDUMP, + IOV_LOGCAL, + IOV_LOGSTAMP, + IOV_GPIOOB, + IOV_IOCTLTIMEOUT, + IOV_CONS, + IOV_DCONSOLE_POLL, +#if defined(DHD_DEBUG) + IOV_DHD_JOIN_TIMEOUT_DBG, + IOV_SCAN_TIMEOUT, + IOV_MEM_DEBUG, +#ifdef BCMPCIE + IOV_FLOW_RING_DEBUG, +#endif /* BCMPCIE */ +#endif /* defined(DHD_DEBUG) */ +#ifdef PROP_TXSTATUS + IOV_PROPTXSTATUS_ENABLE, + IOV_PROPTXSTATUS_MODE, + IOV_PROPTXSTATUS_OPT, + IOV_PROPTXSTATUS_MODULE_IGNORE, + IOV_PROPTXSTATUS_CREDIT_IGNORE, + IOV_PROPTXSTATUS_TXSTATUS_IGNORE, + IOV_PROPTXSTATUS_RXPKT_CHK, +#endif /* PROP_TXSTATUS */ + IOV_BUS_TYPE, + IOV_CHANGEMTU, + IOV_HOSTREORDER_FLOWS, +#ifdef DHDTCPACK_SUPPRESS + IOV_TCPACK_SUPPRESS, +#endif /* DHDTCPACK_SUPPRESS */ + IOV_AP_ISOLATE, +#ifdef DHD_L2_FILTER + IOV_DHCP_UNICAST, + IOV_BLOCK_PING, + IOV_PROXY_ARP, + IOV_GRAT_ARP, + IOV_BLOCK_TDLS, +#endif /* DHD_L2_FILTER */ + IOV_DHD_IE, +#ifdef DHD_PSTA + IOV_PSTA, +#endif /* DHD_PSTA */ +#ifdef DHD_WET + IOV_WET, + IOV_WET_HOST_IPV4, + IOV_WET_HOST_MAC, +#endif /* DHD_WET */ + IOV_CFG80211_OPMODE, + IOV_ASSERT_TYPE, + IOV_LMTEST, +#ifdef DHD_MCAST_REGEN + IOV_MCAST_REGEN_BSS_ENABLE, +#endif // endif +#ifdef 
SHOW_LOGTRACE + IOV_DUMP_TRACE_LOG, +#endif /* SHOW_LOGTRACE */ + IOV_DONGLE_TRAP_TYPE, + IOV_DONGLE_TRAP_INFO, + IOV_BPADDR, + IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */ +#if defined(DHD_LOG_DUMP) + IOV_LOG_DUMP, +#endif /* DHD_LOG_DUMP */ + IOV_TPUT_TEST, + IOV_FIS_TRIGGER, + IOV_DEBUG_BUF_DEST_STAT, + IOV_LAST +}; + +const bcm_iovar_t dhd_iovars[] = { + /* name varid flags flags2 type minlen */ + {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, sizeof(dhd_version)}, + {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_DEBUG + {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0}, + {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 }, +#ifdef BCMPCIE + {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 }, +#endif /* BCMPCIE */ +#endif /* DHD_DEBUG */ + {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN}, + {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0}, + {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0}, + {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN}, + {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0}, + {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0}, + {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0}, + {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0}, + {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0}, +#ifdef PROP_TXSTATUS + {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 }, + /* + set the proptxtstatus operation mode: + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + */ + {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 }, + {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 }, + {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 }, + {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 }, + {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 }, + {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 }, +#endif /* PROP_TXSTATUS */ + 
{"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0}, + {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 }, + {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER, + (WLHOST_REORDERDATA_MAXFLOWS + 1) }, +#ifdef DHDTCPACK_SUPPRESS + {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 }, +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_L2_FILTER + {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 }, +#endif /* DHD_L2_FILTER */ + {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0}, +#ifdef DHD_L2_FILTER + {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0}, + {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0}, + {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0}, + {"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0}, +#endif /* DHD_L2_FILTER */ + {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0}, +#ifdef DHD_PSTA + /* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */ + {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0}, +#endif /* DHD PSTA */ +#ifdef DHD_WET + /* WET Mode configuration. 
0: DIABLED 1: WET */ + {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0}, + {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0}, + {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0}, +#endif /* DHD WET */ + {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 }, + {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0}, + {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_MCAST_REGEN + {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0}, +#endif // endif +#ifdef SHOW_LOGTRACE + {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) }, +#endif /* SHOW_LOGTRACE */ + {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 }, + {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) }, +#ifdef DHD_DEBUG + {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, +#endif /* DHD_DEBUG */ + {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER, + MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) }, +#if defined(DHD_LOG_DUMP) + {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0}, +#endif /* DHD_LOG_DUMP */ + {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 }, + {NULL, 0, 0, 0, 0, 0 } +}; + +#define DHD_IOVAR_BUF_SIZE 128 + +bool +dhd_query_bus_erros(dhd_pub_t *dhdp) +{ + bool ret = FALSE; + + if (dhdp->dongle_reset) { + DHD_ERROR(("%s: Dongle Reset occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + + if (dhdp->dongle_trap_occured) { + DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; + dhd_os_send_hang_message(dhdp); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + } + + if (dhdp->iovar_timeout_occured) { + DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + +#ifdef PCIE_FULL_DONGLE + if (dhdp->d3ack_timeout_occured) { + DHD_ERROR(("%s: Resumed on 
timeout for previous D3ACK, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->livelock_occured) {
		DHD_ERROR(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* PCIE_FULL_DONGLE */

	return ret;
}

#ifdef DHD_SSSR_DUMP

/* This can be overwritten by module parameter defined in dhd_linux.c */
uint support_sssr_dump = TRUE;

/* Allocate the fixed-size, zero-initialized SSSR dump memory pool
 * (DHD_SSSR_MEMPOOL_SIZE bytes). Returns BCME_OK on success or
 * BCME_ERROR if the allocation fails.
 */
int
dhd_sssr_mempool_init(dhd_pub_t *dhd)
{
	dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
	if (dhd->sssr_mempool == NULL) {
		DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	return BCME_OK;
}

/* Release the SSSR memory pool; no-op if it was never allocated. */
void
dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
{
	if (dhd->sssr_mempool) {
		MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
		dhd->sssr_mempool = NULL;
	}
}

/* Intentionally empty hook: keeps a stable call site for dumping the
 * SSSR register info (dhd_get_sssr_reg_info() calls it unconditionally).
 */
void
dhd_dump_sssr_reg_info(sssr_reg_info_v1_t *sssr_reg_info)
{
}

/* Fetch the "sssr_reg_info" iovar from firmware into dhd->sssr_reg_info.
 * Returns BCME_OK on success or BCME_ERROR if the iovar query fails.
 */
int
dhd_get_sssr_reg_info(dhd_pub_t *dhd)
{
	int ret;
	/* get sssr_reg_info from firmware */
	memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info));
	ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)&dhd->sssr_reg_info,
		sizeof(dhd->sssr_reg_info), FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
			__FUNCTION__, ret));
		return BCME_ERROR;
	}

	dhd_dump_sssr_reg_info(&dhd->sssr_reg_info);
	return BCME_OK;
}

/* Compute the total SSSR buffer size required: the sum of every D11
 * core's save/restore region plus the VASIP region, doubled because a
 * dump is captured both before and after save/restore.
 */
uint32
dhd_get_sssr_bufsize(dhd_pub_t *dhd)
{
	int i;
	uint32 sssr_bufsize = 0;
	/* sum the SR region sizes of all D11 cores */
	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		sssr_bufsize += dhd->sssr_reg_info.mac_regs[i].sr_size;
	}
	sssr_bufsize += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;

	/* Double the size as different dumps will be saved before and after SR */
	sssr_bufsize = 2 * sssr_bufsize;

	return sssr_bufsize;
}

/* Validate the firmware-reported SSSR layout and carve the per-core
 * before/after dump buffers out of the preallocated memory pool.
 */
int
dhd_sssr_dump_init(dhd_pub_t *dhd)
{
	int i;
	uint32 sssr_bufsize;
	uint32 mempool_used = 0;

	dhd->sssr_inited = FALSE;

	if (!support_sssr_dump)
{ + DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__)); + return BCME_OK; + } + + /* check if sssr mempool is allocated */ + if (dhd->sssr_mempool == NULL) { + DHD_ERROR(("%s: sssr_mempool is not allocated\n", + __FUNCTION__)); + return BCME_ERROR; + } + + /* Get SSSR reg info */ + if (dhd_get_sssr_reg_info(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Validate structure version */ + if (dhd->sssr_reg_info.version > SSSR_REG_INFO_VER_1) { + DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n", + __FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER)); + return BCME_ERROR; + } + + /* Validate structure length */ + if (dhd->sssr_reg_info.length < sizeof(sssr_reg_info_v0_t)) { + DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n", + __FUNCTION__, (int)dhd->sssr_reg_info.length, + (int)sizeof(dhd->sssr_reg_info))); + return BCME_ERROR; + } + + /* validate fifo size */ + sssr_bufsize = dhd_get_sssr_bufsize(dhd); + if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) { + DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n", + __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE)); + return BCME_ERROR; + } + + /* init all pointers to NULL */ + for (i = 0; i < MAX_NUM_D11CORES; i++) { + dhd->sssr_d11_before[i] = NULL; + dhd->sssr_d11_after[i] = NULL; + } + dhd->sssr_dig_buf_before = NULL; + dhd->sssr_dig_buf_after = NULL; + + /* Allocate memory */ + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_reg_info.mac_regs[i].sr_size) { + dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size; + + dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size; + } + } + + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + 
mempool_used); + mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size; + + dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size; + } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) { + dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size; + + dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size; + } + + dhd->sssr_inited = TRUE; + + return BCME_OK; + +} + +void +dhd_sssr_dump_deinit(dhd_pub_t *dhd) +{ + int i; + + dhd->sssr_inited = FALSE; + /* init all pointers to NULL */ + for (i = 0; i < MAX_NUM_D11CORES; i++) { + dhd->sssr_d11_before[i] = NULL; + dhd->sssr_d11_after[i] = NULL; + } + dhd->sssr_dig_buf_before = NULL; + dhd->sssr_dig_buf_after = NULL; + + return; +} + +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_FW_COREDUMP +void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length) +{ + if (!dhd_pub->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub, + DHD_PREALLOC_MEMDUMP_RAM, length); +#else + dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + } + + if (dhd_pub->soc_ram == NULL) { + DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n", + __FUNCTION__)); + dhd_pub->soc_ram_length = 0; + } else { + memset(dhd_pub->soc_ram, 0, length); + dhd_pub->soc_ram_length = length; + } + + /* soc_ram free handled in dhd_{free,clear} */ + return dhd_pub->soc_ram; +} +#endif /* DHD_FW_COREDUMP */ + +/* to NDIS developer, the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!! 
+ */ + +int +dhd_common_socram_dump(dhd_pub_t *dhdp) +{ +#ifdef BCMDBUS + return 0; +#else + return dhd_socram_dump(dhdp->bus); +#endif /* BCMDBUS */ +} + +int +dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) +{ + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; + + if (!dhdp || !dhdp->prot || !buf) { + return BCME_ERROR; + } + + bcm_binit(strbuf, buf, buflen); + + /* Base DHD info */ + bcm_bprintf(strbuf, "%s\n", dhd_version); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n", + dhdp->up, dhdp->txoff, dhdp->busstate); + bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n", + dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz); + bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n", + dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac)); + bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt); + + bcm_bprintf(strbuf, "dongle stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n", + dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes, + dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped); + bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n", + dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes, + dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped); + bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast); + + bcm_bprintf(strbuf, "bus stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n", + dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors); + bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n", + dhdp->tx_ctlpkts, dhdp->tx_ctlerrs); + bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n", + dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors); + bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n", + dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped); + bcm_bprintf(strbuf, "rx_readahead_cnt %lu 
tx_realloc %lu\n", + dhdp->rx_readahead_cnt, dhdp->tx_realloc); + bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n", + dhdp->tx_pktgetfail, dhdp->rx_pktgetfail); + bcm_bprintf(strbuf, "tx_big_packets %lu\n", + dhdp->tx_big_packets); + bcm_bprintf(strbuf, "\n"); +#ifdef DMAMAP_STATS + /* Add DMA MAP info */ + bcm_bprintf(strbuf, "DMA MAP stats: \n"); + bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n", + dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz), + dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz)); +#ifndef IOCTLRESP_USE_CONSTMEM + bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,", + dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz)); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, " + "TSBUF RX: %lu size %luK\n", + dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz), + dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz), + dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz)); + bcm_bprintf(strbuf, "Total : %luK \n", + KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz + + dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz + + dhdp->dma_stats.tsbuf_rx_sz)); +#endif /* DMAMAP_STATS */ + + /* Add any prot info */ + dhd_prot_dump(dhdp, strbuf); + bcm_bprintf(strbuf, "\n"); + + /* Add any bus info */ + dhd_bus_dump(dhdp, strbuf); + +#if defined(DHD_LB_STATS) + dhd_lb_stats_dump(dhdp, strbuf); +#endif /* DHD_LB_STATS */ +#ifdef DHD_WET + if (dhd_get_wet_mode(dhdp)) { + bcm_bprintf(strbuf, "Wet Dump:\n"); + dhd_wet_dump(dhdp, strbuf); + } +#endif /* DHD_WET */ + + /* return remaining buffer length */ + return (!strbuf->size ? 
BCME_BUFTOOSHORT : strbuf->size); +} + +void +dhd_dump_to_kernelog(dhd_pub_t *dhdp) +{ + char buf[512]; + + DHD_ERROR(("F/W version: %s\n", fw_version)); + bcm_bprintf_bypass = TRUE; + dhd_dump(dhdp, buf, sizeof(buf)); + bcm_bprintf_bypass = FALSE; +} + +int +dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx) +{ + wl_ioctl_t ioc; + + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len); +} + +int +dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + memset(iovbuf, 0, sizeof(iovbuf)); + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (!ret) { + *pval = ltoh32(*((uint*)iovbuf)); + } else { + DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +int +dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + int lval = htol32(val); + uint len; + + len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf)); + + if (len) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx); + if (ret) { + DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +static struct ioctl2str_s { + uint32 ioctl; + char *name; +} ioctl2str_array[] = { + {WLC_UP, "UP"}, + {WLC_DOWN, "DOWN"}, + {WLC_SET_PROMISC, "SET_PROMISC"}, + {WLC_SET_INFRA, "SET_INFRA"}, + {WLC_SET_AUTH, "SET_AUTH"}, + {WLC_SET_SSID, "SET_SSID"}, + {WLC_RESTART, "RESTART"}, + {WLC_SET_CHANNEL, "SET_CHANNEL"}, + {WLC_SET_RATE_PARAMS, 
"SET_RATE_PARAMS"}, + {WLC_SET_KEY, "SET_KEY"}, + {WLC_SCAN, "SCAN"}, + {WLC_DISASSOC, "DISASSOC"}, + {WLC_REASSOC, "REASSOC"}, + {WLC_SET_COUNTRY, "SET_COUNTRY"}, + {WLC_SET_WAKE, "SET_WAKE"}, + {WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"}, + {WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"}, + {WLC_SET_WSEC, "SET_WSEC"}, + {WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"}, + {WLC_SET_RADAR, "SET_RADAR"}, + {0, NULL} +}; + +static char * +ioctl2str(uint32 ioctl) +{ + struct ioctl2str_s *p = ioctl2str_array; + + while (p->name != NULL) { + if (p->ioctl == ioctl) { + return p->name; + } + p++; + } + + return ""; +} + +/** + * @param ioc IO control struct, members are partially used by this function. + * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return. + * @param len Maximum number of bytes that dongle is allowed to write into 'buf'. + */ +int +dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) +{ + int ret = BCME_ERROR; + unsigned long flags; +#ifdef DUMP_IOCTL_IOV_LIST + dhd_iov_li_t *iov_li; +#endif /* DUMP_IOCTL_IOV_LIST */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + DHD_OS_WAKE_LOCK(dhd_pub); + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) { + DHD_RPM(("%s: pm_runtime_get_sync error. 
\n", __FUNCTION__)); + DHD_OS_WAKE_UNLOCK(dhd_pub); + return BCME_ERROR; + } +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef KEEPIF_ON_DEVICE_RESET + if (ioc->cmd == WLC_GET_VAR) { + dbus_config_t config; + config.general_param = 0; + if (buf) { + if (!strcmp(buf, "wowl_activate")) { + /* 1 (TRUE) after decreased by 1 */ + config.general_param = 2; + } else if (!strcmp(buf, "wowl_clear")) { + /* 0 (FALSE) after decreased by 1 */ + config.general_param = 1; + } + } + if (config.general_param) { + config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET; + config.general_param--; + dbus_set_config(dhd_pub->dbus, &config); + } + } +#endif /* KEEPIF_ON_DEVICE_RESET */ + + if (dhd_os_proto_block(dhd_pub)) + { +#ifdef DHD_LOG_DUMP + int slen, val, lval, min_len; + char *msg, tmp[64]; + + /* WLC_GET_VAR */ + if (ioc->cmd == WLC_GET_VAR && buf) { + min_len = MIN(sizeof(tmp) - 1, strlen(buf)); + memset(tmp, 0, sizeof(tmp)); + bcopy(buf, tmp, min_len); + tmp[min_len] = '\0'; + } +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_DISCONNECT_TRACE + if ((WLC_DISASSOC == ioc->cmd) || (WLC_DOWN == ioc->cmd) || + (WLC_DISASSOC_MYAP == ioc->cmd)) { + DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd)); + } +#endif /* HW_DISCONNECT_TRACE */ + /* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */ + if (ioc->set == TRUE) { + char *pars = (char *)buf; // points at user buffer + if (ioc->cmd == WLC_SET_VAR && buf) { + DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars)); + if (ioc->len > 1 + sizeof(uint32)) { + // skip iovar name: + pars += strnlen(pars, ioc->len - 1 - sizeof(uint32)); + pars++; // skip NULL character + } + } else { + DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s", + ifidx, ioc->cmd, ioctl2str(ioc->cmd))); + } + if (pars != NULL) { + DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars)); + } else { + DHD_DNGL_IOVAR_SET((" NULL\n")); + } + } + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) { + DHD_INFO(("%s: returning as 
busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } + DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + +#ifdef DUMP_IOCTL_IOV_LIST + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) { + if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) { + DHD_ERROR(("iovar dump list item allocation Failed\n")); + } else { + iov_li->cmd = ioc->cmd; + if (buf) + bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1); + dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head, + &iov_li->list); + } + } +#endif /* DUMP_IOCTL_IOV_LIST */ + + ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len); + +#ifdef DUMP_IOCTL_IOV_LIST + if (ret == -ETIMEDOUT) { + DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n", + IOV_LIST_MAX_LEN)); + dhd_iov_li_print(&dhd_pub->dump_iovlist_head); + } +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef DHD_LOG_DUMP + if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) && + buf != NULL) { + if (buf) { + lval = 0; + slen = strlen(buf) + 1; + msg = (char*)buf; + if (len >= slen + sizeof(lval)) { + if (ioc->cmd == WLC_GET_VAR) { + msg = tmp; + lval = *(int*)buf; + } else { + min_len = MIN(ioc->len - slen, sizeof(int)); + bcopy((msg + slen), &lval, min_len); + } + if (!strncmp(msg, "cur_etheraddr", + strlen("cur_etheraddr"))) { + lval = 0; + } + } + DHD_IOVAR_MEM(( + "%s: cmd: %d, msg: %s val: 0x%x," + " len: %d, set: %d, txn-id: %d\n", + ioc->cmd == 
WLC_GET_VAR ? + "WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, msg, lval, ioc->len, ioc->set, + dhd_prot_get_ioctl_trans_id(dhd_pub))); + } else { + DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n", + ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, ioc->len, ioc->set, + dhd_prot_get_ioctl_trans_id(dhd_pub))); + } + } else { + slen = ioc->len; + if (buf != NULL && slen != 0) { + if (slen >= 4) { + val = *(int*)buf; + } else if (slen >= 2) { + val = *(short*)buf; + } else { + val = *(char*)buf; + } + /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */ + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) + DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, " + "set: %d\n", ioc->cmd, val, ioc->len, ioc->set)); + } else { + DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd)); + } + } +#endif /* DHD_LOG_DUMP */ + if (ret && dhd_pub->up) { + /* Send hang event only if dhd_open() was success */ + dhd_os_check_hang(dhd_pub, ifidx, ret); + } + + if (ret == -ETIMEDOUT && !dhd_pub->up) { + DHD_ERROR(("%s: 'resumed on timeout' error is " + "occurred before the interface does not" + " bring up\n", __FUNCTION__)); + dhd_pub->busstate = DHD_BUS_DOWN; + } + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + + dhd_os_proto_unblock(dhd_pub); + + } + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus)); + + DHD_OS_WAKE_UNLOCK(dhd_pub); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + return ret; +} + +uint wl_get_port_num(wl_io_pport_t *io_pport) +{ + return 0; +} + +/* Get bssidx from iovar params + * Input: dhd_pub - pointer to dhd_pub_t + * params - IOVAR params + * Output: idx - BSS index + * val - ponter to the IOVAR arguments + */ +static int +dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, 
const char **val) +{ + char *prefix = "bsscfg:"; + uint32 bssidx; + + if (!(strncmp(params, prefix, strlen(prefix)))) { + /* per bss setting should be prefixed with 'bsscfg:' */ + const char *p = params + strlen(prefix); + + /* Skip Name */ + while (*p != '\0') + p++; + /* consider null */ + p = p + 1; + bcopy(p, &bssidx, sizeof(uint32)); + /* Get corresponding dhd index */ + bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx)); + + if (bssidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* skip bss idx */ + p += sizeof(uint32); + *val = p; + *idx = bssidx; + } else { + DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#if defined(DHD_DEBUG) && defined(BCMDBUS) +/* USB Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + DHD_TRACE(("%s \n", __FUNCTION__)); + + return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE); + +} +#endif /* DHD_DEBUG && BCMDBUS */ + +#ifdef DHD_DEBUG +int +dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + unsigned long int_arg = 0; + char *p; + char *end_ptr = NULL; + dhd_dbg_mwli_t *mw_li; + dll_t *item, *next; + /* check if mwalloc, mwquery or mwfree was supplied arguement with space */ + p = bcmstrstr((char *)msg, " "); + if (p != NULL) { + /* space should be converted to null as separation flag for firmware */ + *p = '\0'; + /* store the argument in int_arg */ + int_arg = bcm_strtoul(p+1, &end_ptr, 10); + } + + if (!p && !strcmp(msg, "query")) { + /* lets query the list inetrnally */ + if (dll_empty(dll_head_p(&dhd->mw_list_head))) { + DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n")); + } else { + for (item = dll_head_p(&dhd->mw_list_head); + !dll_end(&dhd->mw_list_head, item); item = next) { + next = dll_next_p(item); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + DHD_ERROR(("item: \n", mw_li->id, 
mw_li->size)); + } + } + } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) { + int32 alloc_handle; + /* convert size into KB and append as integer */ + *((int32 *)(p+1)) = int_arg*1024; + *(p+1+sizeof(int32)) = '\0'; + + /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size + + * 1 bytes for null caracter + */ + msglen = strlen(msg) + sizeof(int32) + 1; + if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) { + DHD_ERROR(("IOCTL failed for memdebug alloc\n")); + } + + /* returned allocated handle from dongle, basically address of the allocated unit */ + alloc_handle = *((int32 *)msg); + + /* add a node in the list with tuple */ + if (alloc_handle == 0) { + DHD_ERROR(("Reuqested size could not be allocated\n")); + } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) { + DHD_ERROR(("mw list item allocation Failed\n")); + } else { + mw_li->id = dhd->mw_id++; + mw_li->handle = alloc_handle; + mw_li->size = int_arg; + /* append the node in the list */ + dll_append(&dhd->mw_list_head, &mw_li->list); + } + } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) { + /* inform dongle to free wasted chunk */ + int handle = 0; + int size = 0; + for (item = dll_head_p(&dhd->mw_list_head); + !dll_end(&dhd->mw_list_head, item); item = next) { + next = dll_next_p(item); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + + if (mw_li->id == (int)int_arg) { + handle = mw_li->handle; + size = mw_li->size; + dll_delete(item); + MFREE(dhd->osh, mw_li, sizeof(*mw_li)); + if (dll_empty(dll_head_p(&dhd->mw_list_head))) { + /* reset the id */ + dhd->mw_id = 0; + } + } + } + if (handle) { + int len; + /* append the free handle and the chunk size in first 8 bytes + * after the command and null character + */ + *((int32 *)(p+1)) = handle; + *((int32 *)((p+1)+sizeof(int32))) = size; + /* append null as terminator */ + *(p+1+2*sizeof(int32)) = '\0'; + /* recalculated length -> 4 bytes for "free" + 8 bytes for 
hadnle and size + * + 1 bytes for null caracter + */ + len = strlen(msg) + 2*sizeof(int32) + 1; + /* send iovar to free the chunk */ + if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) { + DHD_ERROR(("IOCTL failed for memdebug free\n")); + } + } else { + DHD_ERROR(("specified id does not exist\n")); + } + } else { + /* for all the wrong argument formats */ + return BCME_BADARG; + } + return 0; +} +extern void +dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head) +{ + dll_t *item; + dhd_dbg_mwli_t *mw_li; + while (!(dll_empty(list_head))) { + item = dll_head_p(list_head); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + dll_delete(item); + MFREE(dhd->osh, mw_li, sizeof(*mw_li)); + } +} +#ifdef BCMPCIE +int +dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen) +{ + flow_ring_table_t *flow_ring_table; + char *cmd; + char *end_ptr = NULL; + uint8 prio; + uint16 flowid; + int i; + int ret = 0; + cmd = bcmstrstr(msg, " "); + BCM_REFERENCE(prio); + if (cmd != NULL) { + /* in order to use string operations append null */ + *cmd = '\0'; + } else { + DHD_ERROR(("missing: create/delete args\n")); + return BCME_ERROR; + } + if (cmd && !strcmp(msg, "create")) { + /* extract <"source address", "destination address", "priority"> */ + uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN]; + BCM_REFERENCE(sa); + BCM_REFERENCE(da); + msg = msg + strlen("create") + 1; + /* fill ethernet source address */ + for (i = 0; i < ETHER_ADDR_LEN; i++) { + sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16); + if (*end_ptr == ':') { + msg = (end_ptr + 1); + } else if (i != 5) { + DHD_ERROR(("not a valid source mac addr\n")); + return BCME_ERROR; + } + } + if (*end_ptr != ' ') { + DHD_ERROR(("missing: destiantion mac id\n")); + return BCME_ERROR; + } else { + /* skip space */ + msg = end_ptr + 1; + } + /* fill ethernet destination address */ + for (i = 0; i < ETHER_ADDR_LEN; i++) { + da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16); + if (*end_ptr == ':') { + msg = 
(end_ptr + 1); + } else if (i != 5) { + DHD_ERROR(("not a valid destination mac addr\n")); + return BCME_ERROR; + } + } + if (*end_ptr != ' ') { + DHD_ERROR(("missing: priority\n")); + return BCME_ERROR; + } else { + msg = end_ptr + 1; + } + /* parse priority */ + prio = (uint8)bcm_strtoul(msg, &end_ptr, 10); + if (prio > MAXPRIO) { + DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n", + __FUNCTION__)); + return BCME_ERROR; + } + + if (*end_ptr != '\0') { + DHD_ERROR(("msg not truncated with NULL character\n")); + return BCME_ERROR; + } + ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret)); + return BCME_ERROR; + } + return BCME_OK; + + } else if (cmd && !strcmp(msg, "delete")) { + msg = msg + strlen("delete") + 1; + /* parse flowid */ + flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10); + if (*end_ptr != '\0') { + DHD_ERROR(("msg not truncated with NULL character\n")); + return BCME_ERROR; + } + + /* Find flowid from ifidx 0 since this IOVAR creating flowring with ifidx 0 */ + if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK) + { + DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__, flowid)); + return BCME_ERROR; + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]); + if (ret != BCME_OK) { + DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret)); + return BCME_ERROR; + } + return BCME_OK; + } + DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__)); + return BCME_ERROR; +} +#endif /* BCMPCIE */ +#endif /* DHD_DEBUG */ + +#ifdef PKT_STATICS +extern pkt_statics_t tx_statics; +extern void dhdsdio_txpktstatics(void); +#endif + +static int +dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) 
+{ + int bcmerror = 0; + int32 int_val = 0; + uint32 dhd_ver_len, bus_api_rev_len; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + switch (actionid) { + case IOV_GVAL(IOV_VERSION): + /* Need to have checked buffer length */ + dhd_ver_len = strlen(dhd_version); + bus_api_rev_len = strlen(bus_api_revision); + if (dhd_ver_len) + bcm_strncpy_s((char*)arg, dhd_ver_len, dhd_version, dhd_ver_len); + if (bus_api_rev_len) + bcm_strncat_s((char*)arg + dhd_ver_len, bus_api_rev_len, bus_api_revision, + bus_api_rev_len); +#ifdef PKT_STATICS + memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t)); +#endif + break; + + case IOV_GVAL(IOV_WLMSGLEVEL): + printf("android_msg_level=0x%x\n", android_msg_level); + printf("config_msg_level=0x%x\n", config_msg_level); +#if defined(WL_WIRELESS_EXT) + int_val = (int32)iw_msg_level; + bcopy(&int_val, arg, val_size); + printf("iw_msg_level=0x%x\n", iw_msg_level); +#endif +#ifdef WL_CFG80211 + int_val = (int32)wl_dbg_level; + bcopy(&int_val, arg, val_size); + printf("cfg_msg_level=0x%x\n", wl_dbg_level); +#endif + break; + + case IOV_SVAL(IOV_WLMSGLEVEL): + if (int_val & DHD_ANDROID_VAL) { + android_msg_level = (uint)(int_val & 0xFFFF); + printf("android_msg_level=0x%x\n", android_msg_level); + } + if (int_val & DHD_CONFIG_VAL) { + config_msg_level = (uint)(int_val & 0xFFFF); + printf("config_msg_level=0x%x\n", config_msg_level); + } +#if defined(WL_WIRELESS_EXT) + if (int_val & DHD_IW_VAL) { + iw_msg_level = (uint)(int_val & 0xFFFF); + printf("iw_msg_level=0x%x\n", iw_msg_level); + } +#endif +#ifdef WL_CFG80211 + if (int_val & DHD_CFG_VAL) { + wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF)); + } +#endif + break; + + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)dhd_msg_level; + 
bcopy(&int_val, arg, val_size); +#ifdef PKT_STATICS + dhdsdio_txpktstatics(); +#endif + break; + + case IOV_SVAL(IOV_MSGLEVEL): + dhd_msg_level = int_val; + break; + + case IOV_GVAL(IOV_BCMERRORSTR): + bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN); + ((char *)arg)[BCME_STRLEN - 1] = 0x00; + break; + + case IOV_GVAL(IOV_BCMERROR): + int_val = (int32)dhd_pub->bcmerror; + bcopy(&int_val, arg, val_size); + break; + +#ifndef BCMDBUS + case IOV_GVAL(IOV_WDTICK): + int_val = (int32)dhd_watchdog_ms; + bcopy(&int_val, arg, val_size); + break; +#endif /* !BCMDBUS */ + + case IOV_SVAL(IOV_WDTICK): + if (!dhd_pub->up) { + bcmerror = BCME_NOTUP; + break; + } + + dhd_watchdog_ms = (uint)int_val; + + dhd_os_wd_timer(dhd_pub, (uint)int_val); + break; + + case IOV_GVAL(IOV_DUMP): + if (dhd_dump(dhd_pub, arg, len) <= 0) + bcmerror = BCME_ERROR; + else + bcmerror = BCME_OK; + break; + +#ifndef BCMDBUS + case IOV_GVAL(IOV_DCONSOLE_POLL): + int_val = (int32)dhd_pub->dhd_console_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DCONSOLE_POLL): + dhd_pub->dhd_console_ms = (uint)int_val; + break; + +#if defined(DHD_DEBUG) + case IOV_SVAL(IOV_CONS): + if (len > 0) + bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1); + break; +#endif /* DHD_DEBUG */ +#endif /* !BCMDBUS */ + + case IOV_SVAL(IOV_CLEARCOUNTS): + dhd_pub->tx_packets = dhd_pub->rx_packets = 0; + dhd_pub->tx_errors = dhd_pub->rx_errors = 0; + dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0; + dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0; + dhd_pub->tx_dropped = 0; + dhd_pub->rx_dropped = 0; + dhd_pub->tx_pktgetfail = 0; + dhd_pub->rx_pktgetfail = 0; + dhd_pub->rx_readahead_cnt = 0; + dhd_pub->tx_realloc = 0; + dhd_pub->wd_dpc_sched = 0; + dhd_pub->tx_big_packets = 0; + memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats)); + dhd_bus_clearcounts(dhd_pub); +#ifdef PROP_TXSTATUS + /* clear proptxstatus related counters */ + dhd_wlfc_clear_counts(dhd_pub); +#endif /* PROP_TXSTATUS */ 
+#if defined(DHD_LB_STATS) + DHD_LB_STATS_RESET(dhd_pub); +#endif /* DHD_LB_STATS */ + break; + + case IOV_GVAL(IOV_IOCTLTIMEOUT): { + int_val = (int32)dhd_os_get_ioctl_resp_timeout(); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_IOCTLTIMEOUT): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_os_set_ioctl_resp_timeout((unsigned int)int_val); + break; + } + +#ifdef PROP_TXSTATUS + case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + int_val = wlfc_enab ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + + /* wlfc is already set as desired */ + if (wlfc_enab == (int_val == 0 ? FALSE : TRUE)) + goto exit; + + if (int_val == TRUE) + bcmerror = dhd_wlfc_init(dhd_pub); + else + bcmerror = dhd_wlfc_deinit(dhd_pub); + + break; + } + case IOV_GVAL(IOV_PROPTXSTATUS_MODE): + bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODE): + dhd_wlfc_set_mode(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + dhd_wlfc_set_module_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + dhd_wlfc_set_credit_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + bcmerror = 
dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val); + break; + +#endif /* PROP_TXSTATUS */ + + case IOV_GVAL(IOV_BUS_TYPE): + /* The dhd application queries the driver to check if its usb or sdio. */ +#ifdef BCMDBUS + int_val = BUS_TYPE_USB; +#endif // endif +#ifdef BCMSDIO + int_val = BUS_TYPE_SDIO; +#endif // endif +#ifdef PCIE_FULL_DONGLE + int_val = BUS_TYPE_PCIE; +#endif // endif + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CHANGEMTU): + int_val &= 0xffff; + bcmerror = dhd_change_mtu(dhd_pub, int_val, 0); + break; + + case IOV_GVAL(IOV_HOSTREORDER_FLOWS): + { + uint i = 0; + uint8 *ptr = (uint8 *)arg; + uint8 count = 0; + + ptr++; + for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) { + if (dhd_pub->reorder_bufs[i] != NULL) { + *ptr = dhd_pub->reorder_bufs[i]->flow_id; + ptr++; + count++; + } + } + ptr = (uint8 *)arg; + *ptr = count; + break; + } +#ifdef DHDTCPACK_SUPPRESS + case IOV_GVAL(IOV_TCPACK_SUPPRESS): { + int_val = (uint32)dhd_pub->tcpack_sup_mode; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_TCPACK_SUPPRESS): { + bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val); + break; + } +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef DHD_L2_FILTER + case IOV_GVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + const char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + int_val = 
dhd_get_dhcp_unicast_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + const char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } + case IOV_GVAL(IOV_BLOCK_PING): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_block_ping_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_BLOCK_PING): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 
1 : 0); + break; + } + case IOV_GVAL(IOV_PROXY_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_parp_status(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROXY_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + bcopy(val, &int_val, sizeof(int_val)); + + /* Issue a iovar request to WL to update the proxy arp capability bit + * in the Extended Capability IE of beacons/probe responses. + */ + bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val), + NULL, 0, TRUE); + if (bcmerror == BCME_OK) { + dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0); + } + break; + } + case IOV_GVAL(IOV_GRAT_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_grat_arp_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_GRAT_ARP): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 
1 : 0); + break; + } + case IOV_GVAL(IOV_BLOCK_TDLS): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_block_tdls_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_BLOCK_TDLS): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } +#endif /* DHD_L2_FILTER */ + case IOV_SVAL(IOV_DHD_IE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + break; + } + case IOV_GVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_ap_isolate(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_ap_isolate(dhd_pub, bssidx, int_val); + break; + } +#ifdef DHD_PSTA + case IOV_GVAL(IOV_PSTA): { + int_val = dhd_get_psta_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PSTA): { + if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) { + 
dhd_set_psta_mode(dhd_pub, int_val); + } else { + bcmerror = BCME_RANGE; + } + break; + } +#endif /* DHD_PSTA */ +#ifdef DHD_WET + case IOV_GVAL(IOV_WET): + int_val = dhd_get_wet_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WET): + if (int_val == 0 || int_val == 1) { + dhd_set_wet_mode(dhd_pub, int_val); + /* Delete the WET DB when disabled */ + if (!int_val) { + dhd_wet_sta_delete_list(dhd_pub); + } + } else { + bcmerror = BCME_RANGE; + } + break; + case IOV_SVAL(IOV_WET_HOST_IPV4): + dhd_set_wet_host_ipv4(dhd_pub, params, plen); + break; + case IOV_SVAL(IOV_WET_HOST_MAC): + dhd_set_wet_host_mac(dhd_pub, params, plen); + break; +#endif /* DHD_WET */ +#ifdef DHD_MCAST_REGEN + case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val); + break; + } +#endif /* DHD_MCAST_REGEN */ + + case IOV_GVAL(IOV_CFG80211_OPMODE): { + int_val = (int32)dhd_pub->op_mode; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CFG80211_OPMODE): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_pub->op_mode = int_val; + break; + } + + case IOV_GVAL(IOV_ASSERT_TYPE): + int_val = g_assert_type; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ASSERT_TYPE): + g_assert_type = (uint32)int_val; + 
break; + +#if !defined(MACOSX_DHD) + case IOV_GVAL(IOV_LMTEST): { + *(uint32 *)arg = (uint32)lmtest; + break; + } + + case IOV_SVAL(IOV_LMTEST): { + uint32 val = *(uint32 *)arg; + if (val > 50) + bcmerror = BCME_BADARG; + else { + lmtest = (uint)val; + DHD_ERROR(("%s: lmtest %s\n", + __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON")); + } + break; + } +#endif // endif + +#ifdef SHOW_LOGTRACE + case IOV_GVAL(IOV_DUMP_TRACE_LOG): { + trace_buf_info_t *trace_buf_info; + + trace_buf_info = (trace_buf_info_t *)MALLOC(dhd_pub->osh, + sizeof(trace_buf_info_t)); + if (trace_buf_info != NULL) { + dhd_get_read_buf_ptr(dhd_pub, trace_buf_info); + memcpy((void*)arg, (void*)trace_buf_info, sizeof(trace_buf_info_t)); + MFREE(dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t)); + } else { + DHD_ERROR(("Memory allocation Failed\n")); + bcmerror = BCME_NOMEM; + } + break; + } +#endif /* SHOW_LOGTRACE */ +#ifdef DHD_DEBUG +#if defined(BCMSDIO) || defined(BCMPCIE) + case IOV_GVAL(IOV_DONGLE_TRAP_TYPE): + if (dhd_pub->dongle_trap_occured) + int_val = ltoh32(dhd_pub->last_trap_info.type); + else + int_val = 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DONGLE_TRAP_INFO): + { + struct bcmstrbuf strbuf; + bcm_binit(&strbuf, arg, len); + if (dhd_pub->dongle_trap_occured == FALSE) { + bcm_bprintf(&strbuf, "no trap recorded\n"); + break; + } + dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf); + break; + } + + case IOV_GVAL(IOV_BPADDR): + { + sdreg_t sdreg; + uint32 addr, size; + + memcpy(&sdreg, params, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size, + (uint *)&int_val, TRUE); + + memcpy(arg, &int_val, sizeof(int32)); + + break; + } + + case IOV_SVAL(IOV_BPADDR): + { + sdreg_t sdreg; + uint32 addr, size; + + memcpy(&sdreg, params, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size, + (uint *)&sdreg.value, + FALSE); + + break; 
+ } +#endif /* BCMSDIO || BCMPCIE */ +#ifdef BCMPCIE + case IOV_SVAL(IOV_FLOW_RING_DEBUG): + { + bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len); + break; + } +#endif /* BCMPCIE */ + case IOV_SVAL(IOV_MEM_DEBUG): + if (len > 0) { + bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1); + } + break; +#endif /* DHD_DEBUG */ +#if defined(DHD_LOG_DUMP) + case IOV_GVAL(IOV_LOG_DUMP): + { + dhd_prot_debug_info_print(dhd_pub); + dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT); + break; + } +#endif /* DHD_LOG_DUMP */ + case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT): + { + if (dhd_pub->debug_buf_dest_support) { + debug_buf_dest_stat_t *debug_buf_dest_stat = + (debug_buf_dest_stat_t *)arg; + memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat, + sizeof(dhd_pub->debug_buf_dest_stat)); + } else { + bcmerror = BCME_DISABLED; + } + break; + } + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror)); + return bcmerror; +} + +/* Store the status of a connection attempt for later retrieval by an iovar */ +void +dhd_store_conn_status(uint32 event, uint32 status, uint32 reason) +{ + /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID + * because an encryption/rsn mismatch results in both events, and + * the important information is in the WLC_E_PRUNE. 
+ */ + if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL && + dhd_conn_event == WLC_E_PRUNE)) { + dhd_conn_event = event; + dhd_conn_status = status; + dhd_conn_reason = reason; + } +} +
/* NOTE(review): dhd_prec_enq() enqueues 'pkt' at precedence 'prec' into 'q'.
 * When both the precedence queue and the total queue have room it is a plain
 * penq. Otherwise it picks an eviction precedence: the same 'prec' if that
 * sub-queue is full, else the precedence of the overall tail packet
 * (refusing the new packet when that tail outranks 'prec'). The per-AC
 * bitmap dhdp->wme_dp selects between discard-oldest and refuse-newest.
 * Returns TRUE iff 'pkt' ends up queued. */
+bool +dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec) +{ + void *p; + int eprec = -1; /* precedence to evict from */ + bool discard_oldest; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktqprec_full(q, prec) && !pktq_full(q)) { + pktq_penq(q, prec, pkt); + return TRUE; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktqprec_full(q, prec)) + eprec = prec; + else if (pktq_full(q)) { + p = pktq_peek_tail(q, &eprec); + ASSERT(p); + if (eprec > prec || eprec < 0) + return FALSE; + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktqprec_empty(q, eprec)); + discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec); + if (eprec == prec && !discard_oldest) + return FALSE; /* refuse newer (incoming) packet */ + /* Evict packet according to discard policy */ + p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec); + ASSERT(p);
/* presumably tells the TCPACK-suppress module to drop its bookkeeping for
 * the evicted packet before it is freed -- TODO confirm against
 * dhd_tcpack_check_xmit() */
+#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! 
Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + PKTFREE(dhdp->osh, p, TRUE); + } + + /* Enqueue */ + p = pktq_penq(q, prec, pkt); + ASSERT(p); + + return TRUE; +} + +/* + * Functions to drop proper pkts from queue: + * If one pkt in queue is non-fragmented, drop first non-fragmented pkt only + * If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts + * If can't find pkts matching upper 2 cases, drop first pkt anyway + */ +bool +dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn) +{ + struct pktq_prec *q = NULL; + void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL; + pkt_frag_t frag_info; + + ASSERT(dhdp && pq); + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + if (p == NULL) + return FALSE; + + while (p) { + frag_info = pkt_frag_info(dhdp->osh, p); + if (frag_info == DHD_PKT_FRAG_NONE) { + break; + } else if (frag_info == DHD_PKT_FRAG_FIRST) { + if (first) { + /* No last frag pkt, use prev as last */ + last = prev; + break; + } else { + first = p; + prev_first = prev; + } + } else if (frag_info == DHD_PKT_FRAG_LAST) { + if (first) { + last = p; + break; + } + } + + prev = p; + p = PKTLINK(p); + } + + if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) { + /* Not found matching pkts, use oldest */ + prev = NULL; + p = q->head; + frag_info = 0; + } + + if (frag_info == DHD_PKT_FRAG_NONE) { + first = last = p; + prev_first = prev; + } + + p = first; + while (p) { + next = PKTLINK(p); + q->n_pkts--; + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + PKTSETLINK(p, NULL); + + if (fn) + fn(dhdp, prec, p, TRUE); + + if (p == last) + break; + + p = next; + } + + if (prev_first == NULL) { + if ((q->head = next) == NULL) + q->tail = NULL; + } else { + PKTSETLINK(prev_first, next); + if (!next) + q->tail = 
prev_first; + } + + return TRUE; +} + +
/* NOTE(review): local iovar dispatcher. Looks 'name' up in the dhd_iovars
 * table, normalizes params/plen for SET (a SET carries its value in
 * arg/len, asserted below), derives the per-type value size
 * (0 for void, 'len' for buffers, sizeof(int) otherwise) and forwards to
 * dhd_doiovar(). Returns BCME_UNSUPPORTED for names not in the table so
 * callers can fall through to protocol/bus iovar handlers. */
static int +dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + int bcmerror = 0; + int val_size; + const bcm_iovar_t *vi = NULL; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + + bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +int +dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen) +{ + int bcmerror = 0; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!buf) { + return BCME_BADARG; + } + + dhd_os_dhdiovar_lock(dhd_pub); + switch (ioc->cmd) { + case DHD_GET_MAGIC: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_MAGIC; + break; + + case DHD_GET_VERSION: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_VERSION; + break; + + case DHD_GET_VAR: + case DHD_SET_VAR: + { + char *arg; + uint arglen; + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) && + bcmstricmp((char *)buf, "devreset")) { + /* In platforms like FC19, the FW download is done via IOCTL + * and should not return error for IOCTLs fired before FW + * Download is done + */ + if (dhd_fw_download_status(dhd_pub)) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return -ENODEV; + } + } + DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) { + /* If Suspend/Resume is tested via pcie_suspend IOVAR + * then continue to execute the IOVAR, return from here for + * other IOVARs, also include pciecfgreg and devreset to go + * through. 
+ */ + if (bcmstricmp((char *)buf, "pcie_suspend") && + bcmstricmp((char *)buf, "pciecfgreg") && + bcmstricmp((char *)buf, "devreset") && + bcmstricmp((char *)buf, "sdio_suspend")) { + DHD_ERROR(("%s: bus is in suspend(%d)" + "or suspending(0x%x) state\n", + __FUNCTION__, dhd_pub->busstate, + dhd_pub->dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return -ENODEV; + } + } + /* During devreset ioctl, we call dhdpcie_advertise_bus_cleanup, + * which will wait for all the busy contexts to get over for + * particular time and call ASSERT if timeout happens. As during + * devreset ioctal, we made DHD_BUS_BUSY_SET_IN_DHD_IOVAR, + * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is + * not used in Production platforms but only used in FC19 setups. + */ + if (!bcmstricmp((char *)buf, "devreset")) { + DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub); + } + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); +
/* NOTE(review): the scan below evaluates '*arg' BEFORE checking 'arglen',
 * so an iovar name that fills the whole buffer without a NUL terminator
 * reads one byte past 'buf' (and the following 'if (*arg)' repeats that
 * read). Testing 'arglen' first ('arglen && *arg') would bound the scan. */
+ /* scan past the name to any arguments */ + for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--) + ; + + if (*arg) { + bcmerror = BCME_BUFTOOSHORT; + goto unlock_exit; + } + + /* account for the NUL terminator */ + arg++, arglen--; + /* call with the appropriate arguments */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, + buf, buflen, IOV_GET); + } else { + bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, + arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + + /* not in generic table, try protocol module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg, + arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + + /* if still not found, try bus module */ + if (ioc->cmd == DHD_GET_VAR) { + 
bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + arg, arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + + } + goto unlock_exit; + + default: + bcmerror = BCME_UNSUPPORTED; + } + dhd_os_dhdiovar_unlock(dhd_pub); + return bcmerror; + +unlock_exit: + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return bcmerror; +} + +#ifdef SHOW_EVENTS + +static void +wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, + void *raw_event_ptr, char *eventmask) +{ + uint i, status, reason; + bool group = FALSE, flush_txq = FALSE, link = FALSE; + bool host_data = FALSE; /* prints event data after the case when set */ + const char *auth_str; + const char *event_name; + uchar *buf; + char err_msg[256], eabuf[ETHER_ADDR_STR_LEN]; + uint event_type, flags, auth_type, datalen; + + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + reason = ntoh32(event->reason); + BCM_REFERENCE(reason); + auth_type = ntoh32(event->auth_type); + datalen = ntoh32(event->datalen); + + /* debug dump of event messages */ + snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet)); + + event_name = bcmevent_get_name(event_type); + BCM_REFERENCE(event_name); + + if (flags & WLC_EVENT_MSG_LINK) + link = TRUE; + if (flags & WLC_EVENT_MSG_GROUP) + group = TRUE; + if (flags & WLC_EVENT_MSG_FLUSHTXQ) + flush_txq = TRUE; + + switch (event_type) { + case WLC_E_START: + case WLC_E_DEAUTH: + case WLC_E_DISASSOC: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + + break; + + case WLC_E_ASSOC: + case WLC_E_REASSOC: + if 
(status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n", + event_name, eabuf, (int)reason)); + } else { + DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n", + event_name, eabuf, (int)status)); + } + + break; + + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason)); + break; + + case WLC_E_AUTH: + case WLC_E_AUTH_IND: + if (auth_type == DOT11_OPEN_SYSTEM) + auth_str = "Open System"; + else if (auth_type == DOT11_SHARED_KEY) + auth_str = "Shared Key"; + else { + snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type); + auth_str = err_msg; + } + + if (event_type == WLC_E_AUTH_IND) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n", + event_name, eabuf, auth_str, (int)reason)); + } + BCM_REFERENCE(auth_str); + + break; + + case WLC_E_JOIN: + case WLC_E_ROAM: + case WLC_E_SET_SSID: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + } else { + if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); + } else if (status == WLC_E_STATUS_NO_NETWORKS) { + DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, unexpected status %d\n", + event_name, (int)status)); + } + } + break; + + case 
WLC_E_BEACON_RX: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status)); + } + break; + + case WLC_E_LINK: + DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d\n", + event_name, link?"UP":"DOWN", flags, status)); + BCM_REFERENCE(link); + break; + + case WLC_E_MIC_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n", + event_name, eabuf, group, flush_txq)); + BCM_REFERENCE(group); + BCM_REFERENCE(flush_txq); + break; + + case WLC_E_ICV_ERROR: + case WLC_E_UNICAST_DECODE_ERROR: + case WLC_E_MULTICAST_DECODE_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", + event_name, eabuf)); + break; + + case WLC_E_TXFAIL: + DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status)); + break; + + case WLC_E_ASSOC_REQ_IE: + case WLC_E_ASSOC_RESP_IE: + case WLC_E_PMKID_CACHE: + DHD_EVENT(("MACEVENT: %s\n", event_name)); + break; + + case WLC_E_SCAN_COMPLETE: + DHD_EVENT(("MACEVENT: %s\n", event_name)); + break; + case WLC_E_RSSI_LQM: + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_NET_LOST: + case WLC_E_PFN_SCAN_COMPLETE: + case WLC_E_PFN_SCAN_NONE: + case WLC_E_PFN_SCAN_ALLGONE: + case WLC_E_PFN_GSCAN_FULL_RESULT: + case WLC_E_PFN_SSID_EXT: + DHD_EVENT(("PNOEVENT: %s\n", event_name)); + break; + + case WLC_E_PFN_SCAN_BACKOFF: + case WLC_E_PFN_BSSID_SCAN_BACKOFF: + DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n", + event_name, (int)status, (int)reason)); + break; + + case WLC_E_PSK_SUP: + case WLC_E_PRUNE: + DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", + event_name, (int)status, (int)reason)); + break; + +#ifdef WIFI_ACT_FRAME + case WLC_E_ACTION_FRAME: + DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf)); + break; +#endif /* WIFI_ACT_FRAME */ + +#ifdef SHOW_LOGTRACE + case WLC_E_TRACE: + { + dhd_dbg_trace_evnt_handler(dhd_pub, event_data, 
raw_event_ptr, datalen); + break; + } +#endif /* SHOW_LOGTRACE */ + + case WLC_E_RSSI: + DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data)))); + break; + + case WLC_E_SERVICE_FOUND: + case WLC_E_P2PO_ADD_DEVICE: + case WLC_E_P2PO_DEL_DEVICE: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + +#ifdef BT_WIFI_HANDOBER + case WLC_E_BT_WIFI_HANDOVER_REQ: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; +#endif // endif + + case WLC_E_CCA_CHAN_QUAL: + if (datalen) { + cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data; + if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) { + DHD_EVENT(( + "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d," + " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms" + " ts 0x%08x)\n", + event_name, event_type, eabuf, (int)status, + (int)reason, (int)auth_type, cca_event->chanspec, + cca_event->cca_busy_ext.duration, + cca_event->cca_busy_ext.congest_ibss, + cca_event->cca_busy_ext.congest_obss, + cca_event->cca_busy_ext.interference, + cca_event->cca_busy_ext.timestamp)); + } else if (cca_event->id == WL_CHAN_QUAL_CCA) { + DHD_EVENT(( + "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d," + " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n", + event_name, event_type, eabuf, (int)status, + (int)reason, (int)auth_type, cca_event->chanspec, + cca_event->cca_busy.duration, + cca_event->cca_busy.congest, + cca_event->cca_busy.timestamp)); + } else if ((cca_event->id == WL_CHAN_QUAL_NF) || + (cca_event->id == WL_CHAN_QUAL_NF_LTE)) { + DHD_EVENT(( + "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d," + " channel 0x%02x (NF[%d] %ddB)\n", + event_name, event_type, eabuf, (int)status, + (int)reason, (int)auth_type, cca_event->chanspec, + cca_event->id, cca_event->noise)); + } else { + DHD_EVENT(( + "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d," + " channel 0x%02x (unknown ID %d)\n", + event_name, event_type, eabuf, (int)status, + (int)reason, 
(int)auth_type, cca_event->chanspec, + cca_event->id)); + } + } + break; + case WLC_E_ESCAN_RESULT: + { + wl_escan_result_v2_t *escan_result = + (wl_escan_result_v2_t *)event_data; + BCM_REFERENCE(escan_result); + if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) { + DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n", + event_name, event_type, (int)status, + dtoh16(escan_result->sync_id))); + } else { + DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n", + event_name, event_type, eabuf, (int)status)); + } + + break; + } + case WLC_E_IF: + { + struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data; + BCM_REFERENCE(ifevent); + + DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n", + event_name, ifevent->opcode, ifevent->ifidx, ifevent->role)); + break; + } +#ifdef SHOW_LOGTRACE + case WLC_E_MSCH: + { + wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen); + break; + } +#endif /* SHOW_LOGTRACE */ + + case WLC_E_PSK_AUTH: + DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n", + event_name, eabuf, status, reason)); + break; + case WLC_E_AGGR_EVENT: + { + event_aggr_data_t *aggrbuf = event_data; + int j = 0, len = 0; + uint8 *data = aggrbuf->data; + DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ", + event_name, aggrbuf->num_events, aggrbuf->len)); + for (j = 0; j < aggrbuf->num_events; j++) + { + wl_event_msg_t * sub_event = (wl_event_msg_t *)data; + if (len > aggrbuf->len) { + DHD_ERROR(("%s: Aggr events corrupted!", + __FUNCTION__)); + break; + } + DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type))); + len += ALIGN_SIZE((ntoh32(sub_event->datalen) + + sizeof(wl_event_msg_t)), sizeof(uint64)); + buf = (uchar *)(data + sizeof(wl_event_msg_t)); + BCM_REFERENCE(buf); + DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen))); + for (i = 0; i < ntoh32(sub_event->datalen); i++) { + DHD_EVENT((" 0x%02x ", buf[i])); + } + data = aggrbuf->data + len; + } + 
DHD_EVENT(("\n")); + } + break; + case WLC_E_NAN_CRITICAL: + { + DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name, reason)); + break; + } + case WLC_E_NAN_NON_CRITICAL: + { + DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason)); + break; + } + case WLC_E_RPSNOA: + { + rpsnoa_stats_t *stat = event_data; + if (datalen == sizeof(*stat)) { + DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name, + (stat->band == WLC_BAND_2G) ? "2G":"5G", + stat->state, stat->last_pps)); + } + break; + } + case WLC_E_PHY_CAL: + { + DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason)); + break; + } + default: + DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n", + event_name, event_type, eabuf, (int)status, (int)reason, + (int)auth_type)); + break; + } + + /* show any appended data if message level is set to bytes or host_data is set */ + if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) { + buf = (uchar *) event_data; + BCM_REFERENCE(buf); + DHD_EVENT((" data (%d) : ", datalen)); + for (i = 0; i < datalen; i++) { + DHD_EVENT((" 0x%02x ", buf[i])); + } + DHD_EVENT(("\n")); + } +} /* wl_show_host_event */ +#endif /* SHOW_EVENTS */ + +#ifdef DNGL_EVENT_SUPPORT +/* Check whether packet is a BRCM dngl event pkt. If it is, process event data. 
*/
/* NOTE(review): thin wrapper -- forwards the raw dongle event to
 * dngl_host_event_process() and unconditionally reports BCME_OK, even when
 * the processor bails out early on a malformed event. */
 + int +dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen) +{ + bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata; + + dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen); + return BCME_OK; +} + +void +dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, + bcm_dngl_event_msg_t *dngl_event, size_t pktlen) +{ + uint8 *p = (uint8 *)(event + 1); + uint16 type = ntoh16_ua((void *)&dngl_event->event_type); + uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen); + uint16 version = ntoh16_ua((void *)&dngl_event->version); + + DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen)); +
/* NOTE(review): 'pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN' is
 * unsigned (size_t) arithmetic; a runt pktlen smaller than the header wraps
 * around and lets this bound check pass -- TODO confirm callers guarantee a
 * minimum pktlen before this point. Also note the silent return: a bad
 * datalen is dropped without any log. */
 if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) { + return; + } + if (version != BCM_DNGL_EVENT_MSG_VERSION) { + DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__, + version, BCM_DNGL_EVENT_MSG_VERSION)); + return; + } + switch (type) { + case DNGL_E_SOCRAM_IND: + { + bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p; +
/* NOTE(review): tag/length are read with ltoh32() but stored into uint16,
 * silently truncating the upper 16 bits of each 32-bit field. */
 uint16 tag = ltoh32(socramind_ptr->tag); + uint16 taglen = ltoh32(socramind_ptr->length); + p = (uint8 *)socramind_ptr->value; + DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen)); + switch (tag) { + case SOCRAM_IND_ASSERT_TAG: + { + /* + * The payload consists of - + * null terminated function name padded till 32 bit boundary + + * Line number - (32 bits) + * Caller address (32 bits) + */ + char *fnname = (char *)p; + if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) + + sizeof(uint32) * 2)) { + DHD_ERROR(("Wrong length:%d\n", datalen)); + return; + } + DHD_EVENT(("ASSRT Function:%s ", p)); + p += ROUNDUP(strlen(p) + 1, sizeof(uint32)); + DHD_EVENT(("Line:%d ", *(uint32 *)p)); + p += sizeof(uint32); + DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p)); + break; + } + case SOCRAM_IND_TAG_HEALTH_CHECK: + { + bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p; + DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d 
datalen:%d\n", + ltoh32(dngl_hc->top_module_tag), + ltoh32(dngl_hc->top_module_len), + datalen)); + if (DHD_EVENT_ON()) { + prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len) + + BCM_XTLV_HDR_SIZE, datalen)); + } +#ifdef DHD_LOG_DUMP + memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE); + memcpy(dhdp->health_chk_event_data, p, + MIN(ltoh32(dngl_hc->top_module_len), + HEALTH_CHK_BUF_SIZE)); +#endif /* DHD_LOG_DUMP */ + p = (uint8 *)dngl_hc->value; + + switch (ltoh32(dngl_hc->top_module_tag)) { + case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE: + { + bcm_dngl_pcie_hc_t *pcie_hc; + pcie_hc = (bcm_dngl_pcie_hc_t *)p; + BCM_REFERENCE(pcie_hc); + if (ltoh32(dngl_hc->top_module_len) < + sizeof(bcm_dngl_pcie_hc_t)) { + DHD_ERROR(("Wrong length:%d\n", + ltoh32(dngl_hc->top_module_len))); + return; + } + DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x," + " control:0x%x\n", + ltoh32(pcie_hc->version), + ltoh32(pcie_hc->pcie_err_ind_type), + ltoh32(pcie_hc->pcie_flag), + ltoh32(pcie_hc->pcie_control_reg))); + break; + } +#ifdef HCHK_COMMON_SW_EVENT + case HCHK_SW_ENTITY_WL_PRIMARY: + case HCHK_SW_ENTITY_WL_SECONDARY: + { + bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p; + + if (ltoh32(dngl_hc->top_module_len) < + sizeof(bcm_xtlv_t)) { + DHD_ERROR(("WL SW HC Wrong length:%d\n", + ltoh32(dngl_hc->top_module_len))); + return; + } + BCM_REFERENCE(wl_hc); + DHD_EVENT(("WL SW HC type %d len %d", + ltoh16(wl_hc->id), ltoh16(wl_hc->len))); + break; + } +#endif /* HCHK_COMMON_SW_EVENT */ + default: + { + DHD_ERROR(("%s:Unknown module TAG:%d\n", + __FUNCTION__, + ltoh32(dngl_hc->top_module_tag))); + break; + } + } + break; + } + default: + DHD_ERROR(("%s:Unknown TAG", __FUNCTION__)); + if (p && DHD_EVENT_ON()) { + prhex("SOCRAMIND", p, taglen); + } + break; + } + break; + } + default: + DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__, type)); + if (p && DHD_EVENT_ON()) { + prhex("SOCRAMIND", p, datalen); + } + break; + } +#ifndef BCMDBUS +#ifdef DHD_FW_COREDUMP + if 
(dhdp->memdump_enabled) { + dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT; + if (dhd_socram_dump(dhdp->bus)) { + DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__)); + } + } +#else + dhd_dbg_send_urgent_evt(dhdp, p, datalen); +#endif /* DHD_FW_COREDUMP */ +#endif /* !BCMDBUS */ +} + +#endif /* DNGL_EVENT_SUPPORT */ + +/* Stub for now. Will become real function as soon as shim + * is being integrated to Android, Linux etc. + */ +int +wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport) +{ + return BCME_OK; +} + +int +wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, + uint pktlen, void **data_ptr, void *raw_event) +{ + wl_evt_pport_t evt_pport; + wl_event_msg_t event; + bcm_event_msg_u_t evu; + int ret; + + /* make sure it is a BRCM event pkt and record event data */ + ret = wl_host_event_get_data(pktdata, pktlen, &evu); + if (ret != BCME_OK) { + return ret; + } + + memcpy(&event, &evu.event, sizeof(wl_event_msg_t)); + + /* convert event from network order to host order */ + wl_event_to_host_order(&event); + + /* record event params to evt_pport */ + evt_pport.dhd_pub = dhd_pub; + evt_pport.ifidx = ifidx; + evt_pport.pktdata = pktdata; + evt_pport.data_ptr = data_ptr; + evt_pport.raw_event = raw_event; + evt_pport.data_len = pktlen; + + ret = wl_event_process_default(&event, &evt_pport); + + return ret; +} /* wl_event_process */ + +/* Check whether packet is a BRCM event pkt. If it is, record event data. 
*/
/* Validate that 'pktdata' is a BRCM event frame via is_wlc_event_frame()
 * and copy the decoded message union into 'evu'. Returns BCME_OK on
 * success; any other code is logged and propagated to the caller. */
+int +wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu) +{ + int ret; + + ret = is_wlc_event_frame(pktdata, pktlen, 0, evu); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Invalid event frame, err = %d\n", + __FUNCTION__, ret)); + } + + return ret; +} + +
/* NOTE(review): main host-event dispatcher. For the DNGLEVENT subtype a
 * successful dngl_host_event() deliberately yields BCME_ERROR so the frame
 * is not also processed as a BRCM event (see inline comment below).
 * 'evlen = datalen + sizeof(bcm_event_t)' trusts the dongle-supplied
 * datalen; presumably is_wlc_event_frame() already bounded it against
 * pktlen -- TODO confirm. */
int +wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen, + wl_event_msg_t *event, void **data_ptr, void *raw_event) +{ + bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + bcm_event_msg_u_t evu; + uint8 *event_data; + uint32 type, status, datalen, reason; + uint16 flags; + uint evlen; + int ret; + uint16 usr_subtype; + + ret = wl_host_event_get_data(pktdata, pktlen, &evu); + if (ret != BCME_OK) { + return ret; + } + + usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype); + switch (usr_subtype) { + case BCMILCP_BCM_SUBTYPE_EVENT: + memcpy(event, &evu.event, sizeof(wl_event_msg_t)); + *data_ptr = &pvt_data[1]; + break; + case BCMILCP_BCM_SUBTYPE_DNGLEVENT: +#ifdef DNGL_EVENT_SUPPORT + /* If it is a DNGL event process it first */ + if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) { + /* + * Return error purposely to prevent DNGL event being processed + * as BRCM event + */ + return BCME_ERROR; + } +#endif /* DNGL_EVENT_SUPPORT */ + return BCME_NOTFOUND; + default: + return BCME_NOTFOUND; + } + + /* start wl_event_msg process */ + event_data = *data_ptr; + type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + reason = ntoh32_ua((void *)&event->reason); + datalen = ntoh32_ua((void *)&event->datalen); + evlen = datalen + sizeof(bcm_event_t); + + switch (type) { +#ifdef PROP_TXSTATUS + case WLC_E_FIFO_CREDIT_MAP: + dhd_wlfc_enable(dhd_pub); + dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data); + WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): " + "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1], + event_data[2], + 
event_data[3], event_data[4], event_data[5])); + break; + + case WLC_E_BCMC_CREDIT_SUPPORT: + dhd_wlfc_BCMCCredit_support_event(dhd_pub); + break; +#ifdef LIMIT_BORROW + case WLC_E_ALLOW_CREDIT_BORROW: + dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data); + break; +#endif /* LIMIT_BORROW */ +#endif /* PROP_TXSTATUS */ + + case WLC_E_ULP: +#ifdef DHD_ULP + { + wl_ulp_event_t *ulp_evt = (wl_ulp_event_t *)event_data; + + /* Flush and disable console messages */ + if (ulp_evt->ulp_dongle_action == WL_ULP_DISABLE_CONSOLE) { +#ifdef DHD_ULP_NOT_USED + dhd_bus_ulp_disable_console(dhd_pub); +#endif /* DHD_ULP_NOT_USED */ + } + if (ulp_evt->ulp_dongle_action == WL_ULP_UCODE_DOWNLOAD) { + dhd_bus_ucode_download(dhd_pub->bus); + } + } +#endif /* DHD_ULP */ + break; + case WLC_E_TDLS_PEER_EVENT: +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + { + dhd_tdls_event_handler(dhd_pub, event); + } +#endif // endif + break; + + case WLC_E_IF: + { + struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data; + + /* Ignore the event if NOIF is set */ + if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) { + DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n")); + return (BCME_UNSUPPORTED); + } +#ifdef PCIE_FULL_DONGLE + dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx, + ifevent->opcode, ifevent->role); +#endif // endif +#ifdef PROP_TXSTATUS + { + uint8* ea = pvt_data->eth.ether_dhost; + WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n" + ifevent->ifidx, + ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"), + ((ifevent->role == 0) ? "STA":"AP "), + MAC2STRDBG(ea))); + (void)ea; + + if (ifevent->opcode == WLC_E_IF_CHANGE) + dhd_wlfc_interface_event(dhd_pub, + eWLFC_MAC_ENTRY_ACTION_UPDATE, + ifevent->ifidx, ifevent->role, ea); + else + dhd_wlfc_interface_event(dhd_pub, + ((ifevent->opcode == WLC_E_IF_ADD) ? 
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL), + ifevent->ifidx, ifevent->role, ea); + + /* dhd already has created an interface by default, for 0 */ + if (ifevent->ifidx == 0) + break; + } +#endif /* PROP_TXSTATUS */ + + if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) { + if (ifevent->opcode == WLC_E_IF_ADD) { + if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname, + event->addr.octet)) { + + DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); + return (BCME_ERROR); + } + } else if (ifevent->opcode == WLC_E_IF_DEL) { +#ifdef PCIE_FULL_DONGLE + /* Delete flowrings unconditionally for i/f delete */ + dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname)); +#endif /* PCIE_FULL_DONGLE */ + dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname, + event->addr.octet); + } else if (ifevent->opcode == WLC_E_IF_CHANGE) { +#ifdef WL_CFG80211 + dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname, + event->addr.octet); +#endif /* WL_CFG80211 */ + } + } else { +#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211) + DHD_INFO(("%s: Invalid ifidx %d for %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); +#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */ + } + /* send up the if event: btamp user needs it */ + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + break; + } + + case WLC_E_NDIS_LINK: + break; + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */ + case WLC_E_PFN_NET_LOST: + break; +#if defined(PNO_SUPPORT) + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BEST_BATCHING: + dhd_pno_event_handler(dhd_pub, event, (void *)event_data); + break; +#endif // endif +#if defined(RTT_SUPPORT) + case WLC_E_PROXD: + dhd_rtt_event_handler(dhd_pub, event, (void *)event_data); 
+ break; +#endif /* RTT_SUPPORT */ + /* These are what external supplicant/authenticator wants */ + case WLC_E_ASSOC_IND: + case WLC_E_AUTH_IND: + case WLC_E_REASSOC_IND: + dhd_findadd_sta(dhd_pub, + dhd_ifname2idx(dhd_pub->info, event->ifname), + &event->addr.octet); + break; +#ifndef BCMDBUS +#if defined(DHD_FW_COREDUMP) + case WLC_E_PSM_WATCHDOG: + DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__)); + if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) { + DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__)); + } + break; +#endif // endif +#endif /* !BCMDBUS */ + case WLC_E_NATOE_NFCT: +#ifdef WL_NATOE + DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__)); + dhd_natoe_ct_event(dhd_pub, event_data); +#endif /* WL_NATOE */ + break; +#ifdef WL_NAN + case WLC_E_SLOTTED_BSS_PEER_OP: + DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: " + "" MACDBG ", status = %d\n", + __FUNCTION__, MAC2STRDBG(event->addr.octet), status)); + if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) { + dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); + } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) { + uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); + BCM_REFERENCE(ifindex); + dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); +#ifdef PCIE_FULL_DONGLE + dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, + (char *)&event->addr.octet[0]); +#endif // endif + } else { + DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n", + __FUNCTION__, status)); + } + break; +#endif /* WL_NAN */ + case WLC_E_LINK: +#ifdef PCIE_FULL_DONGLE + if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname), (uint8)flags) != BCME_OK) { + DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n", + __FUNCTION__)); + break; + } + if (!flags) { + DHD_ERROR(("%s: Deleting all STA from assoc list and 
flowrings.\n", + __FUNCTION__)); + /* Delete all sta and flowrings */ + dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname)); + dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname)); + } + /* fall through */ +#endif /* PCIE_FULL_DONGLE */ + case WLC_E_DEAUTH: + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC: + case WLC_E_DISASSOC_IND: +#ifdef PCIE_FULL_DONGLE + if (type != WLC_E_LINK) { + uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); + uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex); + uint8 del_sta = TRUE; +#ifdef WL_CFG80211 + if (role == WLC_E_IF_ROLE_STA && + !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) && + !wl_cfg80211_is_event_from_connected_bssid( + dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) { + del_sta = FALSE; + } +#endif /* WL_CFG80211 */ + DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n", + __FUNCTION__, type, flags, status, role, del_sta)); + + if (del_sta) { + DHD_EVENT(("%s: Deleting STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(event->addr.octet))); + + dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); + /* Delete all flowrings for STA and P2P Client */ + if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) { + dhd_flow_rings_delete(dhd_pub, ifindex); + } else { + dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, + (char *)&event->addr.octet[0]); + } + } + } +#endif /* PCIE_FULL_DONGLE */ + /* fall through */ + + default: + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); +#ifdef DHD_UPDATE_INTF_MAC + if ((WLC_E_LINK==type)&&(WLC_EVENT_MSG_LINK&flags)) { + dhd_event_ifchange(dhd_pub->info, + (struct wl_event_data_if *)event, + event->ifname, + event->addr.octet); + } +#endif /* DHD_UPDATE_INTF_MAC */ + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", + 
__FUNCTION__, type, flags, status)); + BCM_REFERENCE(flags); + BCM_REFERENCE(status); + BCM_REFERENCE(reason); + + break; + } +#if defined(STBAP) + /* For routers, EAPD will be working on these events. + * Overwrite interface name to that event is pushed + * to host with its registered interface name + */ + memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ); +#endif // endif + +#ifdef SHOW_EVENTS + if (DHD_FWLOG_ON() || DHD_EVENT_ON()) { + wl_show_host_event(dhd_pub, event, + (void *)event_data, raw_event, dhd_pub->enable_log); + } +#endif /* SHOW_EVENTS */ + + return (BCME_OK); +} /* wl_process_host_event */ + +int +wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen, + wl_event_msg_t *event, void **data_ptr, void *raw_event) +{ + return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr, + raw_event); +} + +void +dhd_print_buf(void *pbuf, int len, int bytes_per_line) +{ +#ifdef DHD_DEBUG + int i, j = 0; + unsigned char *buf = pbuf; + + if (bytes_per_line == 0) { + bytes_per_line = len; + } + + for (i = 0; i < len; i++) { + printf("%2.2x", *buf++); + j++; + if (j == bytes_per_line) { + printf("\n"); + j = 0; + } else { + printf(":"); + } + } + printf("\n"); +#endif /* DHD_DEBUG */ +} +#ifndef strtoul +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#endif // endif + +#if defined(PKT_FILTER_SUPPORT) +/* Convert user's input in hex pattern to byte-size mask */ +int +wl_pattern_atoh(char *src, char *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && + strncmp(src, "0X", 2) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + DHD_ERROR(("Mask invalid format. 
Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[3]; + bcm_strncpy_s(num, sizeof(num), src, 2); + num[2] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += 2; + } + return i; +} + +int +pattern_atoh_len(char *src, char *dst, int len) +{ + int i; + if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 && + strncmp(src, "0X", HD_PREFIX_SIZE) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + HD_PREFIX_SIZE; /* Skip past 0x */ + if (strlen(src) % HD_BYTE_SIZE != 0) { + DHD_ERROR(("Mask invalid format. Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[HD_BYTE_SIZE + 1]; + + if (i > len - 1) { + DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len)); + return -1; + } + bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE); + num[HD_BYTE_SIZE] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += HD_BYTE_SIZE; + } + return i; +} +#endif // endif + +#ifdef PKT_FILTER_SUPPORT +void +dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode) +{ + char *argv[8]; + int i = 0; + const char *str; + int buf_len; + int str_len; + char *arg_save = 0, *arg_org = 0; + int rc; + char buf[32] = {0}; + wl_pkt_filter_enable_t enable_parm; + wl_pkt_filter_enable_t * pkt_filterp; + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + arg_org = arg_save; + memcpy(arg_save, arg, strlen(arg) + 1); + + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_enable"; + str_len = strlen(str); + bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1); + buf[ sizeof(buf) - 1 ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1); + + /* Parse packet filter id. 
*/ + enable_parm.id = htod32(strtoul(argv[i], NULL, 0)); + if (dhd_conf_del_pkt_filter(dhd, enable_parm.id)) + goto fail; + + /* Parse enable/disable value. */ + enable_parm.enable = htod32(enable); + + buf_len += sizeof(enable_parm); + memcpy((char *)pkt_filterp, + &enable_parm, + sizeof(enable_parm)); + + /* Enable/disable the specified filter. */ + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) { + DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n", + __FUNCTION__, enable?"enable":"disable", arg, rc)); + dhd_set_packet_filter(dhd); + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) { + DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + } else { + DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n", + __FUNCTION__, arg)); + } + } + else + DHD_TRACE(("%s: successfully %s pktfilter %s\n", + __FUNCTION__, enable?"enable":"disable", arg)); + + /* Contorl the master mode */ + rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode", + master_mode, WLC_SET_VAR, TRUE, 0); + rc = rc >= 0 ? 
0 : rc; + if (rc) + DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n", + __FUNCTION__, master_mode, rc)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); +} + +/* Packet filter section: extended filters have named offsets, add table here */ +typedef struct { + char *name; + uint16 base; +} wl_pfbase_t; + +static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES }; + +static int +wl_pkt_filter_base_parse(char *name) +{ + uint i; + char *bname, *uname; + + for (i = 0; i < ARRAYSIZE(basenames); i++) { + bname = basenames[i].name; + for (uname = name; *uname; bname++, uname++) { + if (*bname != bcm_toupper(*uname)) { + break; + } + } + if (!*uname && !*bname) { + break; + } + } + + if (i < ARRAYSIZE(basenames)) { + return basenames[i].base; + } else { + return -1; + } +} + +void +dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) +{ + const char *str; + wl_pkt_filter_t pkt_filter; + wl_pkt_filter_t *pkt_filterp; + int buf_len; + int str_len; + int rc = -1; + uint32 mask_size; + uint32 pattern_size; + char *argv[16], * buf = 0; + int i = 0; + char *arg_save = 0, *arg_org = 0; + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + arg_org = arg_save; + + if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + memset(buf, 0, MAX_PKTFLT_BUF_SIZE); + memcpy(arg_save, arg, strlen(arg) + 1); + + if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) { + DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf))); + goto fail; + } + + argv[i] = bcmstrtok(&arg_save, " ", 0); + while (argv[i++]) + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_add"; + str_len = strlen(str); + bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len); + buf[ str_len ] = '\0'; + 
buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1); + + /* Parse packet filter id. */ + pkt_filter.id = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Polarity not provided\n")); + goto fail; + } + + /* Parse filter polarity. */ + pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Filter type not provided\n")); + goto fail; + } + + /* Parse filter type. */ + pkt_filter.type = htod32(strtoul(argv[i], NULL, 0)); + + if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) { + if (argv[++i] == NULL) { + DHD_ERROR(("Offset not provided\n")); + goto fail; + } + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Bitmask not provided\n")); + goto fail; + } + + /* Parse pattern filter mask. */ + rc = wl_pattern_atoh(argv[i], + (char *) pkt_filterp->u.pattern.mask_and_pattern); + + if (rc == -1) { + DHD_ERROR(("Rejecting: %s\n", argv[i])); + goto fail; + } + mask_size = htod32(rc); + if (argv[++i] == NULL) { + DHD_ERROR(("Pattern not provided\n")); + goto fail; + } + + /* Parse pattern filter pattern. */ + rc = wl_pattern_atoh(argv[i], + (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]); + + if (rc == -1) { + DHD_ERROR(("Rejecting: %s\n", argv[i])); + goto fail; + } + pattern_size = htod32(rc); + if (mask_size != pattern_size) { + DHD_ERROR(("Mask and pattern not the same size\n")); + goto fail; + } + + pkt_filter.u.pattern.size_bytes = mask_size; + buf_len += WL_PKT_FILTER_FIXED_LEN; + buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); + + /* Keep-alive attributes are set in local variable (keep_alive_pkt), and + * then memcpy'ed into buffer (keep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. 
+ */ + memcpy((char *)pkt_filterp, + &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); + } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) { + int list_cnt = 0; + char *endptr = NULL; + wl_pkt_filter_pattern_listel_t *pf_el = + (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0]; + + while (argv[++i] != NULL) { + /* Check valid buffer size. */ + if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) { + DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n")); + goto fail; + } + + /* Parse pattern filter base and offset. */ + if (bcm_isdigit(*argv[i])) { + /* Numeric base */ + rc = strtoul(argv[i], &endptr, 0); + } else { + endptr = strchr(argv[i], ':'); + if (endptr) { + *endptr = '\0'; + rc = wl_pkt_filter_base_parse(argv[i]); + if (rc == -1) { + printf("Invalid base %s\n", argv[i]); + goto fail; + } + *endptr = ':'; + } + } + + if (endptr == NULL) { + printf("Invalid [base:]offset format: %s\n", argv[i]); + goto fail; + } + + if (*endptr == ':') { + pf_el->base_offs = htod16(rc); + rc = strtoul(endptr + 1, &endptr, 0); + } else { + /* Must have had a numeric offset only */ + pf_el->base_offs = htod16(0); + } + + if (*endptr) { + printf("Invalid [base:]offset format: %s\n", argv[i]); + goto fail; + } + if (rc > 0x0000FFFF) { + printf("Offset too large\n"); + goto fail; + } + pf_el->rel_offs = htod16(rc); + + /* Clear match_flag (may be set in parsing which follows) */ + pf_el->match_flags = htod16(0); + + /* Parse pattern filter mask and pattern directly into ioctl buffer */ + if (argv[++i] == NULL) { + printf("Bitmask not provided\n"); + goto fail; + } + rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data); + if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) { + printf("Rejecting: %s\n", argv[i]); + goto fail; + } + mask_size = htod16(rc); + + if (argv[++i] == NULL) { + printf("Pattern not provided\n"); + goto fail; + } + + if (*argv[i] == '!') { + pf_el->match_flags = + 
htod16(WL_PKT_FILTER_MFLAG_NEG); + (argv[i])++; + } + if (*argv[i] == '\0') { + printf("Pattern not provided\n"); + goto fail; + } + rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]); + if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) { + printf("Rejecting: %s\n", argv[i]); + goto fail; + } + pattern_size = htod16(rc); + + if (mask_size != pattern_size) { + printf("Mask and pattern not the same size\n"); + goto fail; + } + + pf_el->size_bytes = mask_size; + + /* Account for the size of this pattern element */ + buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc; + + /* Move to next element location in ioctl buffer */ + pf_el = (wl_pkt_filter_pattern_listel_t*) + ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc); + + /* Count list element */ + list_cnt++; + } + + /* Account for initial fixed size, and copy initial fixed fields */ + buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN; + + if (buf_len > MAX_PKTFLT_BUF_SIZE) { + DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n")); + goto fail; + } + /* Update list count and total size */ + pkt_filter.u.patlist.list_cnt = list_cnt; + pkt_filter.u.patlist.PAD1[0] = 0; + pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp; + pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN; + + memcpy((char *)pkt_filterp, &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN); + } else { + DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type)); + goto fail; + } + + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 
0 : rc; + + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); + + if (buf) + MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE); +} + +void +dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) +{ + int ret; + + ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete", + id, WLC_SET_VAR, TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n", + __FUNCTION__, id, ret)); + } + else + DHD_TRACE(("%s: successfully deleted pktfilter %d\n", + __FUNCTION__, id)); +} +#endif /* PKT_FILTER_SUPPORT */ + +/* ========================== */ +/* ==== ARP OFFLOAD SUPPORT = */ +/* ========================== */ +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) +{ + int retcode; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol", + arp_mode, WLC_SET_VAR, TRUE, 0); + + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n", + __FUNCTION__, arp_mode, retcode)); + else + DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n", + __FUNCTION__, arp_mode)); +} + +void +dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) +{ + int retcode; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe", + arp_enable, WLC_SET_VAR, TRUE, 0); + + retcode = retcode >= 0 ? 
0 : retcode; + if (retcode) + DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n", + __FUNCTION__, arp_enable, retcode)); + else + DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n", + __FUNCTION__, arp_enable)); + if (arp_enable) { + uint32 version; + retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version", + &version, WLC_GET_VAR, FALSE, 0); + if (retcode) { + DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n", + __FUNCTION__, retcode)); + dhd->arp_version = 1; + } + else { + DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version)); + dhd->arp_version = version; + } + } +} + +void +dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx) +{ + int ret; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr), + NULL, 0, TRUE); + if (ret) + DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret)); + else + DHD_ARPOE(("%s: sARP H ipaddr entry added \n", + __FUNCTION__)); +} + +int +dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx) +{ + int ret, i; + uint32 *ptr32 = buf; + bool clr_bottom = FALSE; + + if (!buf) + return -1; + if (dhd == NULL) return -1; + if (dhd->arp_version == 1) + idx = 0; + + ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen, + FALSE); + if (ret) { + DHD_ERROR(("%s: ioctl WLC_GET_VAR error 
%d\n", + __FUNCTION__, ret)); + + return -1; + } + + /* clean up the buf, ascii reminder */ + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (!clr_bottom) { + if (*ptr32 == 0) + clr_bottom = TRUE; + } else { + *ptr32 = 0; + } + ptr32++; + } + + return 0; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* + * Neighbor Discovery Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up/goes down + */ +int +dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable) +{ + int retcode; + + if (dhd == NULL) + return -1; + +#if defined(WL_CFG80211) && defined(WL_NAN) + if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) { + /* If nan dp is active, skip NDO */ + DHD_INFO(("Active NAN DP, skip NDO\n")); + return 0; + } +#endif /* WL_CFG80211 && WL_NAN */ +#ifdef WL_CFG80211 + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + /* NDO disable on STA+SOFTAP mode */ + ndo_enable = FALSE; + } +#endif /* WL_CFG80211 */ + retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe", + ndo_enable, WLC_SET_VAR, TRUE, 0); + if (retcode) + DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n", + __FUNCTION__, ndo_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabed ndo offload to %d\n", + __FUNCTION__, ndo_enable)); + + return retcode; +} + +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up + */ +int +dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr, + IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry added \n", + 
__FUNCTION__)); + + return retcode; +} + +/* + * Neighbor Discover Offload: enable NDO feature + * Called by ipv6 event handler when interface goes down + */ +int +dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip_clear", NULL, + 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry removed \n", + __FUNCTION__)); + + return retcode; +} +/* Enhanced ND offload */ +uint16 +dhd_ndo_get_version(dhd_pub_t *dhdp) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_get_ver; + int iov_len; + int retcode; + uint16 ver = 0; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + memset(&iovbuf, 0, sizeof(iovbuf)); + ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER); + ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16)); + ndo_get_ver.u.version = 0; + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver, + WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0); + + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + /* ver iovar not supported. 
NDO version is 0 */ + ver = 0; + } else { + wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf; + + if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) && + (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) && + (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN + + sizeof(uint16))) { + /* nd_hostip iovar version */ + ver = dtoh16(ndo_ver_ret->u.version); + } + + DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver)); + } + + return ver; +} + +int +dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_add_addr; + int iov_len; + int retcode; + + if (dhdp == NULL || ipv6addr == 0) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD); + ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN); + /* wl_nd_host_ip_addr_t param for add */ + memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN); + ndo_add_addr.u.host_ip.type = type; + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr, + WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); +#ifdef NDO_CONFIG_SUPPORT + if (retcode == BCME_NORESOURCE) { + /* number of host ip addr exceeds FW capacity, Deactivate ND offload */ + DHD_INFO(("%s: Host IP count exceed device capacity," + "ND offload deactivated\n", __FUNCTION__)); + dhdp->ndo_host_ip_overflow = TRUE; + dhd_ndo_enable(dhdp, FALSE); + } +#endif /* NDO_CONFIG_SUPPORT */ + } else { + DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_remove_ip_by_addr(dhd_pub_t 
*dhdp, char *ipv6addr, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_del_addr; + int iov_len; + int retcode; + + if (dhdp == NULL || ipv6addr == 0) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL); + ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN); + /* wl_nd_host_ip_addr_t param for del */ + memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN); + ndo_del_addr.u.host_ip.type = 0; /* don't care */ + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, + WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + } else { + DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_del_addr; + int iov_len; + int retcode; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) { + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC); + } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) { + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC); + } else { + return BCME_BADARG; + } + ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN); + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN, + iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, 
WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + } else { + DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int iov_len; + int retcode; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int), + iovbuf, sizeof(iovbuf)); + + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0); + if (retcode) + DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n", + __FUNCTION__, enable, retcode)); + else { + DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n", + __FUNCTION__, enable)); + } + + return retcode; +} +#ifdef SIMPLE_ISCAN + +uint iscan_thread_id = 0; +iscan_buf_t * iscan_chain = 0; + +iscan_buf_t * +dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf) +{ + iscan_buf_t *iscanbuf_alloc = 0; + iscan_buf_t *iscanbuf_head; + + DHD_ISCAN(("%s: Entered\n", __FUNCTION__)); + dhd_iscan_lock(); + + iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t)); + if (iscanbuf_alloc == NULL) + goto fail; + + iscanbuf_alloc->next = NULL; + iscanbuf_head = *iscanbuf; + + DHD_ISCAN(("%s: addr of allocated node = 0x%X" + "addr of iscanbuf_head = 0x%X dhd = 0x%X\n", + __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd)); + + if (iscanbuf_head == NULL) { + *iscanbuf = iscanbuf_alloc; + DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__)); + goto fail; + } + + while (iscanbuf_head->next) + iscanbuf_head = iscanbuf_head->next; + + iscanbuf_head->next = iscanbuf_alloc; + +fail: + dhd_iscan_unlock(); + return iscanbuf_alloc; +} + +void +dhd_iscan_free_buf(void *dhdp, 
iscan_buf_t *iscan_delete) +{ + iscan_buf_t *iscanbuf_free = 0; + iscan_buf_t *iscanbuf_prv = 0; + iscan_buf_t *iscanbuf_cur; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + DHD_ISCAN(("%s: Entered\n", __FUNCTION__)); + + dhd_iscan_lock(); + + iscanbuf_cur = iscan_chain; + + /* If iscan_delete is null then delete the entire + * chain or else delete specific one provided + */ + if (!iscan_delete) { + while (iscanbuf_cur) { + iscanbuf_free = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + iscanbuf_free->next = 0; + MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t)); + } + iscan_chain = 0; + } else { + while (iscanbuf_cur) { + if (iscanbuf_cur == iscan_delete) + break; + iscanbuf_prv = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + } + if (iscanbuf_prv) + iscanbuf_prv->next = iscan_delete->next; + + iscan_delete->next = 0; + MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t)); + + if (!iscanbuf_prv) + iscan_chain = 0; + } + dhd_iscan_unlock(); +} + +iscan_buf_t * +dhd_iscan_result_buf(void) +{ + return iscan_chain; +} + +int +dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size) +{ + int rc = -1; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + char *buf; + char iovar[] = "iscan"; + uint32 allocSize = 0; + wl_ioctl_t ioctl; + int len; + + if (pParams) { + allocSize = (size + strlen(iovar) + 1); + if ((allocSize < size) || (allocSize < strlen(iovar))) + { + DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n", + __FUNCTION__, allocSize, size, strlen(iovar))); + goto cleanUp; + } + buf = MALLOC(dhd->osh, allocSize); + + if (buf == NULL) + { + DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize)); + goto cleanUp; + } + ioctl.cmd = WLC_SET_VAR; + len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize); + if (len == 0) { + rc = BCME_BUFTOOSHORT; + goto cleanUp; + } + rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len); + } + +cleanUp: + if (buf) { + MFREE(dhd->osh, buf, allocSize); + } + + return rc; +} + +static int 
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count) +{ + wl_iscan_results_t *list_buf; + wl_iscan_results_t list; + wl_scan_results_t *results; + iscan_buf_t *iscan_cur; + int status = -1; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + int rc; + wl_ioctl_t ioctl; + int len; + + DHD_ISCAN(("%s: Enter\n", __FUNCTION__)); + + iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain); + if (!iscan_cur) { + DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__)); + dhd_iscan_free_buf(dhdp, 0); + dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT); + dhd_ind_scan_confirm(dhdp, FALSE); + goto fail; + } + + dhd_iscan_lock(); + + memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); + list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); + len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE, + iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN); + if (len == 0) { + dhd_iscan_free_buf(dhdp, 0); + dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT); + dhd_ind_scan_confirm(dhdp, FALSE); + status = BCME_BUFTOOSHORT; + goto fail; + } + ioctl.cmd = WLC_GET_VAR; + ioctl.set = FALSE; + rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN); + + results->buflen = dtoh32(results->buflen); + results->version = dtoh32(results->version); + *scan_count = results->count = dtoh32(results->count); + status = dtoh32(list_buf->status); + DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status)); + + dhd_iscan_unlock(); + + if (!(*scan_count)) { + /* TODO: race condition when FLUSH already called */ + dhd_iscan_free_buf(dhdp, 0); + } +fail: + return status; +} + +#endif /* SIMPLE_ISCAN */ + +/* + * returns = TRUE if associated, FALSE if not associated + */ +bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval) +{ + 
char bssid[6], zbuf[6];
	int ret = -1;

	bzero(bssid, 6);
	bzero(zbuf, 6);

	/* Ask the dongle for the current BSSID on interface ifidx */
	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
		ETHER_ADDR_LEN, FALSE, ifidx);
	DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));

	if (ret == BCME_NOTASSOCIATED) {
		DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
	}

	/* Optionally hand the raw ioctl result back to the caller */
	if (retval)
		*retval = ret;

	if (ret < 0)
		return FALSE;

	/* An all-zero BSSID also means "not associated" */
	if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
		DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
		return FALSE;
	}
	return TRUE;
}

/* Function to estimate possible DTIM_SKIP value */
#if defined(BCMPCIE)
/*
 * Compute the beacon-listen-interval-in-DTIMs (bcn_li_dtim) to use while
 * suspended.  Also returns the AP's DTIM period and beacon interval through
 * the out parameters.  Falls back to 1 (no skip) on any query failure.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
{
	int bcn_li_dtim = 1; /* default: no DTIM skip */
	int ret = -1;
	int allowed_skip_dtim_cnt = 0;

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		/* NOTE(review): ret is still its initial -1 here */
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	if (dtim_period == NULL || bcn_interval == NULL)
		return bcn_li_dtim;

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* read associated AP dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* if not associated just return */
	if (*dtim_period == 0) {
		return bcn_li_dtim;
	}

	if (dhd->max_dtim_enable) {
		/* Skip as many DTIMs as fit in MAX_DTIM_ALLOWED_INTERVAL */
		bcn_li_dtim =
			(int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits
		 * into AP dtim */
		if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval : no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
			return bcn_li_dtim;
		}

		/* Clamp so the total skipped span stays within the allowed max */
		if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
			DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));

	return bcn_li_dtim;
}
#else /* OEM_ANDROID && BCMPCIE */
/*
 * Non-PCIe variant: same bcn_li_dtim estimation but the AP beacon/DTIM
 * values are kept local; dhd->conf->suspend_bcn_li_dtim (if >= 0) overrides
 * the computed result.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
{
	int bcn_li_dtim = 1; /* default: no DTIM skip */
	int ret = -1;
	int dtim_period = 0;
	int ap_beacon = 0;
	int allowed_skip_dtim_cnt = 0;

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		/* NOTE(review): ret is still its initial -1 here */
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated ap's dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* if not associated just exit */
	if (dtim_period == 0) {
		goto exit;
	}

	if (dhd->max_dtim_enable) {
		bcn_li_dtim =
			(int)
			(MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
		/* never go below the platform-configured skip value */
		bcn_li_dtim = MAX(dhd->suspend_bcn_li_dtim, bcn_li_dtim);
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits into AP dtim */
		if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval : no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
			goto exit;
		}

		if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
			DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	/* Per-platform config override, if set */
	if (dhd->conf->suspend_bcn_li_dtim >= 0)
		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));

exit:
	return bcn_li_dtim;
}
#endif /* OEM_ANDROID && BCMPCIE */

/* Check if the mode supports STA MODE */
bool dhd_support_sta_mode(dhd_pub_t *dhd)
{

#ifdef WL_CFG80211
	if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
		return FALSE;
	else
#endif /* WL_CFG80211 */
	return TRUE;
}

#if defined(KEEP_ALIVE)
/*
 * Program the firmware "mkeep_alive" iovar (NULL keep-alive packet) with the
 * period from dhd->conf->keep_alive_period.  Returns the ioctl result, or -1
 * if the driver is not in STA mode.
 */
int dhd_keep_alive_onoff(dhd_pub_t *dhd)
{
	char buf[32] = {0};
	const char *str;
	wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
	wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
	int buf_len;
	int str_len;
	int res = -1;

	if (!dhd_support_sta_mode(dhd))
		return res;

	DHD_TRACE(("%s execution\n",
__FUNCTION__));

	/* Buffer layout: "mkeep_alive\0" followed by the packed structure */
	str = "mkeep_alive";
	str_len = strlen(str);
	strncpy(buf, str, sizeof(buf) - 1);
	buf[ sizeof(buf) - 1 ] = '\0';
	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
	mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
	buf_len = str_len + 1;
	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
	/* Setup keep alive zero for null packet generation */
	mkeep_alive_pkt.keep_alive_id = 0;
	mkeep_alive_pkt.len_bytes = 0;
	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
	bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
	/* Keep-alive attributes are set in local	variable (mkeep_alive_pkt), and
	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
	 * guarantee that the buffer is properly aligned.
	 */
	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);

	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);

	return res;
}
#endif /* defined(KEEP_ALIVE) */
#define CSCAN_TLV_TYPE_SSID_IE	'S'
/*
 * SSIDs list parsing from cscan tlv list
 *
 * Consumes consecutive 'S'<len><ssid> records from *list_str into ssid[],
 * updating *list_str and *bytes_left as it goes.  Returns the number of
 * SSIDs parsed (stops at the first non-'S' byte) or BCME_BADARG on a
 * malformed record or bad arguments.  A zero-length record means a
 * broadcast SSID.
 */
int
wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
{
	char* str;
	int idx = 0;
	uint8 len;

	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
		return BCME_BADARG;
	}
	str = *list_str;
	while (*bytes_left > 0) {
		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
			/* Not an SSID record: hand back the unconsumed tail */
			*list_str = str;
			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
			return idx;
		}

		if (idx >= max) {
			DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
			return BCME_BADARG;
		}

		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
		*bytes_left -= 1;
		if (*bytes_left == 0) {
			DHD_ERROR(("%s no length field.\n", __FUNCTION__));
			return BCME_BADARG;
		}
		str += 1;
		ssid[idx].rssi_thresh = 0;
		ssid[idx].flags = 0;
		len = str[0];
		if (len == 0) {
			/* Broadcast SSID */
			ssid[idx].SSID_len = 0;
			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
			*bytes_left -= 1;
			str += 1;

			DHD_TRACE(("BROADCAST SCAN	left=%d\n", *bytes_left));
		} else if (len <= DOT11_MAX_SSID_LEN) {
			/* Get proper SSID size */
			ssid[idx].SSID_len = len;
			*bytes_left -= 1;
			/* Get SSID */
			if (ssid[idx].SSID_len > *bytes_left) {
				DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
					__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
				return BCME_BADARG;
			}
			str += 1;
			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);

			*bytes_left -= ssid[idx].SSID_len;
			str += ssid[idx].SSID_len;
			/* An explicit SSID is scanned as a directed/hidden probe */
			ssid[idx].hidden = TRUE;

			DHD_TRACE(("%s :size=%d left=%d\n",
				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
		} else {
			DHD_ERROR(("### SSID size more than %d\n", str[0]));
			return BCME_BADARG;
		}
		idx++;
	}

	*list_str = str;
	return idx;
}

#if defined(WL_WIRELESS_EXT)
/* Android ComboSCAN support */

/*
 *  data parsing from ComboScan tlv list
 *
 * Reads one <token><value> record of input_size (1, 2 or 4) bytes into dst
 * (dst is zeroed first), advancing *list_str / *bytes_left.  Returns 1 on
 * success or when nothing is left, -1 on bad arguments or token mismatch.
 */
int
wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
                     int input_size, int *bytes_left)
{
	char* str;
	uint16 short_temp;
	uint32 int_temp;

	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
		return -1;
	}
	str = *list_str;

	/* Clean all dest bytes */
	memset(dst, 0, dst_size);
	if (*bytes_left > 0) {

		if (str[0] != token) {
			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
				__FUNCTION__, token, str[0], *bytes_left));
			return -1;
		}

		*bytes_left -= 1;
		str += 1;

		if (input_size == 1) {
			memcpy(dst, str, input_size);
		}
		else if (input_size == 2) {
			/* NOTE(review): memcpy returns &short_temp, which htod16
			 * passes through unchanged on little-endian builds; on a
			 * big-endian target this pointer arithmetic would be
			 * wrong -- verify before porting.
			 */
			memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
				input_size);
		}
		else if (input_size == 4) {
			/* Same pattern as above, via htod32 */
			memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
				input_size);
		}

		*bytes_left -= input_size;
		str
+= input_size;
		*list_str = str;
		return 1;
	}
	return 1;
}

/*
 *  channel list parsing from cscan tlv list
 *
 * Consumes consecutive CSCAN_TLV_TYPE_CHANNEL_IE records into channel_list[]
 * (0 means "all channels"), advancing *list_str / *bytes_left.  Returns the
 * number of channels parsed, or -1 on bad arguments or overflow.
 */
int
wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
                             int channel_num, int *bytes_left)
{
	char* str;
	int idx = 0;

	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
		return -1;
	}
	str = *list_str;

	while (*bytes_left > 0) {

		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
			*list_str = str;
			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
			return idx;
		}
		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
		*bytes_left -= 1;
		str += 1;

		if (str[0] == 0) {
			/* All channels */
			channel_list[idx] = 0x0;
		}
		else {
			channel_list[idx] = (uint16)str[0];
			DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
		}
		*bytes_left -= 1;
		str += 1;

		/* NOTE(review): the bound check is a fixed 255 and ignores the
		 * caller-supplied channel_num; callers must size channel_list
		 * for at least 256 entries -- TODO confirm against callers.
		 */
		if (idx++ > 255) {
			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
			return -1;
		}
	}

	*list_str = str;
	return idx;
}

/* Parse a comma-separated list from list_str into ssid array, starting
 * at index idx.  Max specifies size of the ssid array.  Parses ssids
 * and returns updated idx; if idx >= max not all fit, the excess have
 * not been copied.  Returns -1 on empty string, or on ssid too long.
 */
int
wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
{
	char* str, *ptr;

	if ((list_str == NULL) || (*list_str == NULL))
		return -1;

	for (str = *list_str; str != NULL; str = ptr) {

		/* check for next TAG */
		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
			*list_str	 = str + strlen(GET_CHANNEL);
			return idx;
		}

		/* Split on the next comma (mutates the input buffer) */
		if ((ptr = strchr(str, ',')) != NULL) {
			*ptr++ = '\0';
		}

		if (strlen(str) > DOT11_MAX_SSID_LEN) {
			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
			return -1;
		}

		if (strlen(str) == 0)
			ssid[idx].SSID_len = 0;

		if (idx < max) {
			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
			strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
			ssid[idx].SSID_len = strlen(str);
		}
		idx++;
	}
	return idx;
}

/*
 * Parse channel list from iwpriv CSCAN
 *
 * Reads decimal/hex channel numbers separated by spaces/commas until the
 * GET_NPROBE tag; returns the count or -1 on parse error / overflow.
 */
int
wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
{
	int num;
	int val;
	char* str;
	char* endptr = NULL;

	if ((list_str == NULL)||(*list_str == NULL))
		return -1;

	str = *list_str;
	num = 0;
	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
		val = (int)strtoul(str, &endptr, 0);
		if (endptr == str) {
			printf("could not parse channel number starting at"
				" substring \"%s\" in list:\n%s\n",
				str, *list_str);
			return -1;
		}
		/* skip separators to the next token */
		str = endptr + strspn(endptr, " ,");

		if (num == channel_num) {
			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
				channel_num, *list_str));
			return -1;
		}

		channel_list[num++] = (uint16)val;
	}
	*list_str = str;
	return num;
}
#endif

/* Given filename and download type,  returns a buffer pointer and length
* for download to f/w. Type can be FW or NVRAM.
*
*/
int dhd_get_download_buffer(dhd_pub_t	*dhd, char *file_path, download_type_t component,
	char ** buffer, int *length)

{
	/* On entry *length is the maximum size to read; on success it is the
	 * number of bytes actually read and *buffer owns a MALLOCZ'd copy the
	 * caller must release via dhd_free_download_buffer().
	 */
	int ret = BCME_ERROR;
	int len = 0;
	int file_len;
	void *image = NULL;
	uint8 *buf = NULL;

	/* Point to cache if available. */
	/* No Valid cache found on this call */
	if (!len) {
		file_len = *length;
		*length = 0;

		if (file_path) {
			image = dhd_os_open_image1(dhd, file_path);
			if (image == NULL) {
				printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
				goto err;
			}
		}

		buf = MALLOCZ(dhd->osh, file_len);
		if (buf == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
				__FUNCTION__, file_len));
			goto err;
		}

		/* Download image */
		len = dhd_os_get_image_block((char *)buf, file_len, image);
		if ((len <= 0 || len > file_len)) {
			/* short/over-long read: drop the buffer, keep BCME_ERROR */
			MFREE(dhd->osh, buf, file_len);
			goto err;
		}
	}

	ret = BCME_OK;
	*length = len;
	*buffer = (char *)buf;

	/* Cache if first call. */

err:
	if (image)
		dhd_os_close_image1(dhd, image);

	return ret;
}

/*
 * Wrap one chunk of a blob (already staged in dload_buf as wl_dload_data)
 * in the named iovar and push it to the dongle via WLC_SET_VAR.
 * flag carries DL_BEGIN/DL_END, dload_type the blob kind (e.g. DL_TYPE_CLM).
 *
 * NOTE(review): iovar_buf is a function-local `static`, so this routine is
 * not reentrant; confirm all callers are serialized.
 */
int
dhd_download_2_dongle(dhd_pub_t	*dhd, char *iovar, uint16 flag, uint16 dload_type,
	unsigned char *dload_buf, int len)
{
	struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
	int err = 0;
	int dload_data_offset;
	static char iovar_buf[WLC_IOCTL_MEDLEN];
	int iovar_len;

	memset(iovar_buf, 0, sizeof(iovar_buf));

	/* Fill in the dload header in front of the payload */
	dload_data_offset = OFFSETOF(wl_dload_data_t, data);
	dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
	dload_ptr->dload_type = dload_type;
	dload_ptr->len = htod32(len - dload_data_offset);
	dload_ptr->crc = 0;
	len = ROUNDUP(len, 8);

	iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
		(uint)len, iovar_buf, sizeof(iovar_buf));
	if (iovar_len == 0) {
		DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
			__FUNCTION__, iovar));
		return BCME_BUFTOOSHORT;
	}

	err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
		iovar_len, IOV_SET, 0);

	return err;
}

/*
 * Stream an image (opened handle in buf) to the dongle in MAX_CHUNK_LEN
 * pieces through dhd_download_2_dongle(), marking the first chunk DL_BEGIN
 * and the last DL_END.  Returns BCME_OK, BCME_NOMEM or BCME_ERROR.
 */
int
dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
		uint32 len, char *iovar)

{
	int chunk_len;
	int size2alloc;
	unsigned char *new_buf;
	int err = 0, data_offset;
	uint16 dl_flag = DL_BEGIN;

	data_offset = OFFSETOF(wl_dload_data_t, data);
	size2alloc = data_offset + MAX_CHUNK_LEN;
	size2alloc = ROUNDUP(size2alloc, 8);

	if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
		do {
			chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
				MAX_CHUNK_LEN, buf);
			if (chunk_len < 0) {
				DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
					__FUNCTION__, chunk_len));
				err = BCME_ERROR;
				goto exit;
			}
			if (len - chunk_len == 0)
				dl_flag |= DL_END;

			err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
				new_buf, data_offset + chunk_len);

			/* only the first chunk carries DL_BEGIN */
			dl_flag &= ~DL_BEGIN;

			len = len - chunk_len;
		} while ((len > 0) && (err == 0));
	} else {
		err = BCME_NOMEM;
	}
exit:
	if (new_buf) {
		MFREE(dhd->osh, new_buf, size2alloc);
	}
	return err;
}

/* No default TX capability blob on this platform: intentional no-op. */
int
dhd_apply_default_txcap(dhd_pub_t  *dhd, char *path)
{
	return 0;
}

/*
 * Query the firmware "country" iovar to decide whether CLM data is present.
 * Returns TRUE if a real country code is set, FALSE if it is the null
 * country (no CLM), or a negative BCME_* error code on query failure.
 */
int
dhd_check_current_clm_data(dhd_pub_t *dhd)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	wl_country_t *cspec;
	int err = BCME_OK;

	memset(iovbuf, 0, sizeof(iovbuf));
	err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
	if (err == 0) {
		/* bcm_mkiovar returns the packed length; 0 means no room */
		err = BCME_BUFTOOSHORT;
		DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
		return err;
	}
	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
	if (err) {
		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
		return err;
	}
	cspec = (wl_country_t *)iovbuf;
	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
		DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
			__FUNCTION__));
		return FALSE;
	}
	DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
		__FUNCTION__));
	return TRUE;
}

int
dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
{
	/* Download the CLM (regulatory) blob to the dongle.  clm_path (module
	 * parameter) overrides the built-in VENDOR_PATH CONFIG_BCMDHD_CLM_PATH.
	 * After download (or if no blob exists) the firmware country code is
	 * validated; returns BCME_OK or a BCME_* / FALSE-ish error.
	 */
	char *clm_blob_path;
	int len;
	char *memblock = NULL;
	int err = BCME_OK;
	char iovbuf[WLC_IOCTL_SMLEN];
	int status = FALSE;

	if (clm_path && clm_path[0] != '\0') {
		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
			DHD_ERROR(("clm path exceeds max len\n"));
			return BCME_ERROR;
		}
		clm_blob_path = clm_path;
		DHD_TRACE(("clm path from module param:%s\n", clm_path));
	} else {
		clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
	}

	/* If CLM blob file is found on the filesystem, download the file.
	 * After CLM file download or If the blob file is not present,
	 * validate the country code before proceeding with the initialization.
	 * If country code is not valid, fail the initialization.
	 */
	memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
	if (memblock == NULL) {
		printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
		if (dhd->is_blob) {
			/* blob is mandatory on this build but missing */
			err = BCME_ERROR;
		} else {
			/* fall back to CLM already embedded in firmware */
			status = dhd_check_current_clm_data(dhd);
			if (status == TRUE) {
				err = BCME_OK;
			} else {
				err = status;
			}
		}
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		goto exit;
	}

	len = dhd_os_get_image_size(memblock);

	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
		status = dhd_check_current_clm_data(dhd);
		if (status == TRUE) {
#if defined(DHD_BLOB_EXISTENCE_CHECK)
			/* FW already has CLM: only MFG mode re-downloads */
			if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
				if (dhd->is_blob) {
					err = BCME_ERROR;
				}
				goto exit;
			}
#else
			DHD_ERROR(("%s: CLM already exist in F/W, "
				"new CLM data will be added to the end of existing CLM data!\n",
				__FUNCTION__));
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		} else if (status != FALSE) {
			/* negative status == query error, propagate */
			err = status;
			goto exit;
		}

		/* Found blob file. Download the file */
		DHD_TRACE(("clm file download from %s \n", clm_blob_path));
		err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
		if (err) {
			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
			/* Retrieve clmload_status and print */
			memset(iovbuf, 0, sizeof(iovbuf));
			len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
			if (len == 0) {
				err = BCME_BUFTOOSHORT;
				goto exit;
			}
			err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
			if (err) {
				DHD_ERROR(("%s: clmload_status get failed err=%d \n",
					__FUNCTION__, err));
			} else {
				DHD_ERROR(("%s: clmload_status: %d \n",
					__FUNCTION__, *((int *)iovbuf)));
				if (*((int *)iovbuf) == CHIPID_MISMATCH) {
					DHD_ERROR(("Chip ID mismatch error \n"));
				}
			}
			err = BCME_ERROR;
			goto exit;
		} else {
			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
		}
	} else {
		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
	}

	/* Verify country code */
	status = dhd_check_current_clm_data(dhd);

	if (status != TRUE) {
		/* Country code not initialized or CLM download not proper */
		DHD_ERROR(("country code not initialized\n"));
		err = status;
	}
exit:

	if (memblock) {
		dhd_os_close_image1(dhd, memblock);
	}

	return err;
}

/* Release a buffer obtained from dhd_get_download_buffer(). */
void dhd_free_download_buffer(dhd_pub_t	*dhd, void *buffer, int length)
{
	MFREE(dhd->osh, buffer, length);
}

#if defined(DHD_8021X_DUMP)
#define EAP_PRINT(str) \
	DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \
	ifname, direction ?
"TX" : "RX"));
#else
#define EAP_PRINT(str)
#endif /* DHD_8021X_DUMP */
/* Parse EAPOL 4 way handshake messages */
/*
 * Classify an 802.1X frame (raw ethernet frame in dump_data) as a WPS/EAP
 * identity exchange, a WPS M1..M8 message, or a WPA 4-way-handshake M1..M4,
 * recording the result in dhd->conf->eapol_status and (optionally) logging
 * it via EAP_PRINT.  direction: nonzero = TX, zero = RX.
 * Fixed byte offsets index into the frame: [15] EAPOL packet type,
 * [18] EAP code, [22] EAP type, [30]/[41] WSC opcode / message type.
 */
void
dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname,
	char *dump_data, bool direction)
{
	unsigned char type;
	int pair, ack, mic, kerr, req, sec, install;
	unsigned short us_tmp;

	type = dump_data[15];
	if (type == 0) {
		/* EAPOL-EAP packet */
		if ((dump_data[22] == 1) && (dump_data[18] == 1)) {
			dhd->conf->eapol_status = EAPOL_STATUS_WPS_REQID;
			EAP_PRINT("EAP Packet, Request, Identity");
		} else if ((dump_data[22] == 1) && (dump_data[18] == 2)) {
			dhd->conf->eapol_status = EAPOL_STATUS_WPS_RSPID;
			EAP_PRINT("EAP Packet, Response, Identity");
		} else if (dump_data[22] == 254) {
			/* EAP expanded type: WSC (WPS) */
			if (dump_data[30] == 1) {
				dhd->conf->eapol_status = EAPOL_STATUS_WPS_WSC_START;
				EAP_PRINT("EAP Packet, WSC Start");
			} else if (dump_data[30] == 4) {
				/* WSC Msg: distinguish M1..M8 by message type byte */
				if (dump_data[41] == 4) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M1;
					EAP_PRINT("EAP Packet, WPS M1");
				} else if (dump_data[41] == 5) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M2;
					EAP_PRINT("EAP Packet, WPS M2");
				} else if (dump_data[41] == 7) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M3;
					EAP_PRINT("EAP Packet, WPS M3");
				} else if (dump_data[41] == 8) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M4;
					EAP_PRINT("EAP Packet, WPS M4");
				} else if (dump_data[41] == 9) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M5;
					EAP_PRINT("EAP Packet, WPS M5");
				} else if (dump_data[41] == 10) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M6;
					EAP_PRINT("EAP Packet, WPS M6");
				} else if (dump_data[41] == 11) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M7;
					EAP_PRINT("EAP Packet, WPS M7");
				} else if (dump_data[41] == 12) {
					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M8;
					EAP_PRINT("EAP Packet, WPS M8");
				}
			} else if (dump_data[30] == 5) {
				dhd->conf->eapol_status = EAPOL_STATUS_WPS_DONE;
				EAP_PRINT("EAP Packet, WSC Done");
			}
		} else {
			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
				ifname, direction ? "TX" : "RX",
				dump_data[14], dump_data[15], dump_data[30]));
		}
	} else if (type == 3 && dump_data[18] == 2) {
		/* EAPOL-Key (RSN): decode Key Information bits at [19..20] */
		us_tmp = (dump_data[19] << 8) | dump_data[20];
		pair = 0 != (us_tmp & 0x08);
		ack = 0 != (us_tmp & 0x80);
		mic = 0 != (us_tmp & 0x100);
		kerr = 0 != (us_tmp & 0x400);
		req = 0 != (us_tmp & 0x800);
		sec = 0 != (us_tmp & 0x200);
		install = 0 != (us_tmp & 0x40);

		/* Identify M1..M4 by the flag combination of each message */
		if (!sec && !mic && ack && !install && pair && !kerr && !req) {
			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M1;
			EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
		} else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M2;
			EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
		} else if (pair && ack && mic && sec && !kerr && !req) {
			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M3;
			EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
		} else if (pair && !install && !ack && mic && sec && !req && !kerr) {
			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M4;
			EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
		} else {
			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
				ifname, direction ? "TX" : "RX",
				dump_data[14], dump_data[15], dump_data[30]));
		}
	} else {
		DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
			ifname, direction ?
"TX" : "RX",
			dump_data[14], dump_data[15], dump_data[30]));
	}
}

#ifdef SHOW_LOGTRACE
/*
 * Parse a logstrs.bin image (raw_fmts, logstrs_size bytes) into event_log:
 * builds the fmts[] table mapping log numbers to format strings.  Supports
 * the v1/v2 headered formats (trailer magic at end of file) and the legacy
 * header-less RAM-only layout.  For v2 the firmware ID embedded at the end
 * of the fw binary (st_str_file_path) must match the one in the header.
 * Returns BCME_OK, BCME_DECERR on fw-id mismatch, or BCME_ERROR.
 */
int
dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
		dhd_event_log_t *event_log)
{
	uint32 *lognums = NULL;
	char *logstrs = NULL;
	logstr_trailer_t *trailer = NULL;
	int ram_index = 0;
	char **fmts = NULL;
	int num_fmts = 0;
	bool match_fail = TRUE;
	int32 i = 0;
	uint8 *pfw_id = NULL;
	uint32 fwid = 0;
	void *file = NULL;
	int file_len = 0;
	char fwid_str[FWID_STR_LEN];
	uint32 hdr_logstrs_size = 0;

	/* Read last three words in the logstrs.bin file */
	trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
		sizeof(logstr_trailer_t));

	if (trailer->log_magic == LOGSTRS_MAGIC) {
		/*
		 * logstrs.bin has a header.
		 */
		if (trailer->version == 1) {
			logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
				logstrs_size - sizeof(logstr_header_v1_t));
			DHD_INFO(("%s: logstr header version = %u\n",
				__FUNCTION__, hdr_v1->version));
			num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr_v1->ram_lognums_offset -
				hdr_v1->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
			logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset];
			hdr_logstrs_size = hdr_v1->logstrs_size;
		} else if (trailer->version == 2) {
			logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
				sizeof(logstr_header_t));
			DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
				__FUNCTION__, hdr->trailer.version, hdr->trailer.flags));

			/* For ver. 2 of the header, need to match fwid of
			 * both logstrs.bin and fw bin
			 */

			/* read the FWID from fw bin */
			file = dhd_os_open_image1(NULL, st_str_file_path);
			if (!file) {
				DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
				goto error;
			}
			file_len = dhd_os_get_image_size(file);
			if (file_len <= 0) {
				DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
				goto error;
			}
			/* fwid is at the end of fw bin in string format */
			if (dhd_os_seek_file(file, file_len - sizeof(fwid_str) - 1) < 0) {
				DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
				goto error;
			}

			memset(fwid_str, 0, sizeof(fwid_str));
			if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
				DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
				goto error;
			}
			/* locate either known fw-id marker string */
			pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
					FWID_STR_1, strlen(FWID_STR_1));
			if (!pfw_id) {
				pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
						FWID_STR_2, strlen(FWID_STR_2));
				if (!pfw_id) {
					DHD_ERROR(("%s: could not find id in FW bin!\n",
						__FUNCTION__));
					goto error;
				}
			}
			/* search for the '-' in the fw id str, after which the
			 * actual 4 byte fw id is present
			 */
			while (pfw_id && *pfw_id != '-') {
				++pfw_id;
			}
			++pfw_id;
			fwid = bcm_strtoul((char *)pfw_id, NULL, 16);

			/* check if fw id in logstrs.bin matches the fw one */
			if (hdr->trailer.fw_id != fwid) {
				DHD_ERROR(("%s: logstr id does not match FW!\n", __FUNCTION__));
				goto error;
			}

			match_fail = FALSE;
			num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr->ram_lognums_offset -
				hdr->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
			logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
			hdr_logstrs_size = hdr->logstrs_size;

/* NOTE(review): the success path above intentionally falls through this
 * label with match_fail == FALSE; only real failures return BCME_DECERR.
 */
error:
			if (file) {
				dhd_os_close_image1(NULL, file);
			}
			if (match_fail) {
				return BCME_DECERR;
			}
		} else {
			DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
				trailer->version));
			return BCME_ERROR;
		}
		if (logstrs_size != hdr_logstrs_size) {
			DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
			return BCME_ERROR;
		}
	} else {
		/*
		 * Legacy logstrs.bin format without header.
		 */
		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);

		/* Legacy RAM-only logstrs.bin format:
		 *	  - RAM 'lognums' section
		 *	  - RAM 'logstrs' section.
		 *
		 * 'lognums' is an array of indexes for the strings in the
		 * 'logstrs' section. The first uint32 is an index to the
		 * start of 'logstrs'. Therefore, if this index is divided
		 * by 'sizeof(uint32)' it provides the number of logstr
		 *	entries.
		 */
		ram_index = 0;
		lognums = (uint32 *) raw_fmts;
		logstrs = (char *) &raw_fmts[num_fmts << 2];
	}
	if (num_fmts)
		fmts = MALLOC(osh, num_fmts * sizeof(char *));
	if (fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
		return BCME_ERROR;
	}
	event_log->fmts_size = num_fmts  * sizeof(char *);

	for (i = 0; i < num_fmts; i++) {
		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
		 * (they are 0-indexed relative to 'rom_logstrs_offset').
		 *
		 * RAM lognums are already indexed to point to the correct RAM logstrs (they
		 * are 0-indexed relative to the start of the logstrs.bin file).
*/
		if (i == ram_index) {
			/* switch base from ROM strings to start-of-file for RAM entries */
			logstrs = raw_fmts;
		}
		fmts[i] = &logstrs[lognums[i]];
	}
	/* publish the parsed tables; event_log takes ownership of fmts */
	event_log->fmts = fmts;
	event_log->raw_fmts_size = logstrs_size;
	event_log->raw_fmts = raw_fmts;
	event_log->num_fmts = num_fmts;
	return BCME_OK;
} /* dhd_parse_logstrs_file */

/*
 * Scan the firmware .map file (open handle `file`) for the ramstart,
 * rodata_start and rodata_end addresses, reading it in READ_NUM_BYTES
 * windows that overlap by GO_BACK_FILE_POS_NUM_BYTES so a marker split
 * across a window boundary is not missed.  Returns BCME_OK when all three
 * values were found, BCME_ERROR otherwise.
 */
int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
		uint32 *rodata_end)
{
	char *raw_fmts = NULL, *raw_fmts_loc = NULL;
	uint32 read_size = READ_NUM_BYTES;
	int error = 0;
	char * cptr = NULL;
	char c;
	uint8 count = 0;	/* bitmask of RAMSTART_BIT|RDSTART_BIT|RDEND_BIT found so far */

	*ramstart = 0;
	*rodata_start = 0;
	*rodata_end = 0;

	/* Allocate 1 byte more than read_size to terminate it with NULL */
	raw_fmts = MALLOCZ(osh, read_size + 1);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* read ram start, rodata_start and rodata_end values from map  file */
	while (count != ALL_MAP_VAL)
	{
		error = dhd_os_read_file(file, raw_fmts, read_size);
		if (error < 0) {
			DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
				error));
			goto fail;
		}

		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
		raw_fmts[read_size] = '\0';

		/* Get ramstart address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RAMSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
			strlen(ramstart_str)))) {
			/* the hex address sits BYTES_AHEAD_NUM chars before the marker */
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c text_start", ramstart, &c);
			count |= RAMSTART_BIT;
		}

		/* Get ram rodata start address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
			strlen(rodata_start_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
			count |= RDSTART_BIT;
		}

		/* Get ram rodata end address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDEND_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
			strlen(rodata_end_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
			count |= RDEND_BIT;
		}

		if (error < (int)read_size) {
			/*
			 * since we reset file pos back to earlier pos by
			 * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
			 * The reason for this is if string is spreaded across
			 * bytes, the read function should not miss it.
			 * So if ret value is less than read_size, reached EOF don't read further
			 */
			break;
		}
		memset(raw_fmts, 0, read_size);
		/*
		 * go back to predefined NUM of bytes so that we won't miss
		 * the string and addr even if it comes as splited in next read.
		 */
		dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
	}

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, read_size + 1);
		raw_fmts = NULL;
	}
	if (count == ALL_MAP_VAL) {
		return BCME_OK;
	}
	else {
		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
			count));
		return BCME_ERROR;
	}

} /* dhd_parse_map_file */

#ifdef PCIE_FULL_DONGLE
/*
 * Validate and unwrap a PCIe "info buffer" packet (version word + v1
 * type/length header) and hand the contained logtrace payload to
 * dhd_dbg_trace_evnt_handler().  Returns BCME_OK or BCME_ERROR; the
 * packet headers are consumed (PKTPULL) as they are parsed.
 */
int
dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
		dhd_event_log_t *event_data)
{
	uint32 infobuf_version;
	info_buf_payload_hdr_t *payload_hdr_ptr;
	uint16 payload_hdr_type;
	uint16 payload_hdr_length;

	DHD_TRACE(("%s:Enter\n", __FUNCTION__));

	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
		DHD_ERROR(("%s: infobuf too small for version field\n",
			__FUNCTION__));
		goto exit;
	}
	infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
	PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
	if (infobuf_version != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
			__FUNCTION__, infobuf_version));
		goto exit;
	}

	/* Version 1 infobuf has a single type/length (and then value) field */
	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
			__FUNCTION__));
		goto exit;
	}
	/* Process/parse the common info payload
header (type/length) */ + payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf); + payload_hdr_type = ltoh16(payload_hdr_ptr->type); + payload_hdr_length = ltoh16(payload_hdr_ptr->length); + if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) { + DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n", + __FUNCTION__, payload_hdr_type)); + goto exit; + } + PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t)); + + /* Validate that the specified length isn't bigger than the + * provided data. + */ + if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) { + DHD_ERROR(("%s: infobuf logtrace length is bigger" + " than actual buffer data\n", __FUNCTION__)); + goto exit; + } + dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf), + event_data, payload_hdr_length); + + return BCME_OK; + +exit: + return BCME_ERROR; +} /* dhd_event_logtrace_infobuf_pkt_process */ +#endif /* PCIE_FULL_DONGLE */ +#endif /* SHOW_LOGTRACE */ + +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + +/* To handle the TDLS event in the dhd_common.c + */ +int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event) +{ + int ret = BCME_OK; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + ret = dhd_tdls_update_peer_info(dhd_pub, event); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + return ret; +} + +int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub) +{ + tdls_peer_node_t *cur = NULL, *prev = NULL; + if (!dhd_pub) + return BCME_ERROR; + cur = dhd_pub->peer_tbl.node; + + if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count) + return BCME_ERROR; + + while (cur != NULL) { + prev = cur; + cur = cur->next; + MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t)); + } + dhd_pub->peer_tbl.tdls_peer_count = 0; + dhd_pub->peer_tbl.node = NULL; + return BCME_OK; +} +#endif /* #if defined(WLTDLS) && 
defined(PCIE_FULL_DONGLE) */ + +/* pretty hex print a contiguous buffer +* based on the debug level specified +*/ +void +dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level) +{ + char line[128], *p; + int len = sizeof(line); + int nchar; + uint i; + + if (msg && (msg[0] != '\0')) { + if (dbg_level == DHD_ERROR_VAL) + DHD_ERROR(("%s:\n", msg)); + else if (dbg_level == DHD_INFO_VAL) + DHD_INFO(("%s:\n", msg)); + else if (dbg_level == DHD_TRACE_VAL) + DHD_TRACE(("%s:\n", msg)); + } + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + nchar = snprintf(p, len, " %04x: ", i); /* line prefix */ + p += nchar; + len -= nchar; + } + if (len > 0) { + nchar = snprintf(p, len, "%02x ", buf[i]); + p += nchar; + len -= nchar; + } + + if (i % 16 == 15) { + /* flush line */ + if (dbg_level == DHD_ERROR_VAL) + DHD_ERROR(("%s:\n", line)); + else if (dbg_level == DHD_INFO_VAL) + DHD_INFO(("%s:\n", line)); + else if (dbg_level == DHD_TRACE_VAL) + DHD_TRACE(("%s:\n", line)); + p = line; + len = sizeof(line); + } + } + + /* flush last partial line */ + if (p != line) { + if (dbg_level == DHD_ERROR_VAL) + DHD_ERROR(("%s:\n", line)); + else if (dbg_level == DHD_INFO_VAL) + DHD_INFO(("%s:\n", line)); + else if (dbg_level == DHD_TRACE_VAL) + DHD_TRACE(("%s:\n", line)); + } +} + +#ifdef DUMP_IOCTL_IOV_LIST +void +dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node) +{ + dll_t *item; + dhd_iov_li_t *iov_li; + dhd->dump_iovlist_len++; + + if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) { + item = dll_head_p(list_head); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + dll_delete(item); + MFREE(dhd->osh, iov_li, sizeof(*iov_li)); + dhd->dump_iovlist_len--; + } + dll_append(list_head, node); +} + +void +dhd_iov_li_print(dll_t *list_head) +{ + dhd_iov_li_t *iov_li; + dll_t *item, *next; + uint8 index = 0; + for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) { + next = dll_next_p(item); + iov_li = 
(dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd)); + } +} + +void +dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head) +{ + dll_t *item; + dhd_iov_li_t *iov_li; + while (!(dll_empty(list_head))) { + item = dll_head_p(list_head); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + dll_delete(item); + MFREE(dhd->osh, iov_li, sizeof(*iov_li)); + } +} +#endif /* DUMP_IOCTL_IOV_LIST */ + +/* configuations of ecounters to be enabled by default in FW */ +static ecounters_cfg_t ecounters_cfg_tbl[] = { + /* Global ecounters */ + {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE}, + // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS}, + // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS}, + + /* Slice specific ecounters */ + {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE}, + {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE}, + {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX}, + + /* Interface specific ecounters */ + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC}, + {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT}, + + /* secondary interface */ +}; + +static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = { + /* Interface specific event ecounters */ + {WLC_E_LINK, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS}, +}; + +/* Accepts an argument to -s, -g or -f and creates an XTLV */ +int +dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx, + uint16 stats_rep, uint8 **xtlv) +{ + uint8 *req_xtlv = NULL; + ecounters_stats_types_report_req_t *req; + bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf; + 
ecountersv2_xtlv_list_elt_t temp; + uint16 xtlv_len = 0, total_len = 0; + int rc = BCME_OK; + + /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */ + temp.id = stats_rep; + temp.len = 0; + + /* Hence len/data = 0/NULL */ + xtlv_len += temp.len + BCM_XTLV_HDR_SIZE; + + /* Total length of the container */ + total_len = BCM_XTLV_HDR_SIZE + + OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len; + + /* Now allocate a structure for the entire request */ + if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + /* container XTLV context */ + bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len, + BCM_XTLV_OPTION_ALIGN32); + + /* Fill other XTLVs in the container. Leave space for XTLV headers */ + req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE); + req->flags = type; + if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) { + req->slice_mask = 0x1 << if_slice_idx; + } else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) { + req->if_index = if_slice_idx; + } + + /* Fill remaining XTLVs */ + bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len, + BCM_XTLV_OPTION_ALIGN32); + if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) { + DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id)); + rc = BCME_ERROR; + goto fail; + } + + /* fill the top level container and get done with the XTLV container */ + rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL, + bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t, + stats_types_req)); + + if (rc) { + DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags)); + goto fail; + } + +fail: + if (rc && req_xtlv) { + MFREE(dhd->osh, req_xtlv, total_len); + req_xtlv = NULL; + } + + /* update the xtlv pointer */ + *xtlv = req_xtlv; + return rc; +} + +int +dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 
*logset_mask) +{ + wl_el_set_type_t logset_type, logset_op; + int ret = BCME_ERROR; + int i = 0, err = 0; + + if (!dhd || !logset_mask) + return BCME_BADARG; + + *logset_mask = 0; + memset(&logset_type, 0, sizeof(logset_type)); + memset(&logset_op, 0, sizeof(logset_op)); + logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION); + logset_type.len = htod16(sizeof(wl_el_set_type_t)); + for (i = 0; i < WL_MAX_PRESERVE_BUFFER; i++) { + logset_type.set = i; + err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type, + sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE); + /* the iovar may return 'unsupported' error if a log set number is not present + * in the fw, so we should not return on error ! + */ + if (err == BCME_OK && + logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) { + *logset_mask |= 0x01u << i; + ret = BCME_OK; + DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i)); + } + } + + return ret; +} + +int +dhd_start_ecounters(dhd_pub_t *dhd) +{ + uint8 i = 0; + uint8 *start_ptr; + uint32 buf; + int rc = BCME_OK; + bcm_xtlv_t *elt; + ecounters_config_request_v2_t *req = NULL; + ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL; + ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL; + uint16 total_processed_containers_len = 0; + + rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + + if (rc != BCME_UNSUPPORTED) + return rc; + + rc = BCME_OK; + + for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) { + ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i]; + + if ((list_elt = (ecountersv2_processed_xtlv_list_elt *) + MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) { + DHD_ERROR(("Ecounters v2: No memory to process\n")); + goto fail; + } + + rc = dhd_create_ecounters_params(dhd, ecounter_stat->type, + ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data); + + if (rc) { + DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n", + 
ecounter_stat->stats_rep, rc)); + + /* Free allocated memory and go to fail to release any memories allocated + * in previous iterations. Note that list_elt->data gets populated in + * dhd_create_ecounters_params() and gets freed there itself. + */ + MFREE(dhd->osh, list_elt, sizeof(*list_elt)); + list_elt = NULL; + goto fail; + } + elt = (bcm_xtlv_t *) list_elt->data; + + /* Put the elements in the order they are processed */ + if (processed_containers_list == NULL) { + processed_containers_list = list_elt; + } else { + tail->next = list_elt; + } + tail = list_elt; + /* Size of the XTLV returned */ + total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE; + } + + /* Now create ecounters config request with totallength */ + req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) + + total_processed_containers_len); + + if (req == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + req->version = ECOUNTERS_VERSION_2; + req->logset = EVENT_LOG_SET_ECOUNTERS; + req->reporting_period = ECOUNTERS_DEFAULT_PERIOD; + req->num_reports = ECOUNTERS_NUM_REPORTS; + req->len = total_processed_containers_len + + OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs); + + /* Copy config */ + start_ptr = req->ecounters_xtlvs; + + /* Now go element by element in the list */ + while (processed_containers_list) { + list_elt = processed_containers_list; + + elt = (bcm_xtlv_t *)list_elt->data; + + memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + processed_containers_list = processed_containers_list->next; + + /* Free allocated memories */ + MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE); + MFREE(dhd->osh, list_elt, sizeof(*list_elt)); + } + + if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) { + DHD_ERROR(("failed to start ecounters\n")); + } + +fail: + if (req) { + MFREE(dhd->osh, req, sizeof(*req) + 
total_processed_containers_len); + } + + /* Now go element by element in the list */ + while (processed_containers_list) { + list_elt = processed_containers_list; + elt = (bcm_xtlv_t *)list_elt->data; + processed_containers_list = processed_containers_list->next; + + /* Free allocated memories */ + MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE); + MFREE(dhd->osh, list_elt, sizeof(*list_elt)); + } + return rc; +} + +int +dhd_stop_ecounters(dhd_pub_t *dhd) +{ + int rc = BCME_OK; + ecounters_config_request_v2_t *req; + + /* Now create ecounters config request with totallength */ + req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req)); + + if (req == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + req->version = ECOUNTERS_VERSION_2; + req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs); + + if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) { + DHD_ERROR(("failed to stop ecounters\n")); + } + +fail: + if (req) { + MFREE(dhd->osh, req, sizeof(*req)); + } + return rc; +} + +/* configured event_id_array for event ecounters */ +typedef struct event_id_array { + uint8 event_id; + uint8 str_idx; +} event_id_array_t; + +/* get event id array only from event_ecounters_cfg_tbl[] */ +static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array) +{ + uint8 i; + uint8 idx = 0; + int32 prev_evt_id = -1; + + for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) { + if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) { + if (prev_evt_id >= 0) + idx++; + event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id; + event_array[idx].str_idx = i; + } + prev_evt_id = event_ecounters_cfg_tbl[i].event_id; + } + return idx; +} + +/* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 interface */ +#define ECNTRS_MAX_XTLV_NUM (31 * 2) + +int +dhd_start_event_ecounters(dhd_pub_t *dhd) +{ + uint8 i, j = 0; + uint8 event_id_cnt = 0; + uint16 
processed_containers_len = 0; + uint16 max_xtlv_len = 0; + int rc = BCME_OK; + uint8 *ptr; + uint8 *data; + event_id_array_t *id_array; + bcm_xtlv_t *elt = NULL; + event_ecounters_config_request_v2_t *req = NULL; + + id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) * + ARRAYSIZE(event_ecounters_cfg_tbl)); + + if (id_array == NULL) { + rc = BCME_NOMEM; + goto fail; + } + event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array); + + max_xtlv_len = ((BCM_XTLV_HDR_SIZE + + OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) * + ECNTRS_MAX_XTLV_NUM); + + /* Now create ecounters config request with max allowed length */ + req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, + sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len); + + if (req == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + for (i = 0; i <= event_id_cnt; i++) { + /* req initialization by event id */ + req->version = ECOUNTERS_VERSION_2; + req->logset = EVENT_LOG_SET_ECOUNTERS; + req->event_id = id_array[i].event_id; + req->flags = EVENT_ECOUNTERS_FLAGS_ADD; + req->len = 0; + processed_containers_len = 0; + + /* Copy config */ + ptr = req->ecounters_xtlvs; + + for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) { + event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j]; + if (id_array[i].event_id != event_ecounter_stat->event_id) + break; + + rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type, + event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep, + &data); + + if (rc) { + DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n", + __FUNCTION__, event_ecounter_stat->stats_rep, rc)); + goto fail; + } + + elt = (bcm_xtlv_t *)data; + + memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE); + processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE; + + /* Free allocated memories alloced by 
dhd_create_ecounters_params */ + MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE); + + if (processed_containers_len > max_xtlv_len) { + DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n", + __FUNCTION__)); + rc = BCME_BADLEN; + goto fail; + } + } + + req->len = processed_containers_len + + OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs); + + DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n", + __FUNCTION__, req->version, req->logset, req->event_id, + req->flags, req->len)); + + rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE); + + if (rc < 0) { + DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n", + req->event_id, rc)); + goto fail; + } + } + +fail: + /* Free allocated memories */ + if (req) { + MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len); + } + if (id_array) { + MFREE(dhd->osh, id_array, sizeof(event_id_array_t) * + ARRAYSIZE(event_ecounters_cfg_tbl)); + } + + return rc; +} + +int +dhd_stop_event_ecounters(dhd_pub_t *dhd) +{ + int rc = BCME_OK; + event_ecounters_config_request_v2_t *req; + + /* Now create ecounters config request with totallength */ + req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req)); + + if (req == NULL) { + rc = BCME_NOMEM; + goto fail; + } + + req->version = ECOUNTERS_VERSION_2; + req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL; + req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs); + + if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) { + DHD_ERROR(("failed to stop event_ecounters\n")); + } + +fail: + if (req) { + MFREE(dhd->osh, req, sizeof(*req)); + } + return rc; +} + +#ifdef DHD_LOG_DUMP +int +dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file, + unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr) +{ + uint32 rlen = 0; + uint32 data_len = 0, total_len = 0; + void *data = NULL; + unsigned long 
fpos_sechdr = 0; + unsigned long flags = 0; + int ret = 0; + dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr; + + if (!dhdp || !ring || !file || !sec_hdr || !file_posn) + return BCME_BADARG; + + /* do not allow further writes to the ring + * till we flush it + */ + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_SUSPEND; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + if (dhdp->concise_dbg_buf) { + /* re-use concise debug buffer temporarily + * to pull ring data, to write + * record by record to file + */ + data_len = CONCISE_DUMP_BUFLEN; + data = dhdp->concise_dbg_buf; + dhd_os_write_file_posn(file, file_posn, ECNTRS_LOG_HDR, + strlen(ECNTRS_LOG_HDR)); + /* write the section header now with zero length, + * once the correct length is found out, update + * it later + */ + fpos_sechdr = *file_posn; + sec_hdr->type = LOG_DUMP_SECTION_ECNTRS; + sec_hdr->length = 0; + dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr, + sizeof(*sec_hdr)); + do { + rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE); + if (rlen > 0) { + /* write the log */ + ret = dhd_os_write_file_posn(file, file_posn, data, rlen); + if (ret < 0) { + DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_ERROR; + } + } + total_len += rlen; + } while (rlen > 0); + /* now update the section header length in the file */ + sec_hdr->length = total_len; + dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr)); + } else { + DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__)); + } + + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return BCME_OK; +} + +/* logdump cookie */ +#define MAX_LOGUDMP_COOKIE_CNT 10u +#define LOGDUMP_COOKIE_STR_LEN 50u +int +dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size) +{ + uint32 ring_size; + + if (!dhdp || !buf) 
{ + DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf)); + return BCME_ERROR; + } + + ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT; + if (buf_size < ring_size) { + DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n", + ring_size, buf_size)); + return BCME_ERROR; + } + + dhdp->logdump_cookie = dhd_ring_init(buf, buf_size, + LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT); + if (!dhdp->logdump_cookie) { + DHD_ERROR(("FAIL TO INIT COOKIE RING\n")); + return BCME_ERROR; + } + + return BCME_OK; +} + +void +dhd_logdump_cookie_deinit(dhd_pub_t *dhdp) +{ + if (!dhdp) { + return; + } + if (dhdp->logdump_cookie) { + dhd_ring_deinit(dhdp->logdump_cookie); + } + + return; +} + +void +dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type) +{ + char *ptr; + + if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) { + DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p" + " type = %p, cookie_cfg:%p\n", __FUNCTION__, + dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL)); + return; + } + ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie); + if (ptr == NULL) { + DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__)); + return; + } + scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie); + return; +} + +int +dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size) +{ + char *ptr; + + if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) { + DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p" + "cookie=%p cookie_cfg:%p\n", __FUNCTION__, + dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL)); + return BCME_ERROR; + } + ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie); + if (ptr == NULL) { + DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__)); + return BCME_ERROR; + } + memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr))); + dhd_ring_free_first(dhdp->logdump_cookie); + return BCME_OK; +} + +int +dhd_logdump_cookie_count(dhd_pub_t *dhdp) +{ + if (!dhdp 
|| !dhdp->logdump_cookie) { + DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL)); + return 0; + } + return dhd_ring_get_cur_size(dhdp->logdump_cookie); +} + +static inline int +__dhd_log_dump_cookie_to_file( + dhd_pub_t *dhdp, void *fp, unsigned long *f_pos, char *buf, uint32 buf_size) +{ + + uint32 remain = buf_size; + int ret = BCME_ERROR; + char tmp_buf[LOGDUMP_COOKIE_STR_LEN]; + log_dump_section_hdr_t sec_hdr; + while (dhd_logdump_cookie_count(dhdp) > 0) { + memset(tmp_buf, 0, sizeof(tmp_buf)); + ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN); + if (ret != BCME_OK) { + return ret; + } + remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf); + } + ret = dhd_os_write_file_posn(fp, f_pos, COOKIE_LOG_HDR, strlen(COOKIE_LOG_HDR)); + if (ret < 0) { + DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__)); + return ret; + } + sec_hdr.magic = LOG_DUMP_MAGIC; + sec_hdr.timestamp = local_clock(); + sec_hdr.type = LOG_DUMP_SECTION_COOKIE; + sec_hdr.length = buf_size - remain; + ret = dhd_os_write_file_posn(fp, f_pos, (char *)&sec_hdr, sizeof(sec_hdr)); + if (ret < 0) { + DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__)); + return ret; + } + + ret = dhd_os_write_file_posn(fp, f_pos, buf, sec_hdr.length); + if (ret < 0) { + DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__)); + } + + return ret; +} + +int +dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, unsigned long *f_pos) +{ + char *buf; + int ret = BCME_ERROR; + uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN; + + if (!dhdp || !dhdp->logdump_cookie ||!fp || !f_pos) { + DHD_ERROR(("%s At least one ptr is NULL " + "dhdp = %p cookie %p fp = %p f_pos = %p\n", + __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos)); + return ret; + } + + buf = (char *)MALLOCZ(dhdp->osh, buf_size); + if (!buf) { + DHD_ERROR(("%s Fail to malloc 
buffer\n", __FUNCTION__)); + return ret; + } + ret = __dhd_log_dump_cookie_to_file(dhdp, fp, f_pos, buf, buf_size); + MFREE(dhdp->osh, buf, buf_size); + + return ret; +} + +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_LOG_DUMP +void +dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd) +{ + log_dump_type_t *flush_type; + + if (!dhdp) { + DHD_ERROR(("dhdp is NULL !\n")); + return; + } + + if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) { + DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__)); + return; + } + + flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t)); + if (!flush_type) { + DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__)); + return; + } + clear_debug_dump_time(dhdp->debug_dump_time_str); + /* */ + dhdp->debug_dump_subcmd = subcmd; + + if (flush_type) { + *flush_type = DLD_BUF_TYPE_ALL; + dhd_schedule_log_dump(dhdp, flush_type); + } +#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP) + dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ +} +#endif /* DHD_LOG_DUMP */ + +#ifdef EWP_EDL +/* For now we are allocating memory for EDL ring using DMA_ALLOC_CONSISTENT +* The reason being that, in hikey, if we try to DMA_MAP prealloced memory +* it is failing with an 'out of space in SWIOTLB' error +*/ +int +dhd_edl_mem_init(dhd_pub_t *dhd) +{ + int ret = 0; + + memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem)); + ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE); + if (ret != BCME_OK) { + DHD_ERROR(("%s: alloc of edl_ring_mem failed\n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} + +/* NOTE:- that dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf +* for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init. 
+*/ +void +dhd_edl_mem_deinit(dhd_pub_t *dhd) +{ + if (dhd->edl_ring_mem.va != NULL) + dhd_dma_buf_free(dhd, &dhd->edl_ring_mem); +} + +int +dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data, + void *evt_decode_data) +{ + msg_hdr_edl_t *msg = NULL; + cmn_msg_hdr_t *cmn_msg_hdr = NULL; + uint8 *buf = NULL; + + if (!data || !dhdp || !evt_decode_data) { + DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__)); + return BCME_ERROR; + } + + /* format of data in each work item in the EDL ring: + * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t| + * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|| + */ + cmn_msg_hdr = (cmn_msg_hdr_t *)data; + msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t)); + buf = (uint8 *)msg; + /* validate the fields */ + if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) { + DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)" + " expected (0x%x)\n", __FUNCTION__, + msg->infobuf_ver, PCIE_INFOBUF_V1)); + return BCME_VERSION; + } + + /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */ + if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) { + DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n", + __FUNCTION__)); + return BCME_BUFTOOLONG; + } + + if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) { + DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n", + __FUNCTION__, ltoh16(msg->pyld_hdr.type))); + return BCME_BADOPTION; + } + + if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) { + DHD_ERROR(("%s: infobuf logtrace length %u is bigger" + " than available buffer size %u\n", __FUNCTION__, + ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id)); + return BCME_BADLEN; + } + + /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */ + buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr); + dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data, + ltoh16(msg->pyld_hdr.length)); + + /* check 
'dhdp->logtrace_pkt_sendup' and if true alloc an skb + * copy the event data to the skb and send it up the stack + */ + if (dhdp->logtrace_pkt_sendup) { + DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__, + (uint32)(ltoh16(msg->pyld_hdr.length) + + sizeof(info_buf_payload_hdr_t) + 4))); + dhd_sendup_info_buf(dhdp, (uint8 *)msg); + } + + return BCME_OK; +} +#endif /* EWP_EDL */ + +#if defined(SHOW_LOGTRACE) +int +dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath) +{ + void *file = NULL; + int size = 0; + char buf[FW_VER_STR_LEN]; + char *str = NULL; + int ret = BCME_OK; + + if (!fwpath) + return BCME_BADARG; + + file = dhd_os_open_image1(dhdp, fwpath); + if (!file) { + ret = BCME_ERROR; + goto exit; + } + size = dhd_os_get_image_size(file); + if (!size) { + ret = BCME_ERROR; + goto exit; + } + + /* seek to the last 'X' bytes in the file */ + if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) { + ret = BCME_ERROR; + goto exit; + } + + /* read the last 'X' bytes of the file to a buffer */ + memset(buf, 0, FW_VER_STR_LEN); + if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) { + ret = BCME_ERROR; + goto exit; + } + /* search for 'Version' in the buffer */ + str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR)); + if (!str) { + ret = BCME_ERROR; + goto exit; + } + /* go back in the buffer to the last ascii character */ + while (str != buf && + (*str >= ' ' && *str <= '~')) { + --str; + } + /* reverse the final decrement, so that str is pointing + * to the first ascii character in the buffer + */ + ++str; + + if (strlen(str) > (FW_VER_STR_LEN - 1)) { + ret = BCME_BADLEN; + goto exit; + } + + DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str)); + /* copy to global variable, so that in case FW load fails, the + * core capture logs will contain FW version read from the file + */ + memset(fw_version, 0, FW_VER_STR_LEN); + strlcpy(fw_version, str, FW_VER_STR_LEN); + +exit: + if (file) + 
dhd_os_close_image1(dhdp, file); + + return ret; +} +#endif // endif + +#if defined(DHD_H2D_LOG_TIME_SYNC) +/* + * Helper function: + * Used for Dongle console message time syncing with Host printk + */ +void dhd_h2d_log_time_sync(dhd_pub_t *dhd) +{ + uint64 ts; + + /* + * local_clock() returns time in nano seconds. + * Dongle understand only milli seconds time. + */ + ts = local_clock(); + /* Nano seconds to milli seconds */ + do_div(ts, 1000000); + if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__)); + /* Stopping HOST Dongle console time syncing */ + dhd->dhd_rte_time_sync_ms = 0; + } +} +#endif /* DHD_H2D_LOG_TIME_SYNC */ diff --git a/bcmdhd.100.10.315.x/dhd_config.c b/bcmdhd.100.10.315.x/dhd_config.c new file mode 100644 index 0000000..eedac09 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_config.c @@ -0,0 +1,2896 @@ + +#include +#include + +#include +#include +#include +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +#include +#include +#include +#include +#endif + +#include +#include + +/* message levels */ +#define CONFIG_ERROR_LEVEL 0x0001 +#define CONFIG_TRACE_LEVEL 0x0002 + +uint config_msg_level = CONFIG_ERROR_LEVEL; + +#define CONFIG_ERROR(x) \ + do { \ + if (config_msg_level & CONFIG_ERROR_LEVEL) { \ + printk(KERN_ERR "CONFIG-ERROR) "); \ + printk x; \ + } \ + } while (0) +#define CONFIG_TRACE(x) \ + do { \ + if (config_msg_level & CONFIG_TRACE_LEVEL) { \ + printk(KERN_ERR "CONFIG-TRACE) "); \ + printk x; \ + } \ + } while (0) + +#define MAXSZ_BUF 1000 +#define MAXSZ_CONFIG 4096 + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i + +typedef struct cihp_name_map_t { + uint chip; + uint chiprev; + uint ag_type; + bool clm; + char *chip_name; + char *module_name; +} cihp_name_map_t; + +/* Map of WLC_E events to connection failure strings */ +#define DONT_CARE 9999 +const 
cihp_name_map_t chip_name_map [] = { + /* ChipID Chiprev AG CLM ChipName ModuleName */ +#ifdef BCMSDIO + {BCM43362_CHIP_ID, 0, DONT_CARE, FALSE, "bcm40181a0", ""}, + {BCM43362_CHIP_ID, 1, DONT_CARE, FALSE, "bcm40181a2", ""}, + {BCM4330_CHIP_ID, 4, FW_TYPE_G, FALSE, "bcm40183b2", ""}, + {BCM4330_CHIP_ID, 4, FW_TYPE_AG, FALSE, "bcm40183b2_ag", ""}, + {BCM43430_CHIP_ID, 0, DONT_CARE, FALSE, "bcm43438a0", "ap6212"}, + {BCM43430_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43438a1", "ap6212a"}, + {BCM43430_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43436b0", "ap6236"}, + {BCM43012_CHIP_ID, 1, DONT_CARE, TRUE, "bcm43013b0", ""}, + {BCM4334_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4334b1_ag", ""}, + {BCM43340_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""}, + {BCM43341_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""}, + {BCM4324_CHIP_ID, 5, DONT_CARE, FALSE, "bcm43241b4_ag", ""}, + {BCM4335_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4339a0_ag", ""}, + {BCM4339_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4339a0_ag", "ap6335"}, + {BCM4345_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", "ap6255"}, + {BCM43454_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", ""}, + {BCM4345_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", "ap6256"}, + {BCM43454_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", ""}, + {BCM4354_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4354a1_ag", ""}, + {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", "ap6356"}, + {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""}, + {BCM4371_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""}, + {BCM43569_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4358a3_ag", ""}, + {BCM4359_CHIP_ID, 5, DONT_CARE, FALSE, "bcm4359b1_ag", ""}, + {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_ag", "ap6398s"}, + {BCM43751_CHIP_ID, 1, DONT_CARE, TRUE, "bcm43751a1_ag", "ap6271s"}, +#endif +#ifdef BCMPCIE + {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""}, + {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""}, + {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_pcie_ag", 
""}, + {BCM4362_CHIP_ID, 0, DONT_CARE, TRUE, "bcm43752a0_pcie_ag", ""}, +#endif +#ifdef BCMDBUS + {BCM43143_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43143b0", ""}, + {BCM43242_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43242a1_ag", ""}, + {BCM43569_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4358u_ag", "ap62x8"}, +#endif +}; + +#ifdef BCMSDIO +void +dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list) +{ + int i; + + CONFIG_TRACE(("%s called\n", __FUNCTION__)); + if (mac_list->m_mac_list_head) { + for (i=0; icount; i++) { + if (mac_list->m_mac_list_head[i].mac) { + CONFIG_TRACE(("%s Free mac %p\n", __FUNCTION__, mac_list->m_mac_list_head[i].mac)); + kfree(mac_list->m_mac_list_head[i].mac); + } + } + CONFIG_TRACE(("%s Free m_mac_list_head %p\n", __FUNCTION__, mac_list->m_mac_list_head)); + kfree(mac_list->m_mac_list_head); + } + mac_list->count = 0; +} + +void +dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list) +{ + CONFIG_TRACE(("%s called\n", __FUNCTION__)); + + if (chip_nv_list->m_chip_nv_path_head) { + CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, chip_nv_list->m_chip_nv_path_head)); + kfree(chip_nv_list->m_chip_nv_path_head); + } + chip_nv_list->count = 0; +} + +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +void +dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih) +{ + uint32 gpiocontrol, addr; + + if (CHIPID(sih->chip) == BCM43362_CHIP_ID) { + printf("%s: Enable HW OOB for 43362\n", __FUNCTION__); + addr = SI_ENUM_BASE(sih) + OFFSETOF(chipcregs_t, gpiocontrol); + gpiocontrol = bcmsdh_reg_read(sdh, addr, 4); + gpiocontrol |= 0x2; + bcmsdh_reg_write(sdh, addr, 4, gpiocontrol); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL); + } +} +#endif + +#define SBSDIO_CIS_SIZE_LIMIT 0x200 +#define F0_BLOCK_SIZE 32 +int +dhd_conf_set_blksize(bcmsdh_info_t *sdh) +{ + int err = 0; + uint fn, numfn; + int32 blksize = 0, cur_blksize = 0; + uint8 
cisd; + + numfn = bcmsdh_query_iofnum(sdh); + + for (fn = 0; fn <= numfn; fn++) { + if (!fn) + blksize = F0_BLOCK_SIZE; + else { + bcmsdh_cisaddr_read(sdh, fn, &cisd, 24); + blksize = cisd; + bcmsdh_cisaddr_read(sdh, fn, &cisd, 25); + blksize |= cisd << 8; + } +#ifdef CUSTOM_SDIO_F2_BLKSIZE + if (fn == 2 && blksize > CUSTOM_SDIO_F2_BLKSIZE) { + blksize = CUSTOM_SDIO_F2_BLKSIZE; + } +#endif + bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32), + &cur_blksize, sizeof(int32), FALSE); + if (cur_blksize != blksize) { + printf("%s: fn=%d, blksize=%d, cur_blksize=%d\n", __FUNCTION__, + fn, blksize, cur_blksize); + blksize |= (fn<<16); + if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &blksize, + sizeof(blksize), TRUE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + err = -1; + } + } + } + + return err; +} + +int +dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, uint8 *mac) +{ + int i, err = -1; + uint8 *ptr = 0; + unsigned char tpl_code, tpl_link='\0'; + uint8 header[3] = {0x80, 0x07, 0x19}; + uint8 *cis; + + if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) { + CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__)); + return err; + } + bzero(cis, SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) { + CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err)); + MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT); + return err; + } + err = -1; // reset err; + ptr = cis; + do { + /* 0xff means we're done */ + tpl_code = *ptr; + ptr++; + if (tpl_code == 0xff) + break; + + /* null entries have no link field or data */ + if (tpl_code == 0x00) + continue; + + tpl_link = *ptr; + ptr++; + /* a size of 0xff also means we're done */ + if (tpl_link == 0xff) + break; + if (config_msg_level & CONFIG_TRACE_LEVEL) { + printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n", + __FUNCTION__, tpl_code, tpl_link, *ptr); + printk("%s: value:", __FUNCTION__); + for (i=0; iosh, cis, 
SBSDIO_CIS_SIZE_LIMIT); + + return err; +} + +void +dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *fw_path) +{ + int i, j; + uint8 mac[6]={0}; + int fw_num=0, mac_num=0; + uint32 oui, nic; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + int fw_type, fw_type_new; + char *name_ptr; + + mac_list = dhd->conf->fw_by_mac.m_mac_list_head; + fw_num = dhd->conf->fw_by_mac.count; + if (!mac_list || !fw_num) + return; + + if (dhd_conf_get_mac(dhd, sdh, sih, mac)) { + CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__)); + return; + } + oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]); + nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]); + + /* find out the last '/' */ + i = strlen(fw_path); + while (i > 0) { + if (fw_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &fw_path[i]; + + if (strstr(name_ptr, "_apsta")) + fw_type = FW_TYPE_APSTA; + else if (strstr(name_ptr, "_p2p")) + fw_type = FW_TYPE_P2P; + else if (strstr(name_ptr, "_mesh")) + fw_type = FW_TYPE_MESH; + else if (strstr(name_ptr, "_es")) + fw_type = FW_TYPE_ES; + else if (strstr(name_ptr, "_mfg")) + fw_type = FW_TYPE_MFG; + else + fw_type = FW_TYPE_STA; + + for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { + strcpy(name_ptr, mac_list[i].name); + printf("%s: matched oui=0x%06X, nic=0x%06X\n", + __FUNCTION__, oui, nic); + printf("%s: fw_path=%s\n", __FUNCTION__, fw_path); + return; + } + } + } + } +} + +void +dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *nv_path) +{ + int i, j; + uint8 mac[6]={0}; + int nv_num=0, mac_num=0; + uint32 oui, nic; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + char *pnv_name; + + mac_list = dhd->conf->nv_by_mac.m_mac_list_head; + nv_num = dhd->conf->nv_by_mac.count; + if (!mac_list || !nv_num) + return; + + if (dhd_conf_get_mac(dhd, sdh, sih, mac)) { + CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__)); + return; + } + oui = (mac[0] << 16) | (mac[1] 
<< 8) | (mac[2]); + nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]); + + /* find out the last '/' */ + i = strlen(nv_path); + while (i > 0) { + if (nv_path[i] == '/') break; + i--; + } + pnv_name = &nv_path[i+1]; + + for (i=0; i= mac_range[j].nic_start && nic <= mac_range[j].nic_end) { + strcpy(pnv_name, mac_list[i].name); + printf("%s: matched oui=0x%06X, nic=0x%06X\n", + __FUNCTION__, oui, nic); + printf("%s: nv_path=%s\n", __FUNCTION__, nv_path); + return; + } + } + } + } +} +#endif + +void +dhd_conf_free_country_list(conf_country_list_t *country_list) +{ + int i; + + CONFIG_TRACE(("%s called\n", __FUNCTION__)); + for (i=0; icount; i++) { + if (country_list->cspec[i]) { + CONFIG_TRACE(("%s Free cspec %p\n", __FUNCTION__, country_list->cspec[i])); + kfree(country_list->cspec[i]); + } + } + country_list->count = 0; +} + +void +dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path) +{ + int fw_type, ag_type; + uint chip, chiprev; + int i; + char *name_ptr; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (fw_path[0] == '\0') { +#ifdef CONFIG_BCMDHD_FW_PATH + bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1); + if (fw_path[0] == '\0') +#endif + { + printf("firmware path is null\n"); + return; + } + } +#ifndef FW_PATH_AUTO_SELECT + return; +#endif + + /* find out the last '/' */ + i = strlen(fw_path); + while (i > 0) { + if (fw_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &fw_path[i]; +#ifdef BAND_AG + ag_type = FW_TYPE_AG; +#else + ag_type = strstr(name_ptr, "_ag") ? 
FW_TYPE_AG : FW_TYPE_G; +#endif + if (strstr(name_ptr, "_apsta")) + fw_type = FW_TYPE_APSTA; + else if (strstr(name_ptr, "_p2p")) + fw_type = FW_TYPE_P2P; + else if (strstr(name_ptr, "_mesh")) + fw_type = FW_TYPE_MESH; + else if (strstr(name_ptr, "_es")) + fw_type = FW_TYPE_ES; + else if (strstr(name_ptr, "_mfg")) + fw_type = FW_TYPE_MFG; + else + fw_type = FW_TYPE_STA; + + for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { + const cihp_name_map_t* row = &chip_name_map[i]; + if (row->chip == chip && row->chiprev == chiprev && + (row->ag_type == ag_type || row->ag_type == DONT_CARE)) { + strcpy(name_ptr, "fw_"); + strcat(fw_path, row->chip_name); +#ifdef BCMUSBDEV_COMPOSITE + strcat(fw_path, "_cusb"); +#endif + if (fw_type == FW_TYPE_APSTA) + strcat(fw_path, "_apsta.bin"); + else if (fw_type == FW_TYPE_P2P) + strcat(fw_path, "_p2p.bin"); + else if (fw_type == FW_TYPE_MESH) + strcat(fw_path, "_mesh.bin"); + else if (fw_type == FW_TYPE_ES) + strcat(fw_path, "_es.bin"); + else if (fw_type == FW_TYPE_MFG) + strcat(fw_path, "_mfg.bin"); + else + strcat(fw_path, ".bin"); + } + } + + dhd->conf->fw_type = fw_type; + + CONFIG_TRACE(("%s: firmware_path=%s\n", __FUNCTION__, fw_path)); +} + +void +dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path) +{ + uint chip, chiprev; + int i; + char *name_ptr; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (clm_path[0] == '\0') { + printf("clm path is null\n"); + return; + } + + /* find out the last '/' */ + i = strlen(clm_path); + while (i > 0) { + if (clm_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &clm_path[i]; + + for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { + const cihp_name_map_t* row = &chip_name_map[i]; + if (row->chip == chip && row->chiprev == chiprev && row->clm) { + strcpy(name_ptr, "clm_"); + strcat(clm_path, row->chip_name); + strcat(clm_path, ".blob"); + } + } + + CONFIG_TRACE(("%s: clm_path=%s\n", __FUNCTION__, clm_path)); +} + 
+void +dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) +{ + uint chip, chiprev; + int i; + char *name_ptr; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (nv_path[0] == '\0') { +#ifdef CONFIG_BCMDHD_NVRAM_PATH + bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1); + if (nv_path[0] == '\0') +#endif + { + printf("nvram path is null\n"); + return; + } + } + + /* find out the last '/' */ + i = strlen(nv_path); + while (i > 0) { + if (nv_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &nv_path[i]; + + for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { + const cihp_name_map_t* row = &chip_name_map[i]; + if (row->chip == chip && row->chiprev == chiprev && strlen(row->module_name)) { + strcpy(name_ptr, "nvram_"); + strcat(name_ptr, row->module_name); +#ifdef BCMUSBDEV_COMPOSITE + strcat(name_ptr, "_cusb"); +#endif + strcat(name_ptr, ".txt"); + } + } + + for (i=0; iconf->nv_by_chip.count; i++) { + if (chip==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip && + chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) { + strcpy(name_ptr, dhd->conf->nv_by_chip.m_chip_nv_path_head[i].name); + break; + } + } + + CONFIG_TRACE(("%s: nvram_path=%s\n", __FUNCTION__, nv_path)); +} + +void +dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path) +{ + int i; + + if (src_path[0] == '\0') { + printf("src_path is null\n"); + return; + } else + strcpy(dst_path, src_path); + + /* find out the last '/' */ + i = strlen(dst_path); + while (i > 0) { + if (dst_path[i] == '/') { + i++; + break; + } + i--; + } + strcpy(&dst_path[i], dst_name); + + CONFIG_TRACE(("%s: dst_path=%s\n", __FUNCTION__, dst_path)); +} + +#ifdef CONFIG_PATH_AUTO_SELECT +void +dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path) +{ + uint chip, chiprev; + int i; + char *name_ptr; + + chip = dhd->conf->chip; + chiprev = dhd->conf->chiprev; + + if (conf_path[0] == '\0') { + 
printf("config path is null\n"); + return; + } + + /* find out the last '/' */ + i = strlen(conf_path); + while (i > 0) { + if (conf_path[i] == '/') { + i++; + break; + } + i--; + } + name_ptr = &conf_path[i]; + + for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) { + const cihp_name_map_t* row = &chip_name_map[i]; + if (row->chip == chip && row->chiprev == chiprev) { + strcpy(name_ptr, "config_"); + strcat(conf_path, row->chip_name); + strcat(conf_path, ".txt"); + } + } + + CONFIG_TRACE(("%s: config_path=%s\n", __FUNCTION__, conf_path)); +} +#endif + +int +dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, + int def, bool down) +{ + int ret = -1; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + + if (val >= def) { + if (down) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) + CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret)); + } + if (cmd == WLC_SET_VAR) { + CONFIG_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val)); + bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } else { + CONFIG_TRACE(("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val)); + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } + } + + return ret; +} + +int +dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, + int len, bool down) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + if (down) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0) + CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret)); + } + + if (cmd == WLC_SET_VAR) { + bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), 
TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } else { + if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, 0)) < 0) + CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret)); + } + + return ret; +} + +int +dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + if (cmd == WLC_GET_VAR) { + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), FALSE, ifidx); + if (!ret) { + memcpy(buf, iovbuf, len); + } else { + CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret)); + } + } else { + CONFIG_ERROR(("%s: mkiovar %s failed\n", __FUNCTION__, name)); + } + } else { + ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0); + if (ret < 0) + CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret)); + } + + return ret; +} + +uint +dhd_conf_get_band(dhd_pub_t *dhd) +{ + int band = -1; + + if (dhd && dhd->conf) + band = dhd->conf->band; + else + CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + + return band; +} + +int +dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1; + + memset(cspec, 0, sizeof(wl_country_t)); + bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0) + CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror)); + + return bcmerror; +} + +int +dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1, i; + struct dhd_conf *conf = dhd->conf; + conf_country_list_t *country_list = &conf->country_list; + + for (i = 0; i < country_list->count; i++) { + if (!strncmp(cspec->country_abbrev, country_list->cspec[i]->country_abbrev, 2)) { + memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ); + cspec->rev = 
country_list->cspec[i]->rev; + bcmerror = 0; + } + } + + if (!bcmerror) + printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev); + + return bcmerror; +} + +int +dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec) +{ + int bcmerror = -1; + + memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t)); + + printf("%s: set country %s, revision %d\n", __FUNCTION__, cspec->ccode, cspec->rev); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "country", (char *)cspec, sizeof(wl_country_t), FALSE); + dhd_conf_get_country(dhd, cspec); + printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev); + + return bcmerror; +} + +int +dhd_conf_fix_country(dhd_pub_t *dhd) +{ + int bcmerror = -1; + uint band; + wl_uint32_list_t *list; + u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + wl_country_t cspec; + + if (!(dhd && dhd->conf)) { + return bcmerror; + } + + memset(valid_chan_list, 0, sizeof(valid_chan_list)); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) { + CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror)); + } + + band = dhd_conf_get_band(dhd); + + if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G) && + dtoh32(list->count)<11)) { + CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n", + __FUNCTION__, bcmerror, dtoh32(list->count))); + dhd_conf_map_country_list(dhd, &dhd->conf->cspec); + if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) { + strcpy(cspec.country_abbrev, "US"); + cspec.rev = 0; + strcpy(cspec.ccode, "US"); + dhd_conf_map_country_list(dhd, &cspec); + dhd_conf_set_country(dhd, &cspec); + } + } + + return bcmerror; +} + +bool +dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel) +{ + int i; + bool match = false; + + if (dhd && dhd->conf) { + if (dhd->conf->channels.count == 0) + return true; + for (i=0; 
iconf->channels.count; i++) { + if (channel == dhd->conf->channels.channel[i]) + match = true; + } + } else { + match = true; + CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + } + + return match; +} + +int +dhd_conf_set_roam(dhd_pub_t *dhd) +{ + int bcmerror = -1; + struct dhd_conf *conf = dhd->conf; + + dhd_roam_disable = conf->roam_off; + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE); + + if (!conf->roam_off || !conf->roam_off_suspend) { + printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]); + dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER", + (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE); + + printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]); + dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD", + (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE); + + printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]); + dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA", + (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE); + + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE); + } + + return bcmerror; +} + +void +dhd_conf_set_bw_cap(dhd_pub_t *dhd) +{ + struct { + u32 band; + u32 bw_cap; + } param = {0, 0}; + + if (dhd->conf->bw_cap[0] >= 0) { + memset(¶m, 0, sizeof(param)); + param.band = WLC_BAND_2G; + param.bw_cap = (uint)dhd->conf->bw_cap[0]; + printf("%s: set bw_cap 2g 0x%x\n", __FUNCTION__, param.bw_cap); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE); + } + + if (dhd->conf->bw_cap[1] >= 0) { + memset(¶m, 0, sizeof(param)); + param.band = WLC_BAND_5G; + param.bw_cap = (uint)dhd->conf->bw_cap[1]; + printf("%s: set bw_cap 5g 0x%x\n", __FUNCTION__, param.bw_cap); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE); + } +} + 
+void +dhd_conf_get_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acp) +{ + int bcmerror = -1; + char iovbuf[WLC_IOCTL_SMLEN]; + edcf_acparam_t *acparam; + + bzero(iovbuf, sizeof(iovbuf)); + + /* + * Get current acparams, using buf as an input buffer. + * Return data is array of 4 ACs of wme params. + */ + if (mode == 0) + bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf)); + else + bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf)); + if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + CONFIG_ERROR(("%s: wme_ac_sta getting failed %d\n", __FUNCTION__, bcmerror)); + return; + } + memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT); + + acparam = &acp[AC_BK]; + CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + acparam = &acp[AC_BE]; + CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + acparam = &acp[AC_VI]; + CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + acparam = &acp[AC_VO]; + CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, + acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK, + acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acparam->TXOP)); + + return; +} + +void +dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int aci) +{ + int aifsn, ecwmin, ecwmax, txop; + edcf_acparam_t *acp; + struct dhd_conf *conf = dhd->conf; + wme_param_t *wme; + 
+ if (mode == 0) + wme = &conf->wme_sta; + else + wme = &conf->wme_ap; + + /* Default value */ + aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK; + ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK; + ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT; + txop = acparam_cur->TXOP; + + /* Modified value */ + if (wme->aifsn[aci] > 0) + aifsn = wme->aifsn[aci]; + if (wme->ecwmin[aci] > 0) + ecwmin = wme->ecwmin[aci]; + if (wme->ecwmax[aci] > 0) + ecwmax = wme->ecwmax[aci]; + if (wme->txop[aci] > 0) + txop = wme->txop[aci]; + + if (!(wme->aifsn[aci] || wme->ecwmin[aci] || + wme->ecwmax[aci] || wme->txop[aci])) + return; + + /* Update */ + acp = acparam_cur; + acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK); + acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK); + acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK)); + acp->TXOP = txop; + + printf("%s: wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n", + __FUNCTION__, mode?"ap":"sta", + acp->ACI, acp->ACI&EDCF_AIFSN_MASK, + acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT, + acp->TXOP); + + /* + * Now use buf as an output buffer. + * Put WME acparams after "wme_ac\0" in buf. + * NOTE: only one of the four ACs can be set at a time. 
+ */ + if (mode == 0) + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE); + else + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_ap", (char *)acp, sizeof(edcf_acparam_t), FALSE); + +} + +void +dhd_conf_set_wme(dhd_pub_t *dhd, int mode) +{ + edcf_acparam_t acparam_cur[AC_COUNT]; + + if (dhd && dhd->conf) { + if (!dhd->conf->force_wme_ac) { + CONFIG_TRACE(("%s: force_wme_ac is not enabled %d\n", + __FUNCTION__, dhd->conf->force_wme_ac)); + return; + } + + CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__)); + dhd_conf_get_wme(dhd, mode, acparam_cur); + + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BK], AC_BK); + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BE], AC_BE); + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VI], AC_VI); + dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VO], AC_VO); + + CONFIG_TRACE(("%s: After change:\n", __FUNCTION__)); + dhd_conf_get_wme(dhd, mode, acparam_cur); + } else { + CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__)); + } + + return; +} + +void +dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int p2p_mode, int miracast_mode) +{ + int i; + struct dhd_conf *conf = dhd->conf; + bool set = true; + + for (i=0; imchan[i].bw >= 0); + set &= ((conf->mchan[i].p2p_mode == -1) | (conf->mchan[i].p2p_mode == p2p_mode)); + set &= ((conf->mchan[i].miracast_mode == -1) | (conf->mchan[i].miracast_mode == miracast_mode)); + if (set) { + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mchan_bw", conf->mchan[i].bw, 0, FALSE); + } + } + + return; +} + +#ifdef PKT_FILTER_SUPPORT +void +dhd_conf_add_pkt_filter(dhd_pub_t *dhd) +{ + int i, j; + char str[16]; +#define MACS "%02x%02x%02x%02x%02x%02x" + + /* + * Filter in less pkt: ARP(0x0806, ID is 105), BRCM(0x886C), 802.1X(0x888E) + * 1) dhd_master_mode=1 + * 2) pkt_filter_del=100, 102, 103, 104, 105 + * 3) pkt_filter_add=131 0 0 12 0xFFFF 0x886C, 132 0 0 12 0xFFFF 0x888E + * 4) magic_pkt_filter_add=141 0 1 12 + */ + for(i=0; iconf->pkt_filter_add.count; i++) 
{ + dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i]; + printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]); + } + dhd->pktfilter_count += i; + + if (dhd->conf->magic_pkt_filter_add) { + strcat(dhd->conf->magic_pkt_filter_add, " 0x"); + strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF"); + for (j=0; j<16; j++) + strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF"); + strcat(dhd->conf->magic_pkt_filter_add, " 0x"); + strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF"); + sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet)); + for (j=0; j<16; j++) + strncat(dhd->conf->magic_pkt_filter_add, str, 12); + dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->magic_pkt_filter_add; + dhd->pktfilter_count += 1; + } +} + +bool +dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id) +{ + int i; + + if (dhd && dhd->conf) { + for (i=0; iconf->pkt_filter_del.count; i++) { + if (id == dhd->conf->pkt_filter_del.id[i]) { + printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]); + return true; + } + } + return false; + } + return false; +} + +void +dhd_conf_discard_pkt_filter(dhd_pub_t *dhd) +{ + dhd->pktfilter_count = 6; + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E"; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333"; + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; + /* Do not enable ARP to pkt filter if dhd_master_mode is false.*/ + dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL; + + /* IPv4 broadcast address XXX.XXX.XXX.255 */ + dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF"; + dhd->pktfilter_count++; + /* discard IPv4 multicast address 224.0.0.0/4 */ + dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 
0x080000000000000000000000000000000000E0"; + dhd->pktfilter_count++; + /* discard IPv6 multicast address FF00::/8 */ + dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF"; + dhd->pktfilter_count++; + /* discard Netbios pkt */ + dhd->pktfilter[dhd->pktfilter_count] = "121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089"; + dhd->pktfilter_count++; + +} +#endif /* PKT_FILTER_SUPPORT */ + +int +dhd_conf_get_pm(dhd_pub_t *dhd) +{ + if (dhd && dhd->conf) { + return dhd->conf->pm; + } + return -1; +} + +#define AP_IN_SUSPEND 1 +#define AP_DOWN_IN_SUSPEND 2 +int +dhd_conf_get_ap_mode_in_suspend(dhd_pub_t *dhd) +{ + int mode = 0; + + /* returned ap_in_suspend value: + * 0: nothing + * 1: ap enabled in suspend + * 2: ap enabled, but down in suspend + */ + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + mode = dhd->conf->ap_in_suspend; + } + + return mode; +} + +int +dhd_conf_set_ap_in_suspend(dhd_pub_t *dhd, int suspend) +{ + int mode = 0; + uint wl_down = 1; + + mode = dhd_conf_get_ap_mode_in_suspend(dhd); + if (mode) + printf("%s: suspend %d, mode %d\n", __FUNCTION__, suspend, mode); + if (suspend) { + if (mode == AP_IN_SUSPEND) { +#ifdef SUSPEND_EVENT + if (dhd->conf->suspend_eventmask_enable) { + char *eventmask = dhd->conf->suspend_eventmask; + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", eventmask, sizeof(eventmask), TRUE); + } +#endif + } else if (mode == AP_DOWN_IN_SUSPEND) + dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0); + } else { + if (mode == AP_IN_SUSPEND) { +#ifdef SUSPEND_EVENT + if (dhd->conf->suspend_eventmask_enable) { + char *eventmask = dhd->conf->resume_eventmask; + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", eventmask, sizeof(eventmask), TRUE); + } +#endif + } else if (mode == AP_DOWN_IN_SUSPEND) { + wl_down = 0; + 
dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&wl_down, sizeof(wl_down), TRUE, 0); + } + } + + return mode; +} + +#ifdef PROP_TXSTATUS +int +dhd_conf_get_disable_proptx(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + int disable_proptx = -1; + int fw_proptx = 0; + + /* check fw proptx priority: + * 1st: check fw support by wl cap + * 2nd: 4334/43340/43341/43241 support proptx but not show in wl cap, so enable it by default + * if you would like to disable it, please set disable_proptx=1 in config.txt + * 3th: disable when proptxstatus not support in wl cap + */ + if (FW_SUPPORTED(dhd, proptxstatus)) { + fw_proptx = 1; + } else if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID || + dhd->conf->chip == BCM43340_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + fw_proptx = 1; + } else { + fw_proptx = 0; + } + + /* returned disable_proptx value: + * -1: disable in STA and enable in P2P(follow original dhd settings when PROP_TXSTATUS_VSDB enabled) + * 0: depend on fw support + * 1: always disable proptx + */ + if (conf->disable_proptx == 0) { + // check fw support as well + if (fw_proptx) + disable_proptx = 0; + else + disable_proptx = 1; + } else if (conf->disable_proptx >= 1) { + disable_proptx = 1; + } else { + // check fw support as well + if (fw_proptx) + disable_proptx = -1; + else + disable_proptx = 1; + } + + printf("%s: fw_proptx=%d, disable_proptx=%d\n", __FUNCTION__, fw_proptx, disable_proptx); + + return disable_proptx; +} +#endif + +uint +pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf) +{ + bool findNewline, changenewline=FALSE, pick=FALSE; + int column; + uint n, pick_column=0; + + findNewline = FALSE; + column = 0; + + if (start_pos >= len) { + CONFIG_ERROR(("%s: wrong start pos\n", __FUNCTION__)); + return 0; + } + + for (n = start_pos; n < len; n++) { + if (varbuf[n] == '\r') + continue; + if ((findNewline || changenewline) && varbuf[n] != '\n') + continue; + findNewline = FALSE; + if (varbuf[n] == '#') { + 
findNewline = TRUE; + continue; + } + if (varbuf[n] == '\\') { + changenewline = TRUE; + continue; + } + if (!changenewline && varbuf[n] == '\n') { + if (column == 0) + continue; + column = 0; + continue; + } + if (changenewline && varbuf[n] == '\n') { + changenewline = FALSE; + continue; + } + + if (column==0 && !pick) { // start to pick + pick = TRUE; + column++; + pick_column = 0; + } else { + if (pick && column==0) { // stop to pick + pick = FALSE; + break; + } else + column++; + } + if (pick) { + if (varbuf[n] == 0x9) + continue; + pickbuf[pick_column] = varbuf[n]; + pick_column++; + } + } + + return n; // return current position +} + +bool +dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + char *data = full_param+len_param; + + if (!strncmp("dhd_msg_level=", full_param, len_param)) { + dhd_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level); + } +#ifdef BCMSDIO + else if (!strncmp("sd_msglevel=", full_param, len_param)) { + sd_msglevel = (int)simple_strtol(data, NULL, 0); + printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel); + } +#endif +#ifdef BCMDBUS + else if (!strncmp("dbus_msglevel=", full_param, len_param)) { + dbus_msglevel = (int)simple_strtol(data, NULL, 0); + printf("%s: dbus_msglevel = 0x%X\n", __FUNCTION__, dbus_msglevel); + } +#endif + else if (!strncmp("android_msg_level=", full_param, len_param)) { + android_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level); + } + else if (!strncmp("config_msg_level=", full_param, len_param)) { + config_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level); + } +#ifdef WL_CFG80211 + else if (!strncmp("wl_dbg_level=", full_param, len_param)) { + wl_dbg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level); + } +#endif 
+#if defined(WL_WIRELESS_EXT) + else if (!strncmp("iw_msg_level=", full_param, len_param)) { + iw_msg_level = (int)simple_strtol(data, NULL, 0); + printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level); + } +#endif +#if defined(DHD_DEBUG) + else if (!strncmp("dhd_console_ms=", full_param, len_param)) { + dhd->dhd_console_ms = (int)simple_strtol(data, NULL, 0); + printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd->dhd_console_ms); + } +#endif + else + return false; + + return true; +} + +void +dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val) +{ + char *pick_tmp, *pch; + + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "aifsn "); + if (pch) { + wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0); + printf("%s: ac_val=%d, aifsn=%d\n", __FUNCTION__, ac_val, wme->aifsn[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "ecwmin "); + if (pch) { + wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0); + printf("%s: ac_val=%d, ecwmin=%d\n", __FUNCTION__, ac_val, wme->ecwmin[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "ecwmax "); + if (pch) { + wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0); + printf("%s: ac_val=%d, ecwmax=%d\n", __FUNCTION__, ac_val, wme->ecwmax[ac_val]); + } + pick_tmp = pick; + pch = bcmstrstr(pick_tmp, "txop "); + if (pch) { + wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0); + printf("%s: ac_val=%d, txop=0x%x\n", __FUNCTION__, ac_val, wme->txop[ac_val]); + } + +} + +bool +dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + // wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e + // wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e + + if (!strncmp("force_wme_ac=", full_param, len_param)) { + conf->force_wme_ac = (int)simple_strtol(data, NULL, 10); + printf("%s: force_wme_ac = %d\n", __FUNCTION__, 
conf->force_wme_ac); + } + else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE); + } + else if (!strncmp("wme_ac_sta_bk=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BK); + } + else if (!strncmp("wme_ac_sta_vi=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VI); + } + else if (!strncmp("wme_ac_sta_vo=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VO); + } + else if (!strncmp("wme_ac_ap_be=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BE); + } + else if (!strncmp("wme_ac_ap_bk=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BK); + } + else if (!strncmp("wme_ac_ap_vi=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VI); + } + else if (!strncmp("wme_ac_ap_vo=", full_param, len_param)) { + dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VO); + } + else + return false; + + return true; +} + +bool +dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i, j; + char *pch, *pick_tmp; + wl_mac_list_t *mac_list; + wl_mac_range_t *mac_range; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + /* Process fw_by_mac: + * fw_by_mac=[fw_mac_num] \ + * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \ + * [oui1-1] [nic_start1-1] [nic_end1-1]... \ + * [oui1-n] [nic_start1-n] [nic_end1-n] \ + * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \ + * [oui2-1] [nic_start2-1] [nic_end2-1]... 
\ + * [oui2-n] [nic_start2-n] [nic_end2-n] \ + * Ex: fw_by_mac=2 \ + * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ + * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \ + * 0x983B16 0x916157 0x916487 + */ + + if (!strncmp("fw_by_mac=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) { + conf->fw_by_mac.count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } + printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count); + conf->fw_by_mac.m_mac_list_head = mac_list; + for (i=0; ifw_by_mac.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(mac_list[i].name, pch); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0); + printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, + mac_list[i].name, mac_list[i].count); + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { + mac_list[i].count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + break; + } + mac_list[i].mac = mac_range; + for (j=0; jconf; + char *data = full_param+len_param; + + /* Process nv_by_mac: + * [nv_by_mac]: The same format as fw_by_mac + */ + if (!strncmp("nv_by_mac=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) { + conf->nv_by_mac.count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } + printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count); + conf->nv_by_mac.m_mac_list_head = mac_list; + for (i=0; inv_by_mac.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(mac_list[i].name, pch); + pch = bcmstrtok(&pick_tmp, " ", 0); + mac_list[i].count 
= (uint32)simple_strtol(pch, NULL, 0); + printf("%s: name=%s, mac_count=%d\n", __FUNCTION__, + mac_list[i].name, mac_list[i].count); + if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) { + mac_list[i].count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + break; + } + mac_list[i].mac = mac_range; + for (j=0; jconf; + char *data = full_param+len_param; + + /* Process nv_by_chip: + * nv_by_chip=[nv_chip_num] \ + * [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \ + * Ex: nv_by_chip=2 \ + * 43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \ + */ + if (!strncmp("nv_by_chip=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ", 0); + conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0); + if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, GFP_KERNEL))) { + conf->nv_by_chip.count = 0; + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } + printf("%s: nv_by_chip_count=%d\n", __FUNCTION__, conf->nv_by_chip.count); + conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path; + for (i=0; inv_by_chip.count; i++) { + pch = bcmstrtok(&pick_tmp, " ", 0); + chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0); + pch = bcmstrtok(&pick_tmp, " ", 0); + strcpy(chip_nv_path[i].name, pch); + printf("%s: chip=0x%x, chiprev=%d, name=%s\n", __FUNCTION__, + chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name); + } + } + else + return false; + + return true; +} + +bool +dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("roam_off=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->roam_off = 0; + else + conf->roam_off = 1; + printf("%s: roam_off = %d\n", __FUNCTION__, conf->roam_off); + } + else if 
(!strncmp("roam_off_suspend=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->roam_off_suspend = 0; + else + conf->roam_off_suspend = 1; + printf("%s: roam_off_suspend = %d\n", __FUNCTION__, conf->roam_off_suspend); + } + else if (!strncmp("roam_trigger=", full_param, len_param)) { + conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10); + printf("%s: roam_trigger = %d\n", __FUNCTION__, + conf->roam_trigger[0]); + } + else if (!strncmp("roam_scan_period=", full_param, len_param)) { + conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10); + printf("%s: roam_scan_period = %d\n", __FUNCTION__, + conf->roam_scan_period[0]); + } + else if (!strncmp("roam_delta=", full_param, len_param)) { + conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10); + printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]); + } + else if (!strncmp("fullroamperiod=", full_param, len_param)) { + conf->fullroamperiod = (int)simple_strtol(data, NULL, 10); + printf("%s: fullroamperiod = %d\n", __FUNCTION__, + conf->fullroamperiod); + } else + return false; + + return true; +} + +bool +dhd_conf_read_country_list(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i; + char *pch, *pick_tmp, *pick_tmp2; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + wl_country_t *cspec; + conf_country_list_t *country_list = NULL; + + /* Process country_list: + * country_list=[country1]:[ccode1]/[regrev1], + * [country2]:[ccode2]/[regrev2] \ + * Ex: country_list=US:US/0, TW:TW/1 + */ + if (!strncmp("country_list=", full_param, len_param)) { + country_list = &dhd->conf->country_list; + } + if (country_list) { + pick_tmp = data; + for (i=0; icountry_abbrev, pch); + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + kfree(cspec); + break; + } + memcpy(cspec->ccode, pch, 2); + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + kfree(cspec); + break; + } + cspec->rev = (int32)simple_strtol(pch, NULL, 10); + country_list->count++; + 
country_list->cspec[i] = cspec; + CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__, + cspec->country_abbrev, cspec->ccode, cspec->rev)); + } + if (!strncmp("country_list=", full_param, len_param)) { + printf("%s: %d country in list\n", __FUNCTION__, conf->country_list.count); + } + } + else + return false; + + return true; +} + +bool +dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + int i; + char *pch, *pick_tmp, *pick_tmp2; + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + /* Process mchan_bw: + * mchan_bw=[val]/[any/go/gc]/[any/source/sink] + * Ex: mchan_bw=80/go/source, 30/gc/sink + */ + if (!strncmp("mchan_bw=", full_param, len_param)) { + pick_tmp = data; + for (i=0; imchan[i].bw = (int)simple_strtol(pch, NULL, 0); + if (conf->mchan[i].bw < 0 || conf->mchan[i].bw > 100) { + CONFIG_ERROR(("%s: wrong bw %d\n", __FUNCTION__, conf->mchan[i].bw)); + conf->mchan[i].bw = 0; + break; + } + } + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + break; + } else { + if (bcmstrstr(pch, "any")) { + conf->mchan[i].p2p_mode = -1; + } else if (bcmstrstr(pch, "go")) { + conf->mchan[i].p2p_mode = WL_P2P_IF_GO; + } else if (bcmstrstr(pch, "gc")) { + conf->mchan[i].p2p_mode = WL_P2P_IF_CLIENT; + } + } + pch = bcmstrtok(&pick_tmp2, "/", 0); + if (!pch) { + break; + } else { + if (bcmstrstr(pch, "any")) { + conf->mchan[i].miracast_mode = -1; + } else if (bcmstrstr(pch, "source")) { + conf->mchan[i].miracast_mode = MIRACAST_SOURCE; + } else if (bcmstrstr(pch, "sink")) { + conf->mchan[i].miracast_mode = MIRACAST_SINK; + } + } + } + for (i=0; imchan[i].bw >= 0) + printf("%s: mchan_bw=%d/%d/%d\n", __FUNCTION__, + conf->mchan[i].bw, conf->mchan[i].p2p_mode, conf->mchan[i].miracast_mode); + } + } + else + return false; + + return true; +} + +#ifdef PKT_FILTER_SUPPORT +bool +dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char 
*data = full_param+len_param; + char *pch, *pick_tmp; + int i; + + /* Process pkt filter: + * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000 + * 2) pkt_filter_del=100, 102, 103, 104, 105 + * 3) magic_pkt_filter_add=141 0 1 12 + */ + if (!strncmp("dhd_master_mode=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_master_mode = FALSE; + else + dhd_master_mode = TRUE; + printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode); + } + else if (!strncmp("pkt_filter_add=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i=0; + while (pch != NULL && ipkt_filter_add.filter[i][0], pch); + printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]); + pch = bcmstrtok(&pick_tmp, ",.-", 0); + i++; + } + conf->pkt_filter_add.count = i; + } + else if (!strncmp("pkt_filter_del=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i=0; + while (pch != NULL && ipkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10); + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i++; + } + conf->pkt_filter_del.count = i; + printf("%s: pkt_filter_del id = ", __FUNCTION__); + for (i=0; ipkt_filter_del.count; i++) + printf("%d ", conf->pkt_filter_del.id[i]); + printf("\n"); + } + else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) { + if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) { + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } else { + memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN); + strcpy(conf->magic_pkt_filter_add, data); + printf("%s: magic_pkt_filter_add = %s\n", __FUNCTION__, conf->magic_pkt_filter_add); + } + } + else + return false; + + return true; +} +#endif + +#ifdef ISAM_PREINIT +/* + * isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1] + * isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x] + hidden [y|n] maxassoc [x] + amode [open|shared|wpapsk|wpa2psk|wpawpa2psk] + 
emode [none|wep|tkip|aes|tkipaes] + key [xxxxx] + * isam_enable=ifname [wlan0|wlan1] +*/ +bool +dhd_conf_read_isam(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("isam_init=", full_param, len_param)) { + sprintf(conf->isam_init, "isam_init %s", data); + printf("%s: isam_init=%s\n", __FUNCTION__, conf->isam_init); + } + else if (!strncmp("isam_config=", full_param, len_param)) { + sprintf(conf->isam_config, "isam_config %s", data); + printf("%s: isam_config=%s\n", __FUNCTION__, conf->isam_config); + } + else if (!strncmp("isam_enable=", full_param, len_param)) { + sprintf(conf->isam_enable, "isam_enable %s", data); + printf("%s: isam_enable=%s\n", __FUNCTION__, conf->isam_enable); + } + else + return false; + + return true; +} +#endif + +#ifdef IDHCP +bool +dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + struct ipv4_addr ipa_set; + + if (!strncmp("dhcpc_enable=", full_param, len_param)) { + conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10); + printf("%s: dhcpc_enable = %d\n", __FUNCTION__, conf->dhcpc_enable); + } + else if (!strncmp("dhcpd_enable=", full_param, len_param)) { + conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10); + printf("%s: dhcpd_enable = %d\n", __FUNCTION__, conf->dhcpd_enable); + } + else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) + printf("%s : dhcpd_ip_addr adress setting failed.\n", __FUNCTION__); + conf->dhcpd_ip_addr = ipa_set; + printf("%s: dhcpd_ip_addr = %s\n",__FUNCTION__, data); + } + else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) + printf("%s : dhcpd_ip_mask adress setting failed.\n", __FUNCTION__); + conf->dhcpd_ip_mask = ipa_set; + printf("%s: dhcpd_ip_mask = %s\n",__FUNCTION__, data); + } + else if 
(!strncmp("dhcpd_ip_start=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) + printf("%s : dhcpd_ip_start adress setting failed.\n", __FUNCTION__); + conf->dhcpd_ip_start = ipa_set; + printf("%s: dhcpd_ip_start = %s\n",__FUNCTION__, data); + } + else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) { + if (!bcm_atoipv4(data, &ipa_set)) + printf("%s : dhcpd_ip_end adress setting failed.\n", __FUNCTION__); + conf->dhcpd_ip_end = ipa_set; + printf("%s: dhcpd_ip_end = %s\n",__FUNCTION__, data); + } + else + return false; + + return true; +} +#endif + +#ifdef BCMSDIO +bool +dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("dhd_doflow=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_doflow = FALSE; + else + dhd_doflow = TRUE; + printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow); + } + else if (!strncmp("dhd_slpauto=", full_param, len_param) || + !strncmp("kso_enable=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + dhd_slpauto = FALSE; + else + dhd_slpauto = TRUE; + printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto); + } + else if (!strncmp("use_rxchain=", full_param, len_param)) { + conf->use_rxchain = (int)simple_strtol(data, NULL, 10); + printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain); + } + else if (!strncmp("dhd_txminmax=", full_param, len_param)) { + conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10); + printf("%s: dhd_txminmax = %d\n", __FUNCTION__, conf->dhd_txminmax); + } + else if (!strncmp("txinrx_thres=", full_param, len_param)) { + conf->txinrx_thres = (int)simple_strtol(data, NULL, 10); + printf("%s: txinrx_thres = %d\n", __FUNCTION__, conf->txinrx_thres); + } + else if (!strncmp("sd_f2_blocksize=", full_param, len_param)) { + conf->sd_f2_blocksize = (int)simple_strtol(data, NULL, 10); + printf("%s: sd_f2_blocksize = %d\n", __FUNCTION__, 
conf->sd_f2_blocksize); + } +#if defined(HW_OOB) + else if (!strncmp("oob_enabled_later=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->oob_enabled_later = FALSE; + else + conf->oob_enabled_later = TRUE; + printf("%s: oob_enabled_later = %d\n", __FUNCTION__, conf->oob_enabled_later); + } +#endif + else if (!strncmp("dpc_cpucore=", full_param, len_param)) { + conf->dpc_cpucore = (int)simple_strtol(data, NULL, 10); + printf("%s: dpc_cpucore = %d\n", __FUNCTION__, conf->dpc_cpucore); + } + else if (!strncmp("rxf_cpucore=", full_param, len_param)) { + conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10); + printf("%s: rxf_cpucore = %d\n", __FUNCTION__, conf->rxf_cpucore); + } + else if (!strncmp("orphan_move=", full_param, len_param)) { + conf->orphan_move = (int)simple_strtol(data, NULL, 10); + printf("%s: orphan_move = %d\n", __FUNCTION__, conf->orphan_move); + } +#if defined(BCMSDIOH_TXGLOM) + else if (!strncmp("txglomsize=", full_param, len_param)) { + conf->txglomsize = (uint)simple_strtol(data, NULL, 10); + if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) + conf->txglomsize = SDPCM_MAXGLOM_SIZE; + printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize); + } + else if (!strncmp("txglom_ext=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->txglom_ext = FALSE; + else + conf->txglom_ext = TRUE; + printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext); + if (conf->txglom_ext) { + if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) + conf->txglom_bucket_size = 1680; + else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) + conf->txglom_bucket_size = 1684; + } + printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size); + } + else if (!strncmp("bus:rxglom=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->bus_rxglom = FALSE; + else + conf->bus_rxglom = TRUE; + printf("%s: 
bus:rxglom = %d\n", __FUNCTION__, conf->bus_rxglom); + } + else if (!strncmp("deferred_tx_len=", full_param, len_param)) { + conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10); + printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len); + } + else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) { + conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0); + printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix); + } + else if (!strncmp("tx_max_offset=", full_param, len_param)) { + conf->tx_max_offset = (int)simple_strtol(data, NULL, 10); + printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset); + } + else if (!strncmp("txglom_mode=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->txglom_mode = FALSE; + else + conf->txglom_mode = TRUE; + printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode); + } +#endif + else + return false; + + return true; +} +#endif + +#ifdef BCMPCIE +bool +dhd_conf_read_pcie_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("bus:deepsleep_disable=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->bus_deepsleep_disable = 0; + else + conf->bus_deepsleep_disable = 1; + printf("%s: bus:deepsleep_disable = %d\n", __FUNCTION__, conf->bus_deepsleep_disable); + } + else + return false; + + return true; +} +#endif + +bool +dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + + if (!strncmp("deepsleep=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->deepsleep = TRUE; + else + conf->deepsleep = FALSE; + printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep); + } + else if (!strncmp("PM=", full_param, len_param)) { + conf->pm = (int)simple_strtol(data, NULL, 10); + printf("%s: PM = %d\n", __FUNCTION__, conf->pm); + } + else if 
(!strncmp("pm_in_suspend=", full_param, len_param)) { + conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10); + printf("%s: pm_in_suspend = %d\n", __FUNCTION__, conf->pm_in_suspend); + } + else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) { + conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10); + printf("%s: suspend_bcn_li_dtim = %d\n", __FUNCTION__, conf->suspend_bcn_li_dtim); + } + else if (!strncmp("xmit_in_suspend=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->xmit_in_suspend = TRUE; + else + conf->xmit_in_suspend = FALSE; + printf("%s: xmit_in_suspend = %d\n", __FUNCTION__, conf->xmit_in_suspend); + } + else if (!strncmp("ap_in_suspend=", full_param, len_param)) { + conf->ap_in_suspend = (int)simple_strtol(data, NULL, 10); + printf("%s: ap_in_suspend = %d\n", __FUNCTION__, conf->ap_in_suspend); + } +#ifdef SUSPEND_EVENT + else if (!strncmp("suspend_eventmask_enable=", full_param, len_param)) { + if (!strncmp(data, "1", 1)) + conf->suspend_eventmask_enable = TRUE; + else + conf->suspend_eventmask_enable = FALSE; + printf("%s: suspend_eventmask_enable = %d\n", __FUNCTION__, conf->suspend_eventmask_enable); + } +#endif + else + return false; + + return true; +} + +bool +dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param) +{ + struct dhd_conf *conf = dhd->conf; + char *data = full_param+len_param; + uint len_data = strlen(data); + char *pch, *pick_tmp; + int i; + + if (!strncmp("dhd_poll=", full_param, len_param)) { + if (!strncmp(data, "0", 1)) + conf->dhd_poll = 0; + else + conf->dhd_poll = 1; + printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll); + } + else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) { + dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10); + printf("%s: dhd_watchdog_ms = %d\n", __FUNCTION__, dhd_watchdog_ms); + } + else if (!strncmp("band=", full_param, len_param)) { + /* Process band: + * band=a for 5GHz only and band=b for 2.4GHz only + */ + if 
(!strcmp(data, "b")) + conf->band = WLC_BAND_2G; + else if (!strcmp(data, "a")) + conf->band = WLC_BAND_5G; + else + conf->band = WLC_BAND_AUTO; + printf("%s: band = %d\n", __FUNCTION__, conf->band); + } + else if (!strncmp("bw_cap_2g=", full_param, len_param)) { + conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0); + printf("%s: bw_cap_2g = %d\n", __FUNCTION__, conf->bw_cap[0]); + } + else if (!strncmp("bw_cap_5g=", full_param, len_param)) { + conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0); + printf("%s: bw_cap_5g = %d\n", __FUNCTION__, conf->bw_cap[1]); + } + else if (!strncmp("bw_cap=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + if (pch != NULL) { + conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0); + printf("%s: bw_cap 2g = %d\n", __FUNCTION__, conf->bw_cap[0]); + } + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + if (pch != NULL) { + conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0); + printf("%s: bw_cap 5g = %d\n", __FUNCTION__, conf->bw_cap[1]); + } + } + else if (!strncmp("ccode=", full_param, len_param)) { + memset(&conf->cspec, 0, sizeof(wl_country_t)); + memcpy(conf->cspec.country_abbrev, data, len_data); + memcpy(conf->cspec.ccode, data, len_data); + printf("%s: ccode = %s\n", __FUNCTION__, conf->cspec.ccode); + } + else if (!strncmp("regrev=", full_param, len_param)) { + conf->cspec.rev = (int32)simple_strtol(data, NULL, 10); + printf("%s: regrev = %d\n", __FUNCTION__, conf->cspec.rev); + } + else if (!strncmp("channels=", full_param, len_param)) { + pick_tmp = data; + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i=0; + while (pch != NULL && ichannels.channel[i] = (uint32)simple_strtol(pch, NULL, 10); + pch = bcmstrtok(&pick_tmp, " ,.-", 0); + i++; + } + conf->channels.count = i; + printf("%s: channels = ", __FUNCTION__); + for (i=0; ichannels.count; i++) + printf("%d ", conf->channels.channel[i]); + printf("\n"); + } + else if (!strncmp("keep_alive_period=", full_param, len_param)) { + 
conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10); + printf("%s: keep_alive_period = %d\n", __FUNCTION__, + conf->keep_alive_period); + } + else if (!strncmp("phy_oclscdenable=", full_param, len_param)) { + conf->phy_oclscdenable = (int)simple_strtol(data, NULL, 10); + printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable); + } + else if (!strncmp("srl=", full_param, len_param)) { + conf->srl = (int)simple_strtol(data, NULL, 10); + printf("%s: srl = %d\n", __FUNCTION__, conf->srl); + } + else if (!strncmp("lrl=", full_param, len_param)) { + conf->lrl = (int)simple_strtol(data, NULL, 10); + printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl); + } + else if (!strncmp("bcn_timeout=", full_param, len_param)) { + conf->bcn_timeout= (uint)simple_strtol(data, NULL, 10); + printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout); + } + else if (!strncmp("txbf=", full_param, len_param)) { + conf->txbf = (int)simple_strtol(data, NULL, 10); + printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf); + } + else if (!strncmp("frameburst=", full_param, len_param)) { + conf->frameburst = (int)simple_strtol(data, NULL, 10); + printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst); + } + else if (!strncmp("disable_proptx=", full_param, len_param)) { + conf->disable_proptx = (int)simple_strtol(data, NULL, 10); + printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx); + } +#ifdef DHDTCPACK_SUPPRESS + else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) { + conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10); + printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode); + } +#endif + else if (!strncmp("pktprio8021x=", full_param, len_param)) { + conf->pktprio8021x = (int)simple_strtol(data, NULL, 10); + printf("%s: pktprio8021x = %d\n", __FUNCTION__, conf->pktprio8021x); + } +#if defined(BCMSDIO) || defined(BCMPCIE) + else if (!strncmp("dhd_txbound=", full_param, len_param)) { + dhd_txbound = 
(uint)simple_strtol(data, NULL, 10); + printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound); + } + else if (!strncmp("dhd_rxbound=", full_param, len_param)) { + dhd_rxbound = (uint)simple_strtol(data, NULL, 10); + printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound); + } +#endif + else if (!strncmp("tsq=", full_param, len_param)) { + conf->tsq = (int)simple_strtol(data, NULL, 10); + printf("%s: tsq = %d\n", __FUNCTION__, conf->tsq); + } + else if (!strncmp("ctrl_resched=", full_param, len_param)) { + conf->ctrl_resched = (int)simple_strtol(data, NULL, 10); + printf("%s: ctrl_resched = %d\n", __FUNCTION__, conf->ctrl_resched); + } + else if (!strncmp("dhd_ioctl_timeout_msec=", full_param, len_param)) { + conf->dhd_ioctl_timeout_msec = (int)simple_strtol(data, NULL, 10); + printf("%s: dhd_ioctl_timeout_msec = %d\n", __FUNCTION__, conf->dhd_ioctl_timeout_msec); + } + else if (!strncmp("in4way=", full_param, len_param)) { + conf->in4way = (int)simple_strtol(data, NULL, 0); + printf("%s: in4way = 0x%x\n", __FUNCTION__, conf->in4way); + } + else if (!strncmp("max_wait_gc_time=", full_param, len_param)) { + conf->max_wait_gc_time = (int)simple_strtol(data, NULL, 0); + printf("%s: max_wait_gc_time = %d\n", __FUNCTION__, conf->max_wait_gc_time); + } + else if (!strncmp("wl_preinit=", full_param, len_param)) { + if (!(conf->wl_preinit = kmalloc(len_param+1, GFP_KERNEL))) { + CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + } else { + memset(conf->wl_preinit, 0, len_param+1); + strcpy(conf->wl_preinit, data); + printf("%s: wl_preinit = %s\n", __FUNCTION__, conf->wl_preinit); + } + } + else + return false; + + return true; +} + +int +dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) +{ + int bcmerror = -1; + uint len = 0, start_pos=0; + void * image = NULL; + char * memblock = NULL; + char *bufp, *pick = NULL, *pch; + bool conf_file_exists; + uint len_param; + + conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0')); + if 
(!conf_file_exists) { + printf("%s: config path %s\n", __FUNCTION__, conf_path); + return (0); + } + + if (conf_file_exists) { + image = dhd_os_open_image1(dhd, conf_path); + if (image == NULL) { + printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path); + goto err; + } + } + + memblock = MALLOC(dhd->osh, MAXSZ_CONFIG); + if (memblock == NULL) { + CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAXSZ_CONFIG)); + goto err; + } + + pick = MALLOC(dhd->osh, MAXSZ_BUF); + if (!pick) { + CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAXSZ_BUF)); + goto err; + } + + /* Read variables */ + if (conf_file_exists) { + len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image); + } + if (len > 0 && len < MAXSZ_CONFIG) { + bufp = (char *)memblock; + bufp[len] = 0; + + while (start_pos < len) { + memset(pick, 0, MAXSZ_BUF); + start_pos = pick_config_vars(bufp, len, start_pos, pick); + pch = strchr(pick, '='); + if (pch != NULL) { + len_param = pch-pick+1; + if (len_param == strlen(pick)) { + CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick)); + continue; + } + } else { + CONFIG_ERROR(("%s: not a right parameter %s\n", __FUNCTION__, pick)); + continue; + } + + if (dhd_conf_read_log_level(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_roam_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_country_list(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_mchan_params(dhd, pick, len_param)) + continue; +#ifdef PKT_FILTER_SUPPORT + else if (dhd_conf_read_pkt_filter(dhd, pick, len_param)) + continue; +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ISAM_PREINIT + else if (dhd_conf_read_isam(dhd, 
pick, len_param)) + continue; +#endif /* ISAM_PREINIT */ +#ifdef IDHCP + else if (dhd_conf_read_dhcp_params(dhd, pick, len_param)) + continue; +#endif /* IDHCP */ +#ifdef BCMSDIO + else if (dhd_conf_read_sdio_params(dhd, pick, len_param)) + continue; +#endif /* BCMSDIO */ +#ifdef BCMPCIE + else if (dhd_conf_read_pcie_params(dhd, pick, len_param)) + continue; +#endif /* BCMPCIE */ + else if (dhd_conf_read_pm_params(dhd, pick, len_param)) + continue; + else if (dhd_conf_read_others(dhd, pick, len_param)) + continue; + else + continue; + } + + bcmerror = 0; + } else { + CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (pick) + MFREE(dhd->osh, pick, MAXSZ_BUF); + + if (memblock) + MFREE(dhd->osh, memblock, MAXSZ_CONFIG); + + if (image) + dhd_os_close_image1(dhd, image); + + return bcmerror; +} + +int +dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev) +{ + printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev); + dhd->conf->chip = chip; + dhd->conf->chiprev = chiprev; + return 0; +} + +uint +dhd_conf_get_chip(void *context) +{ + dhd_pub_t *dhd = context; + + if (dhd && dhd->conf) + return dhd->conf->chip; + return 0; +} + +uint +dhd_conf_get_chiprev(void *context) +{ + dhd_pub_t *dhd = context; + + if (dhd && dhd->conf) + return dhd->conf->chiprev; + return 0; +} + +#ifdef BCMSDIO +void +dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) +{ + struct dhd_conf *conf = dhd->conf; + + if (enable) { +#if defined(BCMSDIOH_TXGLOM_EXT) + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_mode = SDPCM_TXGLOM_CPY; + } +#endif + // other parameters set in preinit or config.txt + } else { + // clear txglom parameters + conf->txglom_ext = FALSE; + conf->txglom_bucket_size = 0; + conf->txglomsize = 0; + 
conf->deferred_tx_len = 0; + } + if (conf->txglom_ext) + printf("%s: txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__, + conf->txglom_ext, conf->txglom_bucket_size); + printf("%s: txglom_mode=%s\n", __FUNCTION__, + conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy"); + printf("%s: txglomsize=%d, deferred_tx_len=%d\n", __FUNCTION__, + conf->txglomsize, conf->deferred_tx_len); + printf("%s: txinrx_thres=%d, dhd_txminmax=%d\n", __FUNCTION__, + conf->txinrx_thres, conf->dhd_txminmax); + printf("%s: tx_max_offset=%d, txctl_tmo_fix=%d\n", __FUNCTION__, + conf->tx_max_offset, conf->txctl_tmo_fix); + +} +#endif + +static int +dhd_conf_rsdb_mode(dhd_pub_t *dhd, char *buf) +{ + char *pch; + wl_config_t rsdb_mode_cfg = {1, 0}; + + pch = buf; + rsdb_mode_cfg.config = (int)simple_strtol(pch, NULL, 0); + + if (pch) { + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg, + sizeof(rsdb_mode_cfg), TRUE); + printf("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config); + } + + return 0; +} + +typedef int (tpl_parse_t)(dhd_pub_t *dhd, char *buf); + +typedef struct iovar_tpl_t { + int cmd; + char *name; + tpl_parse_t *parse; +} iovar_tpl_t; + +const iovar_tpl_t iovar_tpl_list[] = { + {WLC_SET_VAR, "rsdb_mode", dhd_conf_rsdb_mode}, +}; + +static int iovar_tpl_parse(const iovar_tpl_t *tpl, int tpl_count, + dhd_pub_t *dhd, int cmd, char *name, char *buf) +{ + int i, ret = 0; + + /* look for a matching code in the table */ + for (i = 0; i < tpl_count; i++, tpl++) { + if (tpl->cmd == cmd && !strcmp(tpl->name, name)) + break; + } + if (i < tpl_count && tpl->parse) { + ret = tpl->parse(dhd, buf); + } else { + ret = -1; + } + + return ret; +} + +bool +dhd_conf_set_wl_preinit(dhd_pub_t *dhd, char *data) +{ + int cmd, val, ret = 0; + char name[32], *pch, *pick_tmp, *pick_tmp2; + + /* Process wl_preinit: + * wl_preinit=[cmd]=[val], [cmd]=[val] + * Ex: wl_preinit=86=0, mpc=0 + */ + pick_tmp = data; + while (pick_tmp && (pick_tmp2 = bcmstrtok(&pick_tmp, ",", 
0)) != NULL) { + pch = bcmstrtok(&pick_tmp2, "=", 0); + if (!pch) + break; + if (*pch == ' ') { + pch++; + } + memset(name, 0 , sizeof (name)); + cmd = (int)simple_strtol(pch, NULL, 0); + if (cmd == 0) { + cmd = WLC_SET_VAR; + strcpy(name, pch); + } + pch = bcmstrtok(&pick_tmp2, ",", 0); + if (!pch) { + break; + } + ret = iovar_tpl_parse(iovar_tpl_list, ARRAY_SIZE(iovar_tpl_list), + dhd, cmd, name, pch); + if (ret) { + val = (int)simple_strtol(pch, NULL, 0); + dhd_conf_set_intiovar(dhd, cmd, name, val, -1, TRUE); + } + } + + return true; +} + +void +dhd_conf_postinit_ioctls(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + + dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE); + dhd_conf_map_country_list(dhd, &conf->cspec); + dhd_conf_set_country(dhd, &conf->cspec); + dhd_conf_fix_country(dhd); + dhd_conf_get_country(dhd, &dhd->dhd_cspec); + + dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_PM, "PM", conf->pm, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, TRUE); + dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE); + dhd_conf_set_bw_cap(dhd); + dhd_conf_set_roam(dhd); + +#if defined(BCMPCIE) + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bus:deepsleep_disable", + conf->bus_deepsleep_disable, 0, FALSE); +#endif /* defined(BCMPCIE) */ + +#ifdef IDHCP + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable, 0, FALSE); + if (dhd->conf->dhcpd_enable >= 0) { + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_addr", + (char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_mask", + (char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_start", + (char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), 
FALSE); + dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_end", + (char *)&conf->dhcpd_ip_end, sizeof(conf->dhcpd_ip_end), FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpd_enable", + conf->dhcpd_enable, 0, FALSE); + } +#endif + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", conf->txbf, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", conf->frameburst, 0, FALSE); + + dhd_conf_set_wl_preinit(dhd, conf->wl_preinit); + +#ifndef WL_CFG80211 + dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE); +#endif + +} + +int +dhd_conf_preinit(dhd_pub_t *dhd) +{ + struct dhd_conf *conf = dhd->conf; + int i; + + CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BCMSDIO + dhd_conf_free_mac_list(&conf->fw_by_mac); + dhd_conf_free_mac_list(&conf->nv_by_mac); + dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); +#endif + dhd_conf_free_country_list(&conf->country_list); + if (conf->magic_pkt_filter_add) { + kfree(conf->magic_pkt_filter_add); + conf->magic_pkt_filter_add = NULL; + } + if (conf->wl_preinit) { + kfree(conf->wl_preinit); + conf->wl_preinit = NULL; + } + memset(&conf->country_list, 0, sizeof(conf_country_list_t)); + conf->band = -1; + memset(&conf->bw_cap, -1, sizeof(conf->bw_cap)); + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { + strcpy(conf->cspec.country_abbrev, "ALL"); + strcpy(conf->cspec.ccode, "ALL"); + conf->cspec.rev = 0; + } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID || + conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID || + conf->chip == BCM4362_CHIP_ID || conf->chip == BCM43751_CHIP_ID) { + strcpy(conf->cspec.country_abbrev, "CN"); + strcpy(conf->cspec.ccode, "CN"); + conf->cspec.rev = 38; + } else { + strcpy(conf->cspec.country_abbrev, "CN"); + strcpy(conf->cspec.ccode, "CN"); + conf->cspec.rev = 0; + } + 
memset(&conf->channels, 0, sizeof(wl_channel_list_t)); + conf->roam_off = 1; + conf->roam_off_suspend = 1; +#ifdef CUSTOM_ROAM_TRIGGER_SETTING + conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING; +#else + conf->roam_trigger[0] = -65; +#endif + conf->roam_trigger[1] = WLC_BAND_ALL; + conf->roam_scan_period[0] = 10; + conf->roam_scan_period[1] = WLC_BAND_ALL; +#ifdef CUSTOM_ROAM_DELTA_SETTING + conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING; +#else + conf->roam_delta[0] = 15; +#endif + conf->roam_delta[1] = WLC_BAND_ALL; +#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC + conf->fullroamperiod = 60; +#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ + conf->fullroamperiod = 120; +#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#ifdef CUSTOM_KEEP_ALIVE_SETTING + conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING; +#else + conf->keep_alive_period = 28000; +#endif + conf->force_wme_ac = 0; + memset(&conf->wme_sta, 0, sizeof(wme_param_t)); + memset(&conf->wme_ap, 0, sizeof(wme_param_t)); + conf->phy_oclscdenable = -1; +#ifdef PKT_FILTER_SUPPORT + memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t)); + memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t)); +#endif + conf->srl = -1; + conf->lrl = -1; + conf->bcn_timeout = 16; + conf->txbf = -1; + conf->disable_proptx = -1; + conf->dhd_poll = -1; +#ifdef BCMSDIO + conf->use_rxchain = 0; + conf->bus_rxglom = TRUE; + conf->txglom_ext = FALSE; + conf->tx_max_offset = 0; + conf->txglomsize = SDPCM_DEFGLOM_SIZE; + conf->txctl_tmo_fix = 300; + conf->txglom_mode = SDPCM_TXGLOM_CPY; + conf->deferred_tx_len = 0; + conf->dhd_txminmax = 1; + conf->txinrx_thres = -1; + conf->sd_f2_blocksize = 0; +#if defined(HW_OOB) + conf->oob_enabled_later = FALSE; +#endif + conf->orphan_move = 0; +#endif +#ifdef BCMPCIE + conf->bus_deepsleep_disable = 1; +#endif + conf->dpc_cpucore = -1; + conf->rxf_cpucore = -1; + conf->frameburst = -1; + conf->deepsleep = FALSE; + conf->pm = -1; + conf->pm_in_suspend = -1; + conf->suspend_bcn_li_dtim = -1; + 
conf->xmit_in_suspend = TRUE; + conf->ap_in_suspend = 0; +#ifdef SUSPEND_EVENT + conf->suspend_eventmask_enable = FALSE; + memset(&conf->suspend_eventmask, 0, sizeof(conf->suspend_eventmask)); + memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask)); +#endif +#ifdef IDHCP + conf->dhcpc_enable = -1; + conf->dhcpd_enable = -1; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + conf->tsq = 10; +#else + conf->tsq = 0; +#endif +#ifdef DHDTCPACK_SUPPRESS +#ifdef BCMPCIE + conf->tcpack_sup_mode = TCPACK_SUP_HOLD; +#else + conf->tcpack_sup_mode = TCPACK_SUP_OFF; +#endif +#endif + conf->pktprio8021x = -1; + conf->ctrl_resched = 2; + conf->dhd_ioctl_timeout_msec = 0; + conf->in4way = NO_SCAN_IN4WAY | WAIT_DISCONNECTED; + conf->max_wait_gc_time = 300; +#ifdef ISAM_PREINIT + memset(conf->isam_init, 0, sizeof(conf->isam_init)); + memset(conf->isam_config, 0, sizeof(conf->isam_config)); + memset(conf->isam_enable, 0, sizeof(conf->isam_enable)); +#endif + for (i=0; i<MCHAN_MAX_NUM; i++) { + memset(&conf->mchan[i], -1, sizeof(mchan_params_t)); + } +#ifdef CUSTOMER_HW_AMLOGIC + dhd_slpauto = FALSE; +#endif + if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID || + conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || + conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID || + conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID || + conf->chip == BCM4362_CHIP_ID || conf->chip == BCM43751_CHIP_ID) { +#ifdef DHDTCPACK_SUPPRESS +#ifdef BCMSDIO + conf->tcpack_sup_mode = TCPACK_SUP_REPLACE; +#endif +#endif +#if defined(BCMSDIO) || defined(BCMPCIE) + dhd_rxbound = 128; + dhd_txbound = 64; +#endif + conf->txbf = 1; + conf->frameburst = 1; +#ifdef BCMSDIO + conf->dhd_txminmax = -1; + conf->txinrx_thres = 128; + conf->sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + conf->orphan_move = 1; +#else + conf->orphan_move = 0; +#endif +#endif + } + +#ifdef BCMSDIO +#if defined(BCMSDIOH_TXGLOM_EXT) + if (conf->chip == 
BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || + conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || + conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_ext = TRUE; + } else { + conf->txglom_ext = FALSE; + } + if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) { + conf->txglom_bucket_size = 1680; // fixed value, don't change + conf->txglomsize = 6; + } + if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID || + conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { + conf->txglom_bucket_size = 1684; // fixed value, don't change + conf->txglomsize = 16; + } +#endif + if (conf->txglomsize > SDPCM_MAXGLOM_SIZE) + conf->txglomsize = SDPCM_MAXGLOM_SIZE; +#endif + + return 0; +} + +int +dhd_conf_reset(dhd_pub_t *dhd) +{ +#ifdef BCMSDIO + dhd_conf_free_mac_list(&dhd->conf->fw_by_mac); + dhd_conf_free_mac_list(&dhd->conf->nv_by_mac); + dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip); +#endif + dhd_conf_free_country_list(&dhd->conf->country_list); + if (dhd->conf->magic_pkt_filter_add) { + kfree(dhd->conf->magic_pkt_filter_add); + dhd->conf->magic_pkt_filter_add = NULL; + } + if (dhd->conf->wl_preinit) { + kfree(dhd->conf->wl_preinit); + dhd->conf->wl_preinit = NULL; + } + memset(dhd->conf, 0, sizeof(dhd_conf_t)); + return 0; +} + +int +dhd_conf_attach(dhd_pub_t *dhd) +{ + dhd_conf_t *conf; + + CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd->conf != NULL) { + printf("%s: config is attached before!\n", __FUNCTION__); + return 0; + } + /* Allocate private bus interface state */ + if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) { + CONFIG_ERROR(("%s: MALLOC failed\n", __FUNCTION__)); + goto fail; + } + memset(conf, 0, sizeof(dhd_conf_t)); + + dhd->conf = conf; + + return 0; + +fail: + if (conf != NULL) + MFREE(dhd->osh, conf, sizeof(dhd_conf_t)); + return BCME_NOMEM; +} + +void +dhd_conf_detach(dhd_pub_t *dhd) +{ + CONFIG_TRACE(("%s: Enter\n", __FUNCTION__)); 
+ + if (dhd->conf) { +#ifdef BCMSDIO + dhd_conf_free_mac_list(&dhd->conf->fw_by_mac); + dhd_conf_free_mac_list(&dhd->conf->nv_by_mac); + dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip); +#endif + dhd_conf_free_country_list(&dhd->conf->country_list); + if (dhd->conf->magic_pkt_filter_add) { + kfree(dhd->conf->magic_pkt_filter_add); + dhd->conf->magic_pkt_filter_add = NULL; + } + if (dhd->conf->wl_preinit) { + kfree(dhd->conf->wl_preinit); + dhd->conf->wl_preinit = NULL; + } + MFREE(dhd->osh, dhd->conf, sizeof(dhd_conf_t)); + } + dhd->conf = NULL; +} diff --git a/bcmdhd.100.10.315.x/dhd_config.h b/bcmdhd.100.10.315.x/dhd_config.h new file mode 100644 index 0000000..d7ad3c8 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_config.h @@ -0,0 +1,285 @@ + +#ifndef _dhd_config_ +#define _dhd_config_ + +#include +#include +#include +#include +#include +#include <802.11.h> + +#define FW_TYPE_STA 0 +#define FW_TYPE_APSTA 1 +#define FW_TYPE_P2P 2 +#define FW_TYPE_MESH 3 +#define FW_TYPE_ES 4 +#define FW_TYPE_MFG 5 +#define FW_TYPE_G 0 +#define FW_TYPE_AG 1 + +#define FW_PATH_AUTO_SELECT 1 +//#define CONFIG_PATH_AUTO_SELECT +extern char firmware_path[MOD_PARAM_PATHLEN]; +#if defined(BCMSDIO) || defined(BCMPCIE) +extern uint dhd_rxbound; +extern uint dhd_txbound; +#endif +#ifdef BCMSDIO +#define TXGLOM_RECV_OFFSET 8 +extern uint dhd_doflow; +extern uint dhd_slpauto; +#endif + +typedef struct wl_mac_range { + uint32 oui; + uint32 nic_start; + uint32 nic_end; +} wl_mac_range_t; + +typedef struct wl_mac_list { + int count; + wl_mac_range_t *mac; + char name[MOD_PARAM_PATHLEN]; +} wl_mac_list_t; + +typedef struct wl_mac_list_ctrl { + int count; + struct wl_mac_list *m_mac_list_head; +} wl_mac_list_ctrl_t; + +typedef struct wl_chip_nv_path { + uint chip; + uint chiprev; + char name[MOD_PARAM_PATHLEN]; +} wl_chip_nv_path_t; + +typedef struct wl_chip_nv_path_list_ctrl { + int count; + struct wl_chip_nv_path *m_chip_nv_path_head; +} wl_chip_nv_path_list_ctrl_t; + +typedef struct 
wl_channel_list { + uint32 count; + uint32 channel[WL_NUMCHANNELS]; +} wl_channel_list_t; + +typedef struct wmes_param { + int aifsn[AC_COUNT]; + int ecwmin[AC_COUNT]; + int ecwmax[AC_COUNT]; + int txop[AC_COUNT]; +} wme_param_t; + +#ifdef PKT_FILTER_SUPPORT +#define DHD_CONF_FILTER_MAX 8 +#define PKT_FILTER_LEN 300 +#define MAGIC_PKT_FILTER_LEN 450 +typedef struct conf_pkt_filter_add { + uint32 count; + char filter[DHD_CONF_FILTER_MAX][PKT_FILTER_LEN]; +} conf_pkt_filter_add_t; + +typedef struct conf_pkt_filter_del { + uint32 count; + uint32 id[DHD_CONF_FILTER_MAX]; +} conf_pkt_filter_del_t; +#endif + +#define CONFIG_COUNTRY_LIST_SIZE 100 +typedef struct conf_country_list { + uint32 count; + wl_country_t *cspec[CONFIG_COUNTRY_LIST_SIZE]; +} conf_country_list_t; + +/* mchan_params */ +#define MCHAN_MAX_NUM 4 +#define MIRACAST_SOURCE 1 +#define MIRACAST_SINK 2 +typedef struct mchan_params { + int bw; + int p2p_mode; + int miracast_mode; +} mchan_params_t; + +enum in4way_flags { + NO_SCAN_IN4WAY = (1 << (0)), + NO_BTC_IN4WAY = (1 << (1)), + DONT_DELETE_GC_AFTER_WPS = (1 << (2)), + WAIT_DISCONNECTED = (1 << (3)), +}; + +enum eapol_status { + EAPOL_STATUS_NONE = 0, + EAPOL_STATUS_WPS_REQID, + EAPOL_STATUS_WPS_RSPID, + EAPOL_STATUS_WPS_WSC_START, + EAPOL_STATUS_WPS_M1, + EAPOL_STATUS_WPS_M2, + EAPOL_STATUS_WPS_M3, + EAPOL_STATUS_WPS_M4, + EAPOL_STATUS_WPS_M5, + EAPOL_STATUS_WPS_M6, + EAPOL_STATUS_WPS_M7, + EAPOL_STATUS_WPS_M8, + EAPOL_STATUS_WPS_DONE, + EAPOL_STATUS_WPA_START, + EAPOL_STATUS_WPA_M1, + EAPOL_STATUS_WPA_M2, + EAPOL_STATUS_WPA_M3, + EAPOL_STATUS_WPA_M4, + EAPOL_STATUS_WPA_END +}; + +typedef struct dhd_conf { + uint chip; + uint chiprev; + int fw_type; + wl_mac_list_ctrl_t fw_by_mac; + wl_mac_list_ctrl_t nv_by_mac; + wl_chip_nv_path_list_ctrl_t nv_by_chip; + conf_country_list_t country_list; + int band; + int bw_cap[2]; + wl_country_t cspec; + wl_channel_list_t channels; + uint roam_off; + uint roam_off_suspend; + int roam_trigger[2]; + int 
roam_scan_period[2]; + int roam_delta[2]; + int fullroamperiod; + uint keep_alive_period; + int force_wme_ac; + wme_param_t wme_sta; + wme_param_t wme_ap; + int phy_oclscdenable; +#ifdef PKT_FILTER_SUPPORT + conf_pkt_filter_add_t pkt_filter_add; + conf_pkt_filter_del_t pkt_filter_del; + char *magic_pkt_filter_add; +#endif + int srl; + int lrl; + uint bcn_timeout; + int txbf; + int disable_proptx; + int dhd_poll; +#ifdef BCMSDIO + int use_rxchain; + bool bus_rxglom; + bool txglom_ext; /* Only for 43362/4330/43340/43341/43241 */ + /* terence 20161011: + 1) conf->tx_max_offset = 1 to fix credict issue in adaptivity testing + 2) conf->tx_max_offset = 1 will cause to UDP Tx not work in rxglom supported, + but not happened in sw txglom + */ + int tx_max_offset; + uint txglomsize; + int txctl_tmo_fix; + bool txglom_mode; + uint deferred_tx_len; + /*txglom_bucket_size: + * 43362/4330: 1680 + * 43340/43341/43241: 1684 + */ + int txglom_bucket_size; + int txinrx_thres; + int dhd_txminmax; // -1=DATABUFCNT(bus) + uint sd_f2_blocksize; + bool oob_enabled_later; + int orphan_move; +#endif +#ifdef BCMPCIE + int bus_deepsleep_disable; +#endif + int dpc_cpucore; + int rxf_cpucore; + int frameburst; + bool deepsleep; + int pm; + int pm_in_suspend; + int suspend_bcn_li_dtim; +#ifdef DHDTCPACK_SUPPRESS + uint8 tcpack_sup_mode; +#endif + int pktprio8021x; + int xmit_in_suspend; + int ap_in_suspend; +#ifdef SUSPEND_EVENT + bool suspend_eventmask_enable; + char suspend_eventmask[WL_EVENTING_MASK_LEN]; + char resume_eventmask[WL_EVENTING_MASK_LEN]; +#endif +#ifdef IDHCP + int dhcpc_enable; + int dhcpd_enable; + struct ipv4_addr dhcpd_ip_addr; + struct ipv4_addr dhcpd_ip_mask; + struct ipv4_addr dhcpd_ip_start; + struct ipv4_addr dhcpd_ip_end; +#endif +#ifdef ISAM_PREINIT + char isam_init[50]; + char isam_config[300]; + char isam_enable[50]; +#endif + int ctrl_resched; + int dhd_ioctl_timeout_msec; + struct mchan_params mchan[MCHAN_MAX_NUM]; + char *wl_preinit; + int tsq; + uint 
eapol_status; + uint in4way; + uint max_wait_gc_time; +} dhd_conf_t; + +#ifdef BCMSDIO +int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, uint8 *mac); +void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *fw_path); +void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih, char *nv_path); +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih); +#endif +void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable); +int dhd_conf_set_blksize(bcmsdh_info_t *sdh); +#endif +void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path); +void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path); +void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path); +void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path); +#ifdef CONFIG_PATH_AUTO_SELECT +void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path); +#endif +int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down); +int dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, int len, bool down); +uint dhd_conf_get_band(dhd_pub_t *dhd); +int dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec); +int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec); +int dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec); +int dhd_conf_fix_country(dhd_pub_t *dhd); +bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel); +void dhd_conf_set_wme(dhd_pub_t *dhd, int mode); +void dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int go, int source); +void dhd_conf_add_pkt_filter(dhd_pub_t *dhd); +bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id); +void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd); +int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path); +int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev); +uint dhd_conf_get_chip(void *context); +uint 
dhd_conf_get_chiprev(void *context); +int dhd_conf_get_pm(dhd_pub_t *dhd); + +#ifdef PROP_TXSTATUS +int dhd_conf_get_disable_proptx(dhd_pub_t *dhd); +#endif +int dhd_conf_get_ap_mode_in_suspend(dhd_pub_t *dhd); +int dhd_conf_set_ap_in_suspend(dhd_pub_t *dhd, int suspend); +void dhd_conf_postinit_ioctls(dhd_pub_t *dhd); +int dhd_conf_preinit(dhd_pub_t *dhd); +int dhd_conf_reset(dhd_pub_t *dhd); +int dhd_conf_attach(dhd_pub_t *dhd); +void dhd_conf_detach(dhd_pub_t *dhd); +void *dhd_get_pub(struct net_device *dev); +void *dhd_get_conf(struct net_device *dev); +#endif /* _dhd_config_ */ diff --git a/bcmdhd.100.10.315.x/dhd_custom_gpio.c b/bcmdhd.100.10.315.x/dhd_custom_gpio.c new file mode 100644 index 0000000..aa60f57 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_custom_gpio.c @@ -0,0 +1,278 @@ +/* + * Customer code to add GPIO control during WLAN start/stop + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_custom_gpio.c 717227 2017-08-23 13:51:13Z $ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#if defined(WL_WIRELESS_EXT) +#include +#endif // endif + +#define WL_ERROR(x) printf x +#define WL_TRACE(x) + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + +#if defined(BCMLXSDMMC) +extern int sdioh_mmc_irq(int irq); +#endif /* (BCMLXSDMMC) */ + +/* Customer specific Host GPIO defintion */ +static int dhd_oob_gpio_num = -1; + +module_param(dhd_oob_gpio_num, int, 0644); +MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number"); + +/* This function will return: + * 1) return : Host gpio interrupt number per customer platform + * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge + * + * NOTE : + * Customer should check his platform definitions + * and his Host Interrupt spec + * to figure out the proper setting for his platform. + * Broadcom provides just reference settings as example. + * + */ +int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr) +{ + int host_oob_irq = 0; + + host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr); + + return (host_oob_irq); +} +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + +/* Customer function to control hw specific wlan gpios */ +int +dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff) +{ + int err = 0; + + return err; +} + +#ifdef GET_CUSTOM_MAC_ENABLE +/* Function to get custom MAC address */ +int +dhd_custom_get_mac_address(void *adapter, unsigned char *buf) +{ + int ret = 0; + + WL_TRACE(("%s Enter\n", __FUNCTION__)); + if (!buf) + return -EINVAL; + + /* Customer access to MAC address stored outside of DHD driver */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + ret = wifi_platform_get_mac_addr(adapter, buf); +#endif // endif + +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, 
buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ + + return ret; +} +#endif /* GET_CUSTOM_MAC_ENABLE */ + +/* Customized Locale table : OPTIONAL feature */ +const struct cntry_locales_custom translate_custom_table[] = { +/* Table should be filled out based on custom platform regulatory requirement */ +#ifdef EXAMPLE_TABLE + {"", "XY", 4}, /* Universal if Country code is unknown or empty */ + {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */ + {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */ + {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */ + {"AT", "EU", 5}, + {"BE", "EU", 5}, + {"BG", "EU", 5}, + {"CY", "EU", 5}, + {"CZ", "EU", 5}, + {"DK", "EU", 5}, + {"EE", "EU", 5}, + {"FI", "EU", 5}, + {"FR", "EU", 5}, + {"DE", "EU", 5}, + {"GR", "EU", 5}, + {"HU", "EU", 5}, + {"IE", "EU", 5}, + {"IT", "EU", 5}, + {"LV", "EU", 5}, + {"LI", "EU", 5}, + {"LT", "EU", 5}, + {"LU", "EU", 5}, + {"MT", "EU", 5}, + {"NL", "EU", 5}, + {"PL", "EU", 5}, + {"PT", "EU", 5}, + {"RO", "EU", 5}, + {"SK", "EU", 5}, + {"SI", "EU", 5}, + {"ES", "EU", 5}, + {"SE", "EU", 5}, + {"GB", "EU", 5}, + {"KR", "XY", 3}, + {"AU", "XY", 3}, + {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */ + {"TW", "XY", 3}, + {"AR", "XY", 3}, + {"MX", "XY", 3}, + {"IL", "IL", 0}, + {"CH", "CH", 0}, + {"TR", "TR", 0}, + {"NO", "NO", 0}, +#endif /* EXMAPLE_TABLE */ +#if defined(BCM4335_CHIP) + {"", "XZ", 11}, /* Universal if Country code is unknown or empty */ +#endif // endif + {"AE", "AE", 1}, + {"AR", "AR", 1}, + {"AT", "AT", 1}, + {"AU", "AU", 2}, + {"BE", "BE", 1}, + {"BG", "BG", 1}, + {"BN", "BN", 1}, + {"CA", "CA", 2}, + {"CH", "CH", 1}, + {"CY", "CY", 1}, + {"CZ", "CZ", 1}, + {"DE", "DE", 3}, + {"DK", "DK", 1}, + {"EE", "EE", 1}, + {"ES", "ES", 1}, + {"FI", "FI", 1}, + {"FR", "FR", 1}, + {"GB", "GB", 1}, + {"GR", "GR", 1}, + {"HR", "HR", 1}, + {"HU", "HU", 1}, + {"IE", "IE", 1}, + {"IS", "IS", 1}, + {"IT", "IT", 1}, + {"ID", "ID", 1}, + {"JP", "JP", 8}, + 
{"KR", "KR", 24}, + {"KW", "KW", 1}, + {"LI", "LI", 1}, + {"LT", "LT", 1}, + {"LU", "LU", 1}, + {"LV", "LV", 1}, + {"MA", "MA", 1}, + {"MT", "MT", 1}, + {"MX", "MX", 1}, + {"NL", "NL", 1}, + {"NO", "NO", 1}, + {"PL", "PL", 1}, + {"PT", "PT", 1}, + {"PY", "PY", 1}, + {"RO", "RO", 1}, + {"SE", "SE", 1}, + {"SI", "SI", 1}, + {"SK", "SK", 1}, + {"TR", "TR", 7}, + {"TW", "TW", 1}, + {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */ + {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */ + {"SY", "XZ", 11}, /* Universal if Country code is SYRIAN ARAB REPUBLIC */ + {"GL", "XZ", 11}, /* Universal if Country code is GREENLAND */ + {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */ + {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */ + {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */ +}; + +/* Customized Locale convertor +* input : ISO 3166-1 country abbreviation +* output: customized cspec +*/ +void +#ifdef CUSTOM_COUNTRY_CODE +get_customized_country_code(void *adapter, char *country_iso_code, + wl_country_t *cspec, u32 flags) +#else +get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) +#endif /* CUSTOM_COUNTRY_CODE */ +{ +#if (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + + struct cntry_locales_custom *cloc_ptr; + + if (!cspec) + return; +#ifdef CUSTOM_COUNTRY_CODE + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, flags); +#else + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code); +#endif /* CUSTOM_COUNTRY_CODE */ + + if (cloc_ptr) { + strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = cloc_ptr->custom_locale_rev; + } + return; +#else + int size, i; + + size = ARRAYSIZE(translate_custom_table); + + if (cspec == 0) + return; + + if (size == 0) + return; + + for (i = 0; i < size; i++) { + if 
(strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) { + memcpy(cspec->ccode, + translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[i].custom_locale_rev; + return; + } + } +#ifdef EXAMPLE_TABLE + /* if no country code matched return first universal code from translate_custom_table */ + memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[0].custom_locale_rev; +#endif /* EXMAPLE_TABLE */ + return; +#endif /* (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && + * (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) + */ +} diff --git a/bcmdhd.100.10.315.x/dhd_custom_hikey.c b/bcmdhd.100.10.315.x/dhd_custom_hikey.c new file mode 100644 index 0000000..b2e7781 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_custom_hikey.c @@ -0,0 +1,283 @@ +/* + * Platform Dependent file for Hikey + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * <> + * + * $Id$ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM +extern int dhd_init_wlan_mem(void); +extern void *dhd_wlan_mem_prealloc(int section, unsigned long size); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +#define WIFI_TURNON_DELAY 200 +#define WLAN_REG_ON_GPIO 491 +#define WLAN_HOST_WAKE_GPIO 493 + +static int wlan_reg_on = -1; +#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan" +#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on" + +static int wlan_host_wake_up = -1; +static int wlan_host_wake_irq = 0; +#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake" + +int +dhd_wifi_init_gpio(void) +{ + int gpio_reg_on_val; + /* ========== WLAN_PWR_EN ============ */ + char *wlan_node = DHD_DT_COMPAT_ENTRY; + struct device_node *root_node = NULL; + + root_node = of_find_compatible_node(NULL, NULL, wlan_node); + if (root_node) { + wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0); + wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0); + } else { + printk(KERN_ERR "failed to get device node of BRCM WLAN, use default GPIOs\n"); + wlan_reg_on = WLAN_REG_ON_GPIO; + wlan_host_wake_up = WLAN_HOST_WAKE_GPIO; + } + + /* ========== WLAN_PWR_EN ============ */ + printk(KERN_INFO "%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on); + + /* + * For reg_on, gpio_request will fail if the gpio is configured to output-high + * in the dts using gpio-hog, so do not return error for failure. 
+ */ + if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_HIGH, "WL_REG_ON")) { + printk(KERN_ERR "%s: Failed to request gpio %d for WL_REG_ON, " + "might have configured in the dts\n", + __FUNCTION__, wlan_reg_on); + } else { + printk(KERN_ERR "%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n", + __FUNCTION__, wlan_reg_on); + } + + gpio_reg_on_val = gpio_get_value(wlan_reg_on); + printk(KERN_INFO "%s: Initial WL_REG_ON: [%d]\n", + __FUNCTION__, gpio_get_value(wlan_reg_on)); + + if (gpio_reg_on_val == 0) { + printk(KERN_INFO "%s: WL_REG_ON is LOW, drive it HIGH\n", __FUNCTION__); + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__); + return -EIO; + } + } + + printk(KERN_ERR "%s: WL_REG_ON is pulled up\n", __FUNCTION__); + + /* Wait for WIFI_TURNON_DELAY due to power stability */ + msleep(WIFI_TURNON_DELAY); + + /* ========== WLAN_HOST_WAKE ============ */ + printk(KERN_INFO "%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up); + + if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) { + printk(KERN_ERR "%s: Failed to request gpio %d for WLAN_HOST_WAKE\n", + __FUNCTION__, wlan_host_wake_up); + return -ENODEV; + } else { + printk(KERN_ERR "%s: gpio_request WLAN_HOST_WAKE done" + " - WLAN_HOST_WAKE: GPIO %d\n", + __FUNCTION__, wlan_host_wake_up); + } + + if (gpio_direction_input(wlan_host_wake_up)) { + printk(KERN_ERR "%s: Failed to set WL_HOST_WAKE gpio direction\n", __FUNCTION__); + } + + wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up); + + return 0; +} + +extern void kirin_pcie_power_on_atu_fixup(void) __attribute__ ((weak)); +extern int kirin_pcie_lp_ctrl(u32 enable) __attribute__ ((weak)); + +#ifndef BOARD_HIKEY_MODULAR +int +dhd_wlan_power(int onoff) +{ + printk(KERN_INFO"------------------------------------------------"); + printk(KERN_INFO"------------------------------------------------\n"); + printk(KERN_INFO"%s Enter: power %s\n", __func__, onoff ? 
"on" : "off"); + + if (onoff) { + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on)); + } else { + printk("[%s] gpio value is 0. We need reinit.\n", __func__); + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: WL_REG_ON is " + "failed to pull up\n", __func__); + } + } + + /* Wait for WIFI_TURNON_DELAY due to power stability */ + msleep(WIFI_TURNON_DELAY); + + /* + * Call Kiric RC ATU fixup else si_attach will fail due to + * improper BAR0/1 address translations + */ + if (kirin_pcie_power_on_atu_fixup) { + kirin_pcie_power_on_atu_fixup(); + } else { + printk(KERN_ERR "[%s] kirin_pcie_power_on_atu_fixup is NULL. " + "REG_ON may not work\n", __func__); + } + /* Enable ASPM after powering ON */ + if (kirin_pcie_lp_ctrl) { + kirin_pcie_lp_ctrl(onoff); + } else { + printk(KERN_ERR "[%s] kirin_pcie_lp_ctrl is NULL. " + "ASPM may not work\n", __func__); + } + } else { + /* Disable ASPM before powering off */ + if (kirin_pcie_lp_ctrl) { + kirin_pcie_lp_ctrl(onoff); + } else { + printk(KERN_ERR "[%s] kirin_pcie_lp_ctrl is NULL. 
" + "ASPM may not work\n", __func__); + } + if (gpio_direction_output(wlan_reg_on, 0)) { + printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on)); + } + } + return 0; +} +EXPORT_SYMBOL(dhd_wlan_power); +#endif /* BOARD_HIKEY_MODULAR */ + +static int +dhd_wlan_reset(int onoff) +{ + return 0; +} + +static int +dhd_wlan_set_carddetect(int val) +{ + return 0; +} + +#ifdef BCMSDIO +static int dhd_wlan_get_wake_irq(void) +{ + return gpio_to_irq(wlan_host_wake_up); +} +#endif /* BCMSDIO */ + +struct resource dhd_wlan_resources = { + .name = "bcmdhd_wlan_irq", + .start = 0, /* Dummy */ + .end = 0, /* Dummy */ + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE | + IORESOURCE_IRQ_HIGHEDGE, +}; +EXPORT_SYMBOL(dhd_wlan_resources); + +struct wifi_platform_data dhd_wlan_control = { +#ifndef BOARD_HIKEY_MODULAR + .set_power = dhd_wlan_power, +#endif /* BOARD_HIKEY_MODULAR */ + .set_reset = dhd_wlan_reset, + .set_carddetect = dhd_wlan_set_carddetect, +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + .mem_prealloc = dhd_wlan_mem_prealloc, +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ +#ifdef BCMSDIO + .get_wake_irq = dhd_wlan_get_wake_irq, +#endif // endif +}; +EXPORT_SYMBOL(dhd_wlan_control); + +int +dhd_wlan_init(void) +{ + int ret; + + printk(KERN_INFO"%s: START.......\n", __FUNCTION__); + ret = dhd_wifi_init_gpio(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n", + __FUNCTION__, ret); + goto fail; + } + + dhd_wlan_resources.start = wlan_host_wake_irq; + dhd_wlan_resources.end = wlan_host_wake_irq; + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + ret = dhd_init_wlan_mem(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to alloc reserved memory," + " ret=%d\n", __FUNCTION__, ret); + } +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +fail: + printk(KERN_INFO"%s: FINISH.......\n", __FUNCTION__); + return 
ret; +} + +int +dhd_wlan_deinit(void) +{ + gpio_free(wlan_host_wake_up); + gpio_free(wlan_reg_on); + return 0; +} +#ifndef BOARD_HIKEY_MODULAR +/* Required only for Built-in DHD */ +device_initcall(dhd_wlan_init); +#endif /* BOARD_HIKEY_MODULAR */ diff --git a/bcmdhd.100.10.315.x/dhd_custom_memprealloc.c b/bcmdhd.100.10.315.x/dhd_custom_memprealloc.c new file mode 100644 index 0000000..7887406 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_custom_memprealloc.c @@ -0,0 +1,559 @@ +/* + * Platform Dependent file for usage of Preallocted Memory + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * <> + * + * $Id: dhd_custom_memprealloc.c 744015 2018-01-31 05:51:10Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + +#define WLAN_STATIC_SCAN_BUF0 5 +#define WLAN_STATIC_SCAN_BUF1 6 +#define WLAN_STATIC_DHD_INFO_BUF 7 +#define WLAN_STATIC_DHD_WLFC_BUF 8 +#define WLAN_STATIC_DHD_IF_FLOW_LKUP 9 +#define WLAN_STATIC_DHD_MEMDUMP_RAM 11 +#define WLAN_STATIC_DHD_WLFC_HANGER 12 +#define WLAN_STATIC_DHD_PKTID_MAP 13 +#define WLAN_STATIC_DHD_PKTID_IOCTL_MAP 14 +#define WLAN_STATIC_DHD_LOG_DUMP_BUF 15 +#define WLAN_STATIC_DHD_LOG_DUMP_BUF_EX 16 +#define WLAN_STATIC_DHD_PKTLOG_DUMP_BUF 17 + +#define WLAN_SCAN_BUF_SIZE (64 * 1024) + +#if defined(CONFIG_64BIT) +#define WLAN_DHD_INFO_BUF_SIZE (32 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) +#else +#define WLAN_DHD_INFO_BUF_SIZE (32 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (16 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif /* CONFIG_64BIT */ +/* Have 2MB ramsize to accommodate future chips */ +#define WLAN_DHD_MEMDUMP_SIZE (2048 * 1024) + +#define PREALLOC_WLAN_SEC_NUM 4 +#define PREALLOC_WLAN_BUF_NUM 160 +#define PREALLOC_WLAN_SECTION_HEADER 24 + +#ifdef CONFIG_BCMDHD_PCIE +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_1 0 +#define WLAN_SECTION_SIZE_2 0 +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) + +#define DHD_SKB_1PAGE_BUF_NUM 0 +#define DHD_SKB_2PAGE_BUF_NUM 128 +#define DHD_SKB_4PAGE_BUF_NUM 0 + +#else +#define DHD_SKB_HDRSIZE 336 +#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) +#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) +#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) + 
+#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_BUF_NUM * 512) +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) + +#define DHD_SKB_1PAGE_BUF_NUM 8 +#define DHD_SKB_2PAGE_BUF_NUM 8 +#define DHD_SKB_4PAGE_BUF_NUM 1 +#endif /* CONFIG_BCMDHD_PCIE */ + +#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ + (DHD_SKB_2PAGE_BUF_NUM)) +#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + \ + (DHD_SKB_4PAGE_BUF_NUM)) + +#define WLAN_MAX_PKTID_ITEMS (8192) +#define WLAN_DHD_PKTID_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_ITEMS + 1)) +#define WLAN_DHD_PKTID_MAP_ITEM_SIZE (32) +#define WLAN_DHD_PKTID_MAP_SIZE ((WLAN_DHD_PKTID_MAP_HDR_SIZE) + \ + ((WLAN_MAX_PKTID_ITEMS+1) * WLAN_DHD_PKTID_MAP_ITEM_SIZE)) + +#define WLAN_MAX_PKTID_IOCTL_ITEMS (32) +#define WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_IOCTL_ITEMS + 1)) +#define WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE (32) +#define WLAN_DHD_PKTID_IOCTL_MAP_SIZE ((WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE) + \ + ((WLAN_MAX_PKTID_IOCTL_ITEMS+1) * WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE)) + +#define DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * 4) +#define DHD_LOG_DUMP_BUF_EX_SIZE (1024 * 1024 * 4) + +#define DHD_PKTLOG_DUMP_BUF_SIZE (64 * 1024) + +#define WLAN_DHD_WLFC_HANGER_MAXITEMS 3072 +#define WLAN_DHD_WLFC_HANGER_ITEM_SIZE 32 +#define WLAN_DHD_WLFC_HANGER_SIZE ((WLAN_DHD_WLFC_HANGER_ITEM_SIZE) + \ + ((WLAN_DHD_WLFC_HANGER_MAXITEMS) * (WLAN_DHD_WLFC_HANGER_ITEM_SIZE))) + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +struct wlan_mem_prealloc { + void *mem_ptr; + unsigned long size; +}; + +static struct wlan_mem_prealloc wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = { + {NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_3 + 
PREALLOC_WLAN_SECTION_HEADER)} +}; + +static void *wlan_static_scan_buf0 = NULL; +static void *wlan_static_scan_buf1 = NULL; +static void *wlan_static_dhd_info_buf = NULL; +static void *wlan_static_dhd_wlfc_buf = NULL; +static void *wlan_static_if_flow_lkup = NULL; +static void *wlan_static_dhd_memdump_ram = NULL; +static void *wlan_static_dhd_wlfc_hanger = NULL; +static void *wlan_static_dhd_pktid_map = NULL; +static void *wlan_static_dhd_pktid_ioctl_map = NULL; +static void *wlan_static_dhd_log_dump_buf = NULL; +static void *wlan_static_dhd_log_dump_buf_ex = NULL; +static void *wlan_static_dhd_pktlog_dump_buf = NULL; + +void +*dhd_wlan_mem_prealloc(int section, unsigned long size) +{ + if (section == PREALLOC_WLAN_SEC_NUM) { + return wlan_static_skb; + } + + if (section == WLAN_STATIC_SCAN_BUF0) { + return wlan_static_scan_buf0; + } + + if (section == WLAN_STATIC_SCAN_BUF1) { + return wlan_static_scan_buf1; + } + + if (section == WLAN_STATIC_DHD_INFO_BUF) { + if (size > WLAN_DHD_INFO_BUF_SIZE) { + pr_err("request DHD_INFO size(%lu) is bigger than" + " static size(%d).\n", size, + WLAN_DHD_INFO_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_info_buf; + } + + if (section == WLAN_STATIC_DHD_WLFC_BUF) { + if (size > WLAN_DHD_WLFC_BUF_SIZE) { + pr_err("request DHD_WLFC size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_buf; + } + + if (section == WLAN_STATIC_DHD_WLFC_HANGER) { + if (size > WLAN_DHD_WLFC_HANGER_SIZE) { + pr_err("request DHD_WLFC_HANGER size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_WLFC_HANGER_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_hanger; + } + + if (section == WLAN_STATIC_DHD_IF_FLOW_LKUP) { + if (size > WLAN_DHD_IF_FLOW_LKUP_SIZE) { + pr_err("request DHD_WLFC size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_if_flow_lkup; + } + + if (section == 
WLAN_STATIC_DHD_MEMDUMP_RAM) { + if (size > WLAN_DHD_MEMDUMP_SIZE) { + pr_err("request DHD_MEMDUMP_RAM size(%lu) is bigger" + " than static size(%d).\n", + size, WLAN_DHD_MEMDUMP_SIZE); + return NULL; + } + return wlan_static_dhd_memdump_ram; + } + + if (section == WLAN_STATIC_DHD_PKTID_MAP) { + if (size > WLAN_DHD_PKTID_MAP_SIZE) { + pr_err("request DHD_PKTID_MAP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_PKTID_MAP_SIZE); + return NULL; + } + return wlan_static_dhd_pktid_map; + } + + if (section == WLAN_STATIC_DHD_PKTID_IOCTL_MAP) { + if (size > WLAN_DHD_PKTID_IOCTL_MAP_SIZE) { + pr_err("request DHD_PKTID_IOCTL_MAP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_PKTID_IOCTL_MAP_SIZE); + return NULL; + } + return wlan_static_dhd_pktid_ioctl_map; + } + + if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF) { + if (size > DHD_LOG_DUMP_BUF_SIZE) { + pr_err("request DHD_LOG_DUMP_BUF size(%lu) is bigger then" + " static size(%d).\n", + size, DHD_LOG_DUMP_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf; + } + + if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF_EX) { + if (size > DHD_LOG_DUMP_BUF_EX_SIZE) { + pr_err("request DHD_LOG_DUMP_BUF_EX size(%lu) is bigger then" + " static size(%d).\n", + size, DHD_LOG_DUMP_BUF_EX_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf_ex; + } + + if (section == WLAN_STATIC_DHD_PKTLOG_DUMP_BUF) { + if (size > DHD_PKTLOG_DUMP_BUF_SIZE) { + pr_err("request DHD_PKTLOG_DUMP_BUF size(%lu) is bigger then" + " static size(%d).\n", + size, DHD_PKTLOG_DUMP_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_pktlog_dump_buf; + } + + if ((section < 0) || (section >= PREALLOC_WLAN_SEC_NUM)) { + return NULL; + } + + if (wlan_mem_array[section].size < size) { + return NULL; + } + + return wlan_mem_array[section].mem_ptr; +} +EXPORT_SYMBOL(dhd_wlan_mem_prealloc); + +int +dhd_init_wlan_mem(void) +{ + int i; + int j; + + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { + wlan_static_skb[i] 
= __dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE, GFP_KERNEL); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + } + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE, GFP_KERNEL); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + } + +#if !defined(CONFIG_BCMDHD_PCIE) + wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE, GFP_KERNEL); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } +#endif /* !CONFIG_BCMDHD_PCIE */ + + for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) { + if (wlan_mem_array[i].size > 0) { + wlan_mem_array[i].mem_ptr = + kmalloc(wlan_mem_array[i].size, GFP_KERNEL); + + if (!wlan_mem_array[i].mem_ptr) { + goto err_mem_alloc; + } + } + } + + wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf0) { + pr_err("Failed to alloc wlan_static_scan_buf0\n"); + goto err_mem_alloc; + } + + wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf1) { + pr_err("Failed to alloc wlan_static_scan_buf1\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_log_dump_buf = kmalloc(DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf) { + pr_err("Failed to alloc wlan_static_dhd_log_dump_buf\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf_ex) { + pr_err("Failed to alloc wlan_static_dhd_log_dump_buf_ex\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_info_buf) { + pr_err("Failed to alloc wlan_static_dhd_info_buf\n"); + goto err_mem_alloc; + } + +#ifdef CONFIG_BCMDHD_PCIE + wlan_static_if_flow_lkup = kmalloc(WLAN_DHD_IF_FLOW_LKUP_SIZE, + GFP_KERNEL); + if (!wlan_static_if_flow_lkup) { + pr_err("Failed to alloc wlan_static_if_flow_lkup\n"); + goto err_mem_alloc; + } + +#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP + 
wlan_static_dhd_pktid_map = kmalloc(WLAN_DHD_PKTID_MAP_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_pktid_map) { + pr_err("Failed to alloc wlan_static_dhd_pktid_map\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_pktid_ioctl_map = kmalloc(WLAN_DHD_PKTID_IOCTL_MAP_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_pktid_ioctl_map) { + pr_err("Failed to alloc wlan_static_dhd_pktid_ioctl_map\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */ +#else + wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_wlfc_buf) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_buf\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_wlfc_hanger = kmalloc(WLAN_DHD_WLFC_HANGER_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_wlfc_hanger) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_hanger\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PCIE */ + +#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP + wlan_static_dhd_memdump_ram = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_memdump_ram) { + pr_err("Failed to alloc wlan_static_dhd_memdump_ram\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */ + + wlan_static_dhd_pktlog_dump_buf = kmalloc(DHD_PKTLOG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_pktlog_dump_buf) { + pr_err("Failed to alloc wlan_static_dhd_pktlog_dump_buf\n"); + goto err_mem_alloc; + } + + pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__); + return 0; + +err_mem_alloc: +#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP + if (wlan_static_dhd_memdump_ram) { + kfree(wlan_static_dhd_memdump_ram); + } + +#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */ + +#ifdef CONFIG_BCMDHD_PCIE + if (wlan_static_if_flow_lkup) { + kfree(wlan_static_if_flow_lkup); + } + +#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP + if (wlan_static_dhd_pktid_map) { + kfree(wlan_static_dhd_pktid_map); + } + + if (wlan_static_dhd_pktid_ioctl_map) { + kfree(wlan_static_dhd_pktid_ioctl_map); + } +#endif /* 
CONFIG_BCMDHD_PREALLOC_PKTIDMAP */ +#else + if (wlan_static_dhd_wlfc_buf) { + kfree(wlan_static_dhd_wlfc_buf); + } + + if (wlan_static_dhd_wlfc_hanger) { + kfree(wlan_static_dhd_wlfc_hanger); + } +#endif /* CONFIG_BCMDHD_PCIE */ + if (wlan_static_dhd_info_buf) { + kfree(wlan_static_dhd_info_buf); + } + + if (wlan_static_dhd_log_dump_buf) { + kfree(wlan_static_dhd_log_dump_buf); + } + + if (wlan_static_dhd_log_dump_buf_ex) { + kfree(wlan_static_dhd_log_dump_buf_ex); + } + + if (wlan_static_scan_buf1) { + kfree(wlan_static_scan_buf1); + } + + if (wlan_static_scan_buf0) { + kfree(wlan_static_scan_buf0); + } + + if (wlan_static_dhd_pktlog_dump_buf) { + kfree(wlan_static_dhd_pktlog_dump_buf); + } + + pr_err("Failed to mem_alloc for WLAN\n"); + + for (j = 0; j < i; j++) { + kfree(wlan_mem_array[j].mem_ptr); + } + + i = WLAN_SKB_BUF_NUM; + +err_skb_alloc: + pr_err("Failed to skb_alloc for WLAN\n"); + for (j = 0; j < i; j++) { + dev_kfree_skb(wlan_static_skb[j]); + } + + return -ENOMEM; +} +EXPORT_SYMBOL(dhd_init_wlan_mem); + +void +dhd_exit_wlan_mem(void) +{ + int i = 0; + +#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP + if (wlan_static_dhd_memdump_ram) { + kfree(wlan_static_dhd_memdump_ram); + } + +#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */ + +#ifdef CONFIG_BCMDHD_PCIE + if (wlan_static_if_flow_lkup) { + kfree(wlan_static_if_flow_lkup); + } + +#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP + if (wlan_static_dhd_pktid_map) { + kfree(wlan_static_dhd_pktid_map); + } + + if (wlan_static_dhd_pktid_ioctl_map) { + kfree(wlan_static_dhd_pktid_ioctl_map); + } +#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */ +#else + if (wlan_static_dhd_wlfc_buf) { + kfree(wlan_static_dhd_wlfc_buf); + } + + if (wlan_static_dhd_wlfc_hanger) { + kfree(wlan_static_dhd_wlfc_hanger); + } +#endif /* CONFIG_BCMDHD_PCIE */ + if (wlan_static_dhd_info_buf) { + kfree(wlan_static_dhd_info_buf); + } + + if (wlan_static_dhd_log_dump_buf) { + kfree(wlan_static_dhd_log_dump_buf); + } + + if (wlan_static_dhd_log_dump_buf_ex) { + 
kfree(wlan_static_dhd_log_dump_buf_ex); + } + + if (wlan_static_scan_buf1) { + kfree(wlan_static_scan_buf1); + } + + if (wlan_static_scan_buf0) { + kfree(wlan_static_scan_buf0); + } + + if (wlan_static_dhd_pktlog_dump_buf) { + kfree(wlan_static_dhd_pktlog_dump_buf); + } + + pr_err("Failed to mem_alloc for WLAN\n"); + + for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) { + if (wlan_mem_array[i].mem_ptr) { + kfree(wlan_mem_array[i].mem_ptr); + } + } + + pr_err("Failed to skb_alloc for WLAN\n"); + for (i = 0; i < WLAN_SKB_BUF_NUM; i++) { + dev_kfree_skb(wlan_static_skb[i]); + } + + return; +} +EXPORT_SYMBOL(dhd_exit_wlan_mem); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ diff --git a/bcmdhd.100.10.315.x/dhd_dbg.h b/bcmdhd.100.10.315.x/dhd_dbg.h new file mode 100644 index 0000000..c43848b --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_dbg.h @@ -0,0 +1,353 @@ +/* + * Debug/trace/assert driver definitions for Dongle Host Driver. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_dbg.h 759128 2018-04-24 03:48:17Z $ + */ + +#ifndef _dhd_dbg_ +#define _dhd_dbg_ + +#ifdef DHD_LOG_DUMP +typedef enum { + DLD_BUF_TYPE_GENERAL = 0, + DLD_BUF_TYPE_PRESERVE, + DLD_BUF_TYPE_SPECIAL, + DLD_BUF_TYPE_ECNTRS, + DLD_BUF_TYPE_FILTER, + DLD_BUF_TYPE_ALL +} log_dump_type_t; +extern char *dhd_log_dump_get_timestamp(void); +extern void dhd_log_dump_write(int type, char *binary_data, + int binary_len, const char *fmt, ...); +#ifndef _DHD_LOG_DUMP_DEFINITIONS_ +#define _DHD_LOG_DUMP_DEFINITIONS_ +#define GENERAL_LOG_HDR "\n-------------------- General log ---------------------------\n" +#define PRESERVE_LOG_HDR "\n-------------------- Preserve log ---------------------------\n" +#define SPECIAL_LOG_HDR "\n-------------------- Special log ---------------------------\n" +#define DHD_DUMP_LOG_HDR "\n-------------------- 'dhd dump' log -----------------------\n" +#define EXT_TRAP_LOG_HDR "\n-------------------- Extended trap data -------------------\n" +#define HEALTH_CHK_LOG_HDR "\n-------------------- Health check data --------------------\n" +#ifdef DHD_DUMP_PCIE_RINGS +#define FLOWRING_DUMP_HDR "\n-------------------- Flowring dump --------------------\n" +#endif /* DHD_DUMP_PCIE_RINGS */ +#define DHD_LOG_DUMP_WRITE(fmt, ...) \ + dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__) +#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) \ + dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, NULL, 0, fmt, ##__VA_ARGS__) +#define DHD_LOG_DUMP_WRITE_PRSRV(fmt, ...) 
\ + dhd_log_dump_write(DLD_BUF_TYPE_PRESERVE, NULL, 0, fmt, ##__VA_ARGS__) +#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */ +#define CONCISE_DUMP_BUFLEN 16 * 1024 +#define ECNTRS_LOG_HDR "\n-------------------- Ecounters log --------------------------\n" +#define COOKIE_LOG_HDR "\n-------------------- Cookie List ----------------------------\n" +#endif /* DHD_LOG_DUMP */ + +#if defined(DHD_DEBUG) + +/* NON-NDIS cases */ +#ifdef DHD_LOG_DUMP +/* Common case for EFI and non EFI */ +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) + +/* !defined(DHD_EFI) and defined(DHD_LOG_DUMP) */ +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#else /* DHD_LOG_DUMP */ +/* !defined(DHD_LOG_DUMP cases) */ +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ + +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) + +#ifdef DHD_LOG_DUMP +/* LOG_DUMP defines common to EFI and NON-EFI */ +#define DHD_ERROR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_IOVAR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + if (dhd_msg_level & DHD_IOVAR_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_LOG_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +/* NON-EFI builds with LOG 
DUMP enabled */ +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_PRSRV_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + if (dhd_msg_level & DHD_PRSRV_MEM_VAL) \ + printf args; \ + DHD_LOG_DUMP_WRITE_PRSRV("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE_PRSRV args; \ + } \ +} while (0) + +/* Re-using 'DHD_MSGTRACE_VAL' for controlling printing of ecounter binary event +* logs to console and debug dump -- need to cleanup in the future to use separate +* 'DHD_ECNTR_VAL' bitmap flag. 'DHD_MSGTRACE_VAL' will be defined only +* for non-android builds. +*/ +#define DHD_ECNTR_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ + } \ + } \ +} while (0) + +#define DHD_ERROR_EX(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE_EX("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE_EX args; \ + } \ +} while (0) + +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \ + DHD_LOG_DUMP_WRITE args; \ +} while (0) +#else /* DHD_LOG_DUMP */ +/* !DHD_LOG_DUMP */ +#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0) +#define DHD_ERROR_MEM(args) DHD_ERROR(args) +#define DHD_IOVAR_MEM(args) DHD_ERROR(args) +#define DHD_LOG_MEM(args) DHD_ERROR(args) +#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#define DHD_ECNTR_LOG(args) DHD_EVENT(args) +#define DHD_PRSRV_MEM(args) DHD_EVENT(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#endif /* DHD_LOG_DUMP */ + +#define DHD_DATA(args) 
do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) +#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0) +#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0) +#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0) +#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0) +#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) +#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) +#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) +#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) +#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0) +#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0) +#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0) +#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0) +#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0) + +#if defined(DHD_LOG_DUMP) +#define DHD_FWLOG(args) \ + do { \ + if (dhd_msg_level & DHD_FWLOG_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE args; \ + } \ + } while (0) +#else /* DHD_LOG_DUMP */ +#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ + +#define DHD_DBGIF(args) do {if (dhd_msg_level & DHD_DBGIF_VAL) printf args;} while (0) + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM(args) do {if (dhd_msg_level & DHD_RPM_VAL) printf args;} while (0) +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#define DHD_ERROR_NO_HW4 DHD_ERROR + +#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL) +#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL) +#define DHD_INFO_ON() 
(dhd_msg_level & DHD_INFO_VAL) +#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL) +#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL) +#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL) +#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL) +#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL) +#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL) +#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL) +#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL) +#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL) +#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL) +#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL) +#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL) +#define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL) +#define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL) +#define DHD_RTT_ON() (dhd_msg_level & DHD_RTT_VAL) +#define DHD_MSGTRACE_ON() (dhd_msg_level & DHD_MSGTRACE_VAL) +#define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL) +#define DHD_DBGIF_ON() (dhd_msg_level & DHD_DBGIF_VAL) +#define DHD_PKT_MON_ON() (dhd_msg_level & DHD_PKT_MON_VAL) +#define DHD_PKT_MON_DUMP_ON() (dhd_msg_level & DHD_PKT_MON_DUMP_VAL) +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM_ON() (dhd_msg_level & DHD_RPM_VAL) +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#else /* defined(BCMDBG) || defined(DHD_DEBUG) */ + +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \ + printf args;} while (0) +#define DHD_TRACE(args) +#define DHD_INFO(args) + +#define DHD_DATA(args) +#define DHD_CTL(args) +#define DHD_TIMER(args) +#define DHD_HDRS(args) +#define DHD_BYTES(args) +#define DHD_INTR(args) +#define DHD_GLOM(args) + +#define DHD_EVENT(args) +#define DHD_ECNTR_LOG(args) DHD_EVENT(args) + +#define DHD_PRSRV_MEM(args) DHD_EVENT(args) + +#define DHD_BTA(args) +#define DHD_ISCAN(args) +#define DHD_ARPOE(args) +#define DHD_REORDER(args) +#define DHD_PNO(args) +#define DHD_RTT(args) +#define DHD_PKT_MON(args) + +#define DHD_MSGTRACE_LOG(args) +#define 
DHD_FWLOG(args) + +#define DHD_DBGIF(args) + +#define DHD_ERROR_MEM(args) DHD_ERROR(args) +#define DHD_IOVAR_MEM(args) DHD_ERROR(args) +#define DHD_LOG_MEM(args) DHD_ERROR(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) + +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#define DHD_ERROR_NO_HW4 DHD_ERROR + +#define DHD_ERROR_ON() 0 +#define DHD_TRACE_ON() 0 +#define DHD_INFO_ON() 0 +#define DHD_DATA_ON() 0 +#define DHD_CTL_ON() 0 +#define DHD_TIMER_ON() 0 +#define DHD_HDRS_ON() 0 +#define DHD_BYTES_ON() 0 +#define DHD_INTR_ON() 0 +#define DHD_GLOM_ON() 0 +#define DHD_EVENT_ON() 0 +#define DHD_BTA_ON() 0 +#define DHD_ISCAN_ON() 0 +#define DHD_ARPOE_ON() 0 +#define DHD_REORDER_ON() 0 +#define DHD_NOCHECKDIED_ON() 0 +#define DHD_PNO_ON() 0 +#define DHD_RTT_ON() 0 +#define DHD_PKT_MON_ON() 0 +#define DHD_PKT_MON_DUMP_ON() 0 +#define DHD_MSGTRACE_ON() 0 +#define DHD_FWLOG_ON() 0 +#define DHD_DBGIF_ON() 0 +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM_ON() 0 +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#endif // endif + +#define PRINT_RATE_LIMIT_PERIOD 5000000u /* 5s in units of us */ +#define DHD_ERROR_RLMT(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + static uint64 __err_ts = 0; \ + static uint32 __err_cnt = 0; \ + uint64 __cur_ts = 0; \ + __cur_ts = OSL_SYSUPTIME_US(); \ + if (__err_ts == 0 || (__cur_ts > __err_ts && \ + (__cur_ts - __err_ts > PRINT_RATE_LIMIT_PERIOD))) { \ + __err_ts = __cur_ts; \ + DHD_ERROR(args); \ + DHD_ERROR(("[Repeats %u times]\n", __err_cnt)); \ + __err_cnt = 0; \ + } else { \ + ++__err_cnt; \ + } \ + } \ +} while (0) + +/* even in non-BCMDBG builds, logging of dongle iovars should be available */ +#define DHD_DNGL_IOVAR_SET(args) \ + do {if (dhd_msg_level & DHD_DNGL_IOVAR_SET_VAL) printf args;} while (0) + +#define DHD_LOG(args) + +#define DHD_BLOG(cp, size) + +#define DHD_NONE(args) +extern int dhd_msg_level; + +/* Defines msg bits */ +#include + +#endif /* _dhd_dbg_ */ diff --git 
a/bcmdhd.100.10.315.x/dhd_dbg_ring.c b/bcmdhd.100.10.315.x/dhd_dbg_ring.c new file mode 100644 index 0000000..6d0553f --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_dbg_ring.c @@ -0,0 +1,409 @@ +/* + * DHD debug ring API and structures + * + * <> + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_dbg_ring.c 769272 2018-06-25 09:23:27Z $ + */ +#include +#include +#include +#include +#include +#include +#include +#include + +int +dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name, + uint32 ring_sz, void *allocd_buf) +{ + void *buf; + unsigned long flags = 0; + + if (allocd_buf == NULL) { + return BCME_NOMEM; + } else { + buf = allocd_buf; + } + + ring->lock = DHD_DBG_RING_LOCK_INIT(dhdp->osh); + if (!ring->lock) + return BCME_NOMEM; + + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->id = id; + strncpy(ring->name, name, DBGRING_NAME_MAX); + ring->name[DBGRING_NAME_MAX - 1] = 0; + ring->ring_size = ring_sz; + ring->wp = ring->rp = 0; + ring->ring_buf = buf; + ring->threshold = DBGRING_FLUSH_THRESHOLD(ring); + ring->state = RING_SUSPEND; + ring->rem_len = 0; + ring->sched_pull = TRUE; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return BCME_OK; +} + +void +dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring) +{ + unsigned long flags = 0; + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->id = 0; + ring->name[0] = 0; + ring->wp = ring->rp = 0; + memset(&ring->stat, 0, sizeof(ring->stat)); + ring->threshold = 0; + ring->state = RING_STOP; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + DHD_DBG_RING_LOCK_DEINIT(dhdp->osh, ring->lock); +} + +void +dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len, + os_pullreq_t pull_fn, void *os_pvt, const int id) +{ + unsigned long flags = 0; + DHD_DBG_RING_LOCK(ring->lock, flags); + /* if the current pending size is bigger than threshold and + * threshold is set + */ + if (ring->threshold > 0 && + (pending_len >= ring->threshold) && ring->sched_pull) { + /* + * Update the state and release the lock before calling + * the pull_fn. Do not transfer control to other layers + * with locks held. If the call back again calls into + * the same layer fro this context, can lead to deadlock. 
+ */ + ring->sched_pull = FALSE; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + pull_fn(os_pvt, id); + } else { + DHD_DBG_RING_UNLOCK(ring->lock, flags); + } +} + +uint32 +dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring) +{ + uint32 pending_len = 0; + unsigned long flags = 0; + DHD_DBG_RING_LOCK(ring->lock, flags); + if (ring->stat.written_bytes > ring->stat.read_bytes) { + pending_len = ring->stat.written_bytes - ring->stat.read_bytes; + } else if (ring->stat.written_bytes < ring->stat.read_bytes) { + pending_len = PENDING_LEN_MAX - ring->stat.read_bytes + ring->stat.written_bytes; + } else { + pending_len = 0; + } + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return pending_len; +} + +int +dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data) +{ + unsigned long flags; + uint32 w_len; + uint32 avail_size; + dhd_dbg_ring_entry_t *w_entry, *r_entry; + + if (!ring || !hdr || !data) { + return BCME_BADARG; + } + + DHD_DBG_RING_LOCK(ring->lock, flags); + + if (ring->state != RING_ACTIVE) { + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_OK; + } + + w_len = ENTRY_LENGTH(hdr); + + DHD_DBGIF(("%s: RING%d[%s] hdr->len=%u, w_len=%u, wp=%d, rp=%d, ring_start=0x%p;" + " ring_size=%u\n", + __FUNCTION__, ring->id, ring->name, hdr->len, w_len, ring->wp, ring->rp, + ring->ring_buf, ring->ring_size)); + + if (w_len > ring->ring_size) { + DHD_DBG_RING_UNLOCK(ring->lock, flags); + DHD_ERROR(("%s: RING%d[%s] w_len=%u, ring_size=%u," + " write size exceeds ring size !\n", + __FUNCTION__, ring->id, ring->name, w_len, ring->ring_size)); + return BCME_BUFTOOLONG; + } + /* Claim the space */ + do { + avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size); + if (avail_size <= w_len) { + /* Prepare the space */ + if (ring->rp <= ring->wp) { + ring->tail_padded = TRUE; + ring->rem_len = ring->ring_size - ring->wp; + DHD_DBGIF(("%s: RING%d[%s] Insuffient tail space," + " rp=%d, wp=%d, rem_len=%d, ring_size=%d," + " avail_size=%d, 
w_len=%d\n", __FUNCTION__, + ring->id, ring->name, ring->rp, ring->wp, + ring->rem_len, ring->ring_size, avail_size, + w_len)); + + /* 0 pad insufficient tail space */ + memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len); + /* If read pointer is still at the beginning, make some room */ + if (ring->rp == 0) { + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + + ring->rp); + ring->rp += ENTRY_LENGTH(r_entry); + ring->stat.read_bytes += ENTRY_LENGTH(r_entry); + DHD_DBGIF(("%s: rp at 0, move by one entry length" + " (%u bytes)\n", + __FUNCTION__, (uint32)ENTRY_LENGTH(r_entry))); + } + if (ring->rp == ring->wp) { + ring->rp = 0; + } + ring->wp = 0; + DHD_DBGIF(("%s: new rp=%u, wp=%u\n", + __FUNCTION__, ring->rp, ring->wp)); + } else { + /* Not enough space for new entry, free some up */ + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + + ring->rp); + /* check bounds before incrementing read ptr */ + if (ring->rp + ENTRY_LENGTH(r_entry) >= ring->ring_size) { + DHD_ERROR(("%s: RING%d[%s] rp points out of boundary," + "ring->wp=%u, ring->rp=%u, ring->ring_size=%d\n", + __FUNCTION__, ring->id, ring->name, ring->wp, + ring->rp, ring->ring_size)); + ASSERT(0); + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_BUFTOOSHORT; + } + ring->rp += ENTRY_LENGTH(r_entry); + /* skip padding if there is one */ + if (ring->tail_padded && + ((ring->rp + ring->rem_len) == ring->ring_size)) { + DHD_DBGIF(("%s: RING%d[%s] Found padding," + " avail_size=%d, w_len=%d, set rp=0\n", + __FUNCTION__, ring->id, ring->name, + avail_size, w_len)); + ring->rp = 0; + ring->tail_padded = FALSE; + ring->rem_len = 0; + } + ring->stat.read_bytes += ENTRY_LENGTH(r_entry); + DHD_DBGIF(("%s: RING%d[%s] read_bytes=%d, wp=%d, rp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->stat.read_bytes, + ring->wp, ring->rp)); + } + } else { + break; + } + } while (TRUE); + + /* check before writing to the ring */ + if (ring->wp + w_len >= ring->ring_size) { + DHD_ERROR(("%s: 
RING%d[%s] wp pointed out of ring boundary, " + "wp=%d, ring_size=%d, w_len=%u\n", __FUNCTION__, ring->id, + ring->name, ring->wp, ring->ring_size, w_len)); + ASSERT(0); + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_BUFTOOLONG; + } + + w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp); + /* header */ + memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE); + w_entry->len = hdr->len; + /* payload */ + memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, data, w_entry->len); + /* update write pointer */ + ring->wp += w_len; + + /* update statistics */ + ring->stat.written_records++; + ring->stat.written_bytes += w_len; + DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d," + " ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name, + ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes, + ring->threshold, ring->wp, ring->rp)); + + DHD_DBG_RING_UNLOCK(ring->lock, flags); + return BCME_OK; +} + +int +dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, + bool strip_header) +{ + dhd_dbg_ring_entry_t *r_entry = NULL; + uint32 rlen = 0; + char *buf = NULL; + + if (!ring || !data || buf_len <= 0) { + return 0; + } + + /* pull from ring is allowed for inactive (suspended) ring + * in case of ecounters only, this is because, for ecounters + * when a trap occurs the ring is suspended and data is then + * pulled to dump it to a file. 
For other rings if ring is + * not in active state return without processing (as before) + */ +#ifndef EWP_ECNTRS_LOGGING + if (ring->state != RING_ACTIVE) { + return 0; + } +#endif /* EWP_ECNTRS_LOGGING */ + + if (ring->rp == ring->wp) { + return 0; + } + + DHD_DBGIF(("%s: RING%d[%s] buf_len=%u, wp=%d, rp=%d, ring_start=0x%p; ring_size=%u\n", + __FUNCTION__, ring->id, ring->name, buf_len, ring->wp, ring->rp, + ring->ring_buf, ring->ring_size)); + + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp); + + /* Boundary Check */ + rlen = ENTRY_LENGTH(r_entry); + if ((ring->rp + rlen) > ring->ring_size) { + DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d," + " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen, + ring->ring_size, ring->id, ring->name, ring->rp)); + return 0; + } + + if (strip_header) { + rlen = r_entry->len; + buf = (char *)r_entry + DBG_RING_ENTRY_SIZE; + } else { + rlen = ENTRY_LENGTH(r_entry); + buf = (char *)r_entry; + } + if (rlen > buf_len) { + DHD_ERROR(("%s: buf len %d is too small for entry len %d\n", + __FUNCTION__, buf_len, rlen)); + DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->ring_size, + ring->wp, ring->rp)); + ASSERT(0); + return 0; + } + + memcpy(data, buf, rlen); + /* update ring context */ + ring->rp += ENTRY_LENGTH(r_entry); + /* don't pass wp but skip padding if there is one */ + if (ring->rp != ring->wp && + ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) { + DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp)); + ring->rp = 0; + ring->tail_padded = FALSE; + ring->rem_len = 0; + } + if (ring->rp >= ring->ring_size) { + DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary," + " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id, + ring->name, ring->rp, ring->ring_size)); + ASSERT(0); + return 0; + } + ring->stat.read_bytes += ENTRY_LENGTH(r_entry); + 
DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__, + ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp)); + + return rlen; +} + +int +dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_hdr) +{ + int32 r_len, total_r_len = 0; + + if (!ring || !data) + return 0; + + if (ring->state != RING_ACTIVE) + return 0; + + while (buf_len > 0) { + r_len = dhd_dbg_ring_pull_single(ring, data, buf_len, strip_hdr); + if (r_len == 0) + break; + data = (uint8 *)data + r_len; + buf_len -= r_len; + total_r_len += r_len; + } + + return total_r_len; +} + +int +dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold) +{ + unsigned long flags = 0; + if (!ring) + return BCME_BADADDR; + + if (ring->state == RING_STOP) + return BCME_UNSUPPORTED; + + DHD_DBG_RING_LOCK(ring->lock, flags); + + if (log_level == 0) + ring->state = RING_SUSPEND; + else + ring->state = RING_ACTIVE; + + ring->log_level = log_level; + ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring)); + + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return BCME_OK; +} + +void +dhd_dbg_ring_start(dhd_dbg_ring_t *ring) +{ + if (!ring) + return; + + /* Initialize the information for the ring */ + ring->state = RING_SUSPEND; + ring->log_level = 0; + ring->rp = ring->wp = 0; + ring->threshold = 0; + memset(&ring->stat, 0, sizeof(struct ring_statistics)); + memset(ring->ring_buf, 0, ring->ring_size); +} diff --git a/bcmdhd.100.10.315.x/dhd_dbg_ring.h b/bcmdhd.100.10.315.x/dhd_dbg_ring.h new file mode 100644 index 0000000..c1d3041 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_dbg_ring.h @@ -0,0 +1,137 @@ +/* + * DHD debug ring header file + * + * <> + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_dbg_ring.h 754183 2018-03-26 11:15:16Z $ + */ + +#ifndef __DHD_DBG_RING_H__ +#define __DHD_DBG_RING_H__ + +#include + +#define PACKED_STRUCT __attribute__ ((packed)) + +#define DBGRING_NAME_MAX 32 + +enum dbg_ring_state { + RING_STOP = 0, /* ring is not initialized */ + RING_ACTIVE, /* ring is live and logging */ + RING_SUSPEND /* ring is initialized but not logging */ +}; + +/* each entry in dbg ring has below header, to handle + * variable length records in ring + */ +typedef struct dhd_dbg_ring_entry { + uint16 len; /* payload length excluding the header */ + uint8 flags; + uint8 type; /* Per ring specific */ + uint64 timestamp; /* present if has_timestamp bit is set. 
*/ +} PACKED_STRUCT dhd_dbg_ring_entry_t; + +struct ring_statistics { + /* number of bytes that was written to the buffer by driver */ + uint32 written_bytes; + /* number of bytes that was read from the buffer by user land */ + uint32 read_bytes; + /* number of records that was written to the buffer by driver */ + uint32 written_records; +}; + +typedef struct dhd_dbg_ring_status { + uint8 name[DBGRING_NAME_MAX]; + uint32 flags; + int ring_id; /* unique integer representing the ring */ + /* total memory size allocated for the buffer */ + uint32 ring_buffer_byte_size; + uint32 verbose_level; + /* number of bytes that was written to the buffer by driver */ + uint32 written_bytes; + /* number of bytes that was read from the buffer by user land */ + uint32 read_bytes; + /* number of records that was read from the buffer by user land */ + uint32 written_records; +} dhd_dbg_ring_status_t; + +typedef struct dhd_dbg_ring { + int id; /* ring id */ + uint8 name[DBGRING_NAME_MAX]; /* name string */ + uint32 ring_size; /* numbers of item in ring */ + uint32 wp; /* write pointer */ + uint32 rp; /* read pointer */ + uint32 log_level; /* log_level */ + uint32 threshold; /* threshold bytes */ + void * ring_buf; /* pointer of actually ring buffer */ + void * lock; /* lock for ring access */ + struct ring_statistics stat; /* statistics */ + enum dbg_ring_state state; /* ring state enum */ + bool tail_padded; /* writer does not have enough space */ + uint32 rem_len; /* number of bytes from wp_pad to end */ + bool sched_pull; /* schedule reader immediately */ +} dhd_dbg_ring_t; + +#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3) +#define RING_STAT_TO_STATUS(ring, status) \ + do { \ + strncpy(status.name, ring->name, \ + sizeof(status.name) - 1); \ + status.ring_id = ring->id; \ + status.ring_buffer_byte_size = ring->ring_size; \ + status.written_bytes = ring->stat.written_bytes; \ + status.written_records = ring->stat.written_records; \ + status.read_bytes = 
ring->stat.read_bytes; \ + status.verbose_level = ring->log_level; \ + } while (0) + +#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t)) +#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE) +#define PAYLOAD_MAX_LEN 65535 +#define PAYLOAD_ECNTR_MAX_LEN 1648u +#define PENDING_LEN_MAX 0xFFFFFFFF +#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t)) + +#define TXACTIVESZ(r, w, d) (((r) <= (w)) ? ((w) - (r)) : ((d) - (r) + (w))) +#define DBG_RING_READ_AVAIL_SPACE(w, r, d) (((w) >= (r)) ? ((w) - (r)) : ((d) - (r))) +#define DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d) (((w) >= (r)) ? ((d) - (w)) : ((r) - (w))) +#define DBG_RING_WRITE_SPACE_AVAIL(r, w, d) (d - (TXACTIVESZ(r, w, d))) +#define DBG_RING_CHECK_WRITE_SPACE(r, w, d) \ + MIN(DBG_RING_WRITE_SPACE_AVAIL(r, w, d), DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d)) + +typedef void (*os_pullreq_t)(void *os_priv, const int ring_id); + +int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name, + uint32 ring_sz, void *allocd_buf); +void dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring); +int dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data); +int dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, + bool strip_hdr); +int dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, + bool strip_header); +uint32 dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring); +void dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len, + os_pullreq_t pull_fn, void *os_pvt, const int id); +int dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold); +void dhd_dbg_ring_start(dhd_dbg_ring_t *ring); +#endif /* __DHD_DBG_RING_H__ */ diff --git a/bcmdhd.100.10.315.x/dhd_debug.c b/bcmdhd.100.10.315.x/dhd_debug.c new file mode 100644 index 0000000..8ea1ead --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_debug.c @@ -0,0 +1,2092 @@ +/* + * DHD debugability support + * + * <> + * + * Copyright (C) 1999-2018, 
Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_debug.c 771435 2018-07-10 05:35:24Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DHD_PKT_INFO DHD_ERROR +struct map_table { + uint16 fw_id; + uint16 host_id; + char *desc; +}; + +struct map_table event_map[] = { + {WLC_E_AUTH, WIFI_EVENT_AUTH_COMPLETE, "AUTH_COMPLETE"}, + {WLC_E_ASSOC, WIFI_EVENT_ASSOC_COMPLETE, "ASSOC_COMPLETE"}, + {TRACE_FW_AUTH_STARTED, WIFI_EVENT_FW_AUTH_STARTED, "AUTH STARTED"}, + {TRACE_FW_ASSOC_STARTED, WIFI_EVENT_FW_ASSOC_STARTED, "ASSOC STARTED"}, + {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_FW_RE_ASSOC_STARTED, "REASSOC STARTED"}, + {TRACE_G_SCAN_STARTED, WIFI_EVENT_G_SCAN_STARTED, "GSCAN STARTED"}, + {WLC_E_PFN_SCAN_COMPLETE, WIFI_EVENT_G_SCAN_COMPLETE, "GSCAN COMPLETE"}, + {WLC_E_DISASSOC, WIFI_EVENT_DISASSOCIATION_REQUESTED, "DIASSOC REQUESTED"}, + {WLC_E_REASSOC, WIFI_EVENT_RE_ASSOCIATION_REQUESTED, "REASSOC REQUESTED"}, + {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_REQUESTED, "ROAM REQUESTED"}, + {WLC_E_BEACON_FRAME_RX, WIFI_EVENT_BEACON_RECEIVED, "BEACON Received"}, + {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_SCAN_STARTED, "ROAM SCAN STARTED"}, + {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"}, + {TRACE_ROAM_AUTH_STARTED, WIFI_EVENT_ROAM_AUTH_STARTED, "ROAM AUTH STARTED"}, + {WLC_E_AUTH, WIFI_EVENT_ROAM_AUTH_COMPLETE, "ROAM AUTH COMPLETED"}, + {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_ROAM_ASSOC_STARTED, "ROAM ASSOC STARTED"}, + {WLC_E_ASSOC, WIFI_EVENT_ROAM_ASSOC_COMPLETE, "ROAM ASSOC COMPLETED"}, + {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"}, + {TRACE_BT_COEX_BT_SCO_START, WIFI_EVENT_BT_COEX_BT_SCO_START, "BT SCO START"}, + {TRACE_BT_COEX_BT_SCO_STOP, WIFI_EVENT_BT_COEX_BT_SCO_STOP, "BT SCO STOP"}, + {TRACE_BT_COEX_BT_SCAN_START, WIFI_EVENT_BT_COEX_BT_SCAN_START, "BT COEX SCAN START"}, + {TRACE_BT_COEX_BT_SCAN_STOP, 
WIFI_EVENT_BT_COEX_BT_SCAN_STOP, "BT COEX SCAN STOP"}, + {TRACE_BT_COEX_BT_HID_START, WIFI_EVENT_BT_COEX_BT_HID_START, "BT HID START"}, + {TRACE_BT_COEX_BT_HID_STOP, WIFI_EVENT_BT_COEX_BT_HID_STOP, "BT HID STOP"}, + {WLC_E_EAPOL_MSG, WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, "FW EAPOL PKT RECEIVED"}, + {TRACE_FW_EAPOL_FRAME_TRANSMIT_START, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, + "FW EAPOL PKT TRANSMITED"}, + {TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP, + "FW EAPOL PKT TX STOPPED"}, + {TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE, WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE, + "BLOCK ACK NEGO COMPLETED"}, +}; + +struct map_table event_tag_map[] = { + {TRACE_TAG_VENDOR_SPECIFIC, WIFI_TAG_VENDOR_SPECIFIC, "VENDOR SPECIFIC DATA"}, + {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"}, + {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"}, + {TRACE_TAG_SSID, WIFI_TAG_SSID, "SSID"}, + {TRACE_TAG_STATUS, WIFI_TAG_STATUS, "STATUS"}, + {TRACE_TAG_CHANNEL_SPEC, WIFI_TAG_CHANNEL_SPEC, "CHANSPEC"}, + {TRACE_TAG_WAKE_LOCK_EVENT, WIFI_TAG_WAKE_LOCK_EVENT, "WAKELOCK EVENT"}, + {TRACE_TAG_ADDR1, WIFI_TAG_ADDR1, "ADDR_1"}, + {TRACE_TAG_ADDR2, WIFI_TAG_ADDR2, "ADDR_2"}, + {TRACE_TAG_ADDR3, WIFI_TAG_ADDR3, "ADDR_3"}, + {TRACE_TAG_ADDR4, WIFI_TAG_ADDR4, "ADDR_4"}, + {TRACE_TAG_TSF, WIFI_TAG_TSF, "TSF"}, + {TRACE_TAG_IE, WIFI_TAG_IE, "802.11 IE"}, + {TRACE_TAG_INTERFACE, WIFI_TAG_INTERFACE, "INTERFACE"}, + {TRACE_TAG_REASON_CODE, WIFI_TAG_REASON_CODE, "REASON CODE"}, + {TRACE_TAG_RATE_MBPS, WIFI_TAG_RATE_MBPS, "RATE"}, +}; + +/* define log level per ring type */ +struct log_level_table fw_verbose_level_map[] = { + {1, EVENT_LOG_TAG_PCI_ERROR, "PCI_ERROR"}, + {1, EVENT_LOG_TAG_PCI_WARN, "PCI_WARN"}, + {2, EVENT_LOG_TAG_PCI_INFO, "PCI_INFO"}, + {3, EVENT_LOG_TAG_PCI_DBG, "PCI_DEBUG"}, + {3, EVENT_LOG_TAG_BEACON_LOG, "BEACON_LOG"}, + {2, EVENT_LOG_TAG_WL_ASSOC_LOG, "ASSOC_LOG"}, + {2, EVENT_LOG_TAG_WL_ROAM_LOG, "ROAM_LOG"}, + {1, EVENT_LOG_TAG_TRACE_WL_INFO, "WL INFO"}, + {1, 
EVENT_LOG_TAG_TRACE_BTCOEX_INFO, "BTCOEX INFO"}, + {1, EVENT_LOG_TAG_SCAN_WARN, "SCAN_WARN"}, + {1, EVENT_LOG_TAG_SCAN_ERROR, "SCAN_ERROR"}, + {2, EVENT_LOG_TAG_SCAN_TRACE_LOW, "SCAN_TRACE_LOW"}, + {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, "SCAN_TRACE_HIGH"} +}; + +/* reference tab table */ +uint ref_tag_tbl[EVENT_LOG_TAG_MAX + 1] = {0}; + +typedef struct dhddbg_loglist_item { + dll_t list; + prcd_event_log_hdr_t prcd_log_hdr; +} loglist_item_t; + +typedef struct dhbdbg_pending_item { + dll_t list; + dhd_dbg_ring_status_t ring_status; + dhd_dbg_ring_entry_t *ring_entry; +} pending_item_t; + +/* trace log entry header user space processing */ +struct tracelog_header { + int magic_num; + int buf_size; + int seq_num; +}; +#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06 + +int +dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data) +{ + dhd_dbg_ring_t *ring; + int ret = 0; + uint32 pending_len = 0; + + if (!dhdp || !dhdp->dbg) { + return BCME_BADADDR; + } + + if (!VALID_RING(ring_id)) { + DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id)); + return BCME_RANGE; + } + + ring = &dhdp->dbg->dbg_rings[ring_id]; + + ret = dhd_dbg_ring_push(ring, hdr, data); + if (ret != BCME_OK) + return ret; + + pending_len = dhd_dbg_ring_get_pending_len(ring); + dhd_dbg_ring_sched_pull(ring, pending_len, dhdp->dbg->pullreq, + dhdp->dbg->private, ring->id); + + return ret; +} + +int +dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len, + bool strip_header) +{ + dhd_dbg_ring_t *ring; + + if (!dhdp || !dhdp->dbg) { + return 0; + } + + if (!VALID_RING(ring_id)) { + DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id)); + return BCME_RANGE; + } + + ring = &dhdp->dbg->dbg_rings[ring_id]; + + return dhd_dbg_ring_pull_single(ring, data, buf_len, strip_header); +} + +int +dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len) +{ + dhd_dbg_ring_t *ring; + + if (!dhdp || !dhdp->dbg) + return 
0; + if (!VALID_RING(ring_id)) { + DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id)); + return BCME_RANGE; + } + ring = &dhdp->dbg->dbg_rings[ring_id]; + return dhd_dbg_ring_pull(ring, data, buf_len, FALSE); +} + +static int +dhd_dbg_msgtrace_seqchk(uint32 *prev, uint32 cur) +{ + /* normal case including wrap around */ + if ((cur == 0 && *prev == 0xFFFFFFFF) || ((cur - *prev) == 1)) { + goto done; + } else if (cur == *prev) { + DHD_EVENT(("%s duplicate trace\n", __FUNCTION__)); + return -1; + } else if (cur > *prev) { + DHD_EVENT(("%s lost %d packets\n", __FUNCTION__, cur - *prev)); + } else { + DHD_EVENT(("%s seq out of order, dhd %d, dongle %d\n", + __FUNCTION__, *prev, cur)); + } +done: + *prev = cur; + return 0; +} + +#ifndef MACOSX_DHD +static void +dhd_dbg_msgtrace_msg_parser(void *event_data) +{ + msgtrace_hdr_t *hdr; + char *data, *s; + static uint32 seqnum_prev = 0; + + if (!event_data) { + DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__)); + return; + } + + hdr = (msgtrace_hdr_t *)event_data; + data = (char *)event_data + MSGTRACE_HDRLEN; + + /* There are 2 bytes available at the end of data */ + data[ntoh16(hdr->len)] = '\0'; + + if (ntoh32(hdr->discarded_bytes) || ntoh32(hdr->discarded_printf)) { + DHD_DBGIF(("WLC_E_TRACE: [Discarded traces in dongle -->" + "discarded_bytes %d discarded_printf %d]\n", + ntoh32(hdr->discarded_bytes), + ntoh32(hdr->discarded_printf))); + } + + if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum))) + return; + + /* Display the trace buffer. 
Advance from + * \n to \n to avoid display big + * printf (issue with Linux printk ) + */ + while (*data != '\0' && (s = strstr(data, "\n")) != NULL) { + *s = '\0'; + DHD_FWLOG(("[FWLOG] %s\n", data)); + data = s+1; + } + if (*data) + DHD_FWLOG(("[FWLOG] %s", data)); +} +#endif /* MACOSX_DHD */ +#ifdef SHOW_LOGTRACE +#define DATA_UNIT_FOR_LOG_CNT 4 + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + +int +replace_percent_p_to_x(char *fmt) +{ + int p_to_x_done = FALSE; + + while (*fmt != '\0') + { + /* Skip characters will we see a % */ + if (*fmt++ != '%') + { + continue; + } + + /* + * Skip any flags, field width and precision: + *Flags: Followed by % + * #, 0, -, ' ', + + */ + if (*fmt == '#') + fmt++; + + if (*fmt == '0' || *fmt == '-' || *fmt == '+') + fmt++; + + /* + * Field width: + * An optional decimal digit string (with non-zero first digit) + * specifying a minimum field width + */ + while (*fmt && bcm_isdigit(*fmt)) + fmt++; + + /* + * Precision: + * An optional precision, in the form of a period ('.') followed by an + * optional decimal digit string. 
+ */ + if (*fmt == '.') + { + fmt++; + while (*fmt && bcm_isdigit(*fmt)) fmt++; + } + + /* If %p is seen, change it to %x */ + if (*fmt == 'p') + { + *fmt = 'x'; + p_to_x_done = TRUE; + } + if (*fmt) + fmt++; + } + + return p_to_x_done; +} + +/* To identify format of types %Ns where N >= 0 is a number */ +bool +check_valid_string_format(char *curr_ptr) +{ + char *next_ptr; + if ((next_ptr = bcmstrstr(curr_ptr, "s")) != NULL) { + /* Default %s format */ + if (curr_ptr == next_ptr) { + return TRUE; + } + + /* Verify each charater between '%' and 's' is a valid number */ + while (curr_ptr < next_ptr) { + if (bcm_isdigit(*curr_ptr) == FALSE) { + return FALSE; + } + curr_ptr++; + } + + return TRUE; + } else { + return FALSE; + } +} + +#define MAX_NO_OF_ARG 16 +#define FMTSTR_SIZE 132 +#define ROMSTR_SIZE 200 +#define SIZE_LOC_STR 50 +#define LOG_PRINT_CNT_MAX 16u +#define EL_PARSE_VER "V02" +#define EL_MSEC_PER_SEC 1000 + +bool +dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *prcd_log_hdr) +{ + event_log_extended_hdr_t *ext_log_hdr; + uint16 event_log_fmt_num; + uint8 event_log_hdr_type; + + /* Identify the type of event tag, payload type etc.. 
*/ + event_log_hdr_type = log_hdr->fmt_num & DHD_EVENT_LOG_HDR_MASK; + event_log_fmt_num = (log_hdr->fmt_num >> DHD_EVENT_LOG_FMT_NUM_OFFSET) & + DHD_EVENT_LOG_FMT_NUM_MASK; + + switch (event_log_hdr_type) { + case DHD_OW_NB_EVENT_LOG_HDR: + prcd_log_hdr->ext_event_log_hdr = FALSE; + prcd_log_hdr->binary_payload = FALSE; + break; + case DHD_TW_NB_EVENT_LOG_HDR: + prcd_log_hdr->ext_event_log_hdr = TRUE; + prcd_log_hdr->binary_payload = FALSE; + break; + case DHD_BI_EVENT_LOG_HDR: + if (event_log_fmt_num == DHD_OW_BI_EVENT_FMT_NUM) { + prcd_log_hdr->ext_event_log_hdr = FALSE; + prcd_log_hdr->binary_payload = TRUE; + } else if (event_log_fmt_num == DHD_TW_BI_EVENT_FMT_NUM) { + prcd_log_hdr->ext_event_log_hdr = TRUE; + prcd_log_hdr->binary_payload = TRUE; + } else { + DHD_ERROR(("%s: invalid format number 0x%X\n", + __FUNCTION__, event_log_fmt_num)); + return FALSE; + } + break; + case DHD_INVALID_EVENT_LOG_HDR: + default: + DHD_ERROR(("%s: invalid event log header type 0x%X\n", + __FUNCTION__, event_log_hdr_type)); + return FALSE; + } + + /* Parse extended and legacy event log headers and populate prcd_event_log_hdr_t */ + if (prcd_log_hdr->ext_event_log_hdr) { + ext_log_hdr = (event_log_extended_hdr_t *) + ((uint8 *)log_hdr - sizeof(event_log_hdr_t)); + prcd_log_hdr->tag = ((ext_log_hdr->extended_tag & + DHD_TW_VALID_TAG_BITS_MASK) << DHD_TW_EVENT_LOG_TAG_OFFSET) | log_hdr->tag; + } else { + prcd_log_hdr->tag = log_hdr->tag; + } + prcd_log_hdr->count = log_hdr->count; + prcd_log_hdr->fmt_num_raw = log_hdr->fmt_num; + prcd_log_hdr->fmt_num = event_log_fmt_num; + + /* update arm cycle */ + /* + * For loegacy event tag :- + * |payload........|Timestamp| Tag + * + * For extended event tag:- + * |payload........|Timestamp|extended Tag| Tag. + * + */ + prcd_log_hdr->armcycle = prcd_log_hdr->ext_event_log_hdr ? 
+ *(uint32 *)log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET : + *(uint32 *)log_hdr - EVENT_TAG_TIMESTAMP_OFFSET; + + /* update event log data pointer address */ + prcd_log_hdr->log_ptr = + (uint32 *)log_hdr - log_hdr->count - prcd_log_hdr->ext_event_log_hdr; + + /* handle error cases above this */ + return TRUE; +} + +static void +dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, + void *raw_event_ptr, uint32 logset, uint16 block, uint32* data) +{ + event_log_hdr_t *ts_hdr; + uint32 *log_ptr = plog_hdr->log_ptr; + char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 }; + uint32 rom_str_len = 0; + uint32 *ts_data; + + if (!raw_event_ptr) { + return; + } + + if (log_ptr < data) { + DHD_ERROR(("Invalid log pointer, logptr : %p data : %p \n", log_ptr, data)); + return; + } + + BCM_REFERENCE(ts_hdr); + BCM_REFERENCE(ts_data); + + if (log_ptr > data) { + /* Get time stamp if it's updated */ + ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t)); + if (ts_hdr->tag == EVENT_LOG_TAG_TS) { + ts_data = (uint32 *)ts_hdr - ts_hdr->count; + if (ts_data >= data) { + DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n", + ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1])); + } + } + } + + if (plog_hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) { + rom_str_len = (plog_hdr->count - 1) * sizeof(uint32); + if (rom_str_len >= (ROMSTR_SIZE -1)) + rom_str_len = ROMSTR_SIZE - 1; + + /* copy all ascii data for ROM printf to local string */ + memcpy(fmtstr_loc_buf, log_ptr, rom_str_len); + /* add end of line at last */ + fmtstr_loc_buf[rom_str_len] = '\0'; + + DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s", + log_ptr[plog_hdr->count - 1], fmtstr_loc_buf)); + + /* Add newline if missing */ + if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') + DHD_MSGTRACE_LOG(("\n")); + + return; + } + + if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE || + plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) { + wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, plog_hdr, log_ptr); 
+ return; + } + + /* print the message out in a logprint */ + dhd_dbg_verboselog_printf(dhdp, plog_hdr, raw_event_ptr, log_ptr, logset, block); +} + +void +dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, + void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block) +{ + dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr; + uint16 count; + int log_level, id; + char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 }; + char (*str_buf)[SIZE_LOC_STR] = NULL; + char *str_tmpptr = NULL; + uint32 addr = 0; + typedef union { + uint32 val; + char * addr; + } u_arg; + u_arg arg[MAX_NO_OF_ARG] = {{0}}; + char *c_ptr = NULL; + struct bcmstrbuf b; + + BCM_REFERENCE(arg); + + /* print the message out in a logprint */ + if (!(raw_event->fmts)) { + if (dhdp->dbg) { + log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level; + for (id = 0; id < ARRAYSIZE(fw_verbose_level_map); id++) { + if ((fw_verbose_level_map[id].tag == plog_hdr->tag) && + (fw_verbose_level_map[id].log_level > log_level)) + return; + } + } + + if (plog_hdr->binary_payload) { + DHD_ECNTR_LOG(("%06d.%03d EL:tag=%d len=%d fmt=0x%x", + (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC), + (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), + plog_hdr->tag, + plog_hdr->count, + plog_hdr->fmt_num)); + + for (count = 0; count < (plog_hdr->count - 1); count++) { + if (count && (count % LOG_PRINT_CNT_MAX == 0)) { + DHD_ECNTR_LOG(("\n\t%08x", log_ptr[count])); + } else { + DHD_ECNTR_LOG((" %08x", log_ptr[count])); + } + } + DHD_ECNTR_LOG(("\n")); + } + else { + bcm_binit(&b, fmtstr_loc_buf, FMTSTR_SIZE); + bcm_bprintf(&b, "%06d.%03d EL:%s:%u:%u %d %d 0x%x", + (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC), + (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), + EL_PARSE_VER, logset, block, + plog_hdr->tag, + plog_hdr->count, + plog_hdr->fmt_num); + for (count = 0; count < (plog_hdr->count - 1); count++) { + bcm_bprintf(&b, " %x", log_ptr[count]); + } + + /* 
ensure preserve fw logs go to debug_dump only in case of customer4 */ + if (logset < WL_MAX_PRESERVE_BUFFER && + ((0x01u << logset) & dhdp->logset_prsrv_mask)) { + DHD_PRSRV_MEM(("%s\n", b.origbuf)); + } else { + DHD_EVENT(("%s\n", b.origbuf)); + } + } + return; + } + + str_buf = MALLOCZ(dhdp->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)); + if (!str_buf) { + DHD_ERROR(("%s: malloc failed str_buf\n", __FUNCTION__)); + return; + } + + if ((plog_hdr->fmt_num) < raw_event->num_fmts) { + if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s", + raw_event->fmts[plog_hdr->fmt_num]); + plog_hdr->count++; + } else { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E:%u:%u %06d.%03d %s", + logset, block, + (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC), + (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC), + raw_event->fmts[plog_hdr->fmt_num]); + } + c_ptr = fmtstr_loc_buf; + } else { + /* for ecounters, don't print the error as it will flood */ + if ((plog_hdr->fmt_num != DHD_OW_BI_EVENT_FMT_NUM) && + (plog_hdr->fmt_num != DHD_TW_BI_EVENT_FMT_NUM)) { + DHD_ERROR(("%s: fmt number: 0x%x out of range\n", + __FUNCTION__, plog_hdr->fmt_num)); + } else { + DHD_INFO(("%s: fmt number: 0x%x out of range\n", + __FUNCTION__, plog_hdr->fmt_num)); + } + + goto exit; + } + + if (plog_hdr->count > MAX_NO_OF_ARG) { + DHD_ERROR(("%s: plog_hdr->count(%d) out of range\n", + __FUNCTION__, plog_hdr->count)); + goto exit; + } + + /* print the format string which will be needed for debugging incorrect formats */ + DHD_INFO(("%s: fmtstr_loc_buf = %s\n", __FUNCTION__, fmtstr_loc_buf)); + + /* Replace all %p to %x to handle 32 bit %p */ + replace_percent_p_to_x(fmtstr_loc_buf); + + for (count = 0; count < (plog_hdr->count - 1); count++) { + if (c_ptr != NULL) + if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL) + c_ptr++; + + if (c_ptr != NULL) { + if (check_valid_string_format(c_ptr)) { + if ((raw_event->raw_sstr) && + ((log_ptr[count] > 
raw_event->rodata_start) && + (log_ptr[count] < raw_event->rodata_end))) { + /* ram static string */ + addr = log_ptr[count] - raw_event->rodata_start; + str_tmpptr = raw_event->raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, + SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else if ((raw_event->rom_raw_sstr) && + ((log_ptr[count] > + raw_event->rom_rodata_start) && + (log_ptr[count] < + raw_event->rom_rodata_end))) { + /* rom static string */ + addr = log_ptr[count] - raw_event->rom_rodata_start; + str_tmpptr = raw_event->rom_raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, + SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else { + /* + * Dynamic string OR + * No data for static string. + * So store all string's address as string. + */ + snprintf(str_buf[count], SIZE_LOC_STR, + "(s)0x%x", log_ptr[count]); + arg[count].addr = str_buf[count]; + } + } else { + /* Other than string */ + arg[count].val = log_ptr[count]; + } + } + } + + /* ensure preserve fw logs go to debug_dump only in case of customer4 */ + if (logset < WL_MAX_PRESERVE_BUFFER && + ((0x01u << logset) & dhdp->logset_prsrv_mask)) { + DHD_PRSRV_MEM((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], + arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], + arg[11], arg[12], arg[13], arg[14], arg[15])); + } else { + DHD_EVENT((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], + arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], + arg[11], arg[12], arg[13], arg[14], arg[15])); + } + +exit: + MFREE(dhdp->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR)); +} + +void +dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present, + uint32 msgtrace_seqnum) +{ + msgtrace_hdr_t *hdr; + char *data, *tmpdata; + const uint32 log_hdr_len = sizeof(event_log_hdr_t); + uint32 log_pyld_len; + static uint32 seqnum_prev = 0; + event_log_hdr_t *log_hdr; + 
bool msg_processed = FALSE; + prcd_event_log_hdr_t prcd_log_hdr; + prcd_event_log_hdr_t *plog_hdr; + dll_t list_head, *cur; + loglist_item_t *log_item; + dhd_dbg_ring_entry_t msg_hdr; + char *logbuf; + struct tracelog_header *logentry_header; + uint ring_data_len = 0; + bool ecntr_pushed = FALSE; + uint32 logset = 0; + uint16 block = 0; + uint min_expected_len = 0; +#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP) + uint16 len_chk = 0; +#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */ + + BCM_REFERENCE(ecntr_pushed); + + if (msgtrace_hdr_present) + min_expected_len = (MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_LEN); + else + min_expected_len = EVENT_LOG_BLOCK_LEN; + + /* log trace event consists of: + * msgtrace header + * event log block header + * event log payload + */ + if (!event_data || (datalen <= min_expected_len)) { + DHD_ERROR(("%s: Not processing due to invalid event_data : %p or length : %d\n", + __FUNCTION__, event_data, datalen)); + if (event_data && msgtrace_hdr_present) { + prhex("event_data dump", event_data, datalen); + tmpdata = (char *)event_data + MSGTRACE_HDRLEN; + if (tmpdata) { + DHD_ERROR(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n", + ltoh16(*((uint16 *)(tmpdata+2))), + ltoh32(*((uint32 *)(tmpdata + 4))), + ltoh16(*((uint16 *)(tmpdata))))); + } + } else if (!event_data) { + DHD_ERROR(("%s: event_data is NULL, cannot dump prhex\n", __FUNCTION__)); + } + return; + } + + if (msgtrace_hdr_present) { + hdr = (msgtrace_hdr_t *)event_data; + data = (char *)event_data + MSGTRACE_HDRLEN; + datalen -= MSGTRACE_HDRLEN; + msgtrace_seqnum = hdr->seqnum; + } else { + data = (char *)event_data; + } + + if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(msgtrace_seqnum))) + return; + + /* Save the whole message to event log ring */ + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + logbuf = VMALLOC(dhdp->osh, sizeof(*logentry_header) + datalen); + if (logbuf == NULL) + return; + logentry_header = (struct tracelog_header *)logbuf; + 
logentry_header->magic_num = TRACE_LOG_MAGIC_NUMBER; + logentry_header->buf_size = datalen; + logentry_header->seq_num = msgtrace_seqnum; + msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE; + + ring_data_len = datalen + sizeof(*logentry_header); + + if ((sizeof(*logentry_header) + datalen) > PAYLOAD_MAX_LEN) { + DHD_ERROR(("%s:Payload len=%u exceeds max len\n", __FUNCTION__, + ((uint)sizeof(*logentry_header) + datalen))); + goto exit; + } + + msg_hdr.len = sizeof(*logentry_header) + datalen; + memcpy(logbuf + sizeof(*logentry_header), data, datalen); + DHD_DBGIF(("%s: datalen %d %d\n", __FUNCTION__, msg_hdr.len, datalen)); + dhd_dbg_push_to_ring(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf); + + /* Print sequence number, originating set and length of received + * event log buffer. Refer to event log buffer structure in + * event_log.h + */ + DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n", + ltoh16(*((uint16 *)(data+2))), ltoh32(*((uint32 *)(data + 4))), + ltoh16(*((uint16 *)(data))))); + + logset = ltoh32(*((uint32 *)(data + 4))); + + if (logset >= NUM_EVENT_LOG_SETS) { + DHD_ERROR(("%s logset: %d max: %d out of range, collect socram\n", + __FUNCTION__, logset, NUM_EVENT_LOG_SETS)); +#ifdef DHD_FW_COREDUMP + dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE; + dhd_bus_mem_dump(dhdp); +#endif /* DHD_FW_COREDUMP */ + } + + block = ltoh16(*((uint16 *)(data+2))); + + data += EVENT_LOG_BLOCK_HDRLEN; + datalen -= EVENT_LOG_BLOCK_HDRLEN; + + /* start parsing from the tail of packet + * Sameple format of a meessage + * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639 + * 001d3c54 00000064 00000064 035d6d89 0c580439 + * 0x0c580439 -- 39 is tag, 04 is count, 580c is format number + * all these uint32 values comes in reverse order as group as EL data + * while decoding we can only parse from last to first + * |<- datalen ->| + * |----(payload and maybe more logs)----|event_log_hdr_t| + * data log_hdr + */ + dll_init(&list_head); + + while (datalen > 
log_hdr_len) { + log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len); + memset(&prcd_log_hdr, 0, sizeof(prcd_log_hdr)); + if (!dhd_dbg_process_event_log_hdr(log_hdr, &prcd_log_hdr)) { + DHD_ERROR(("%s: Error while parsing event log header\n", + __FUNCTION__)); + } + + /* skip zero padding at end of frame */ + if (prcd_log_hdr.tag == EVENT_LOG_TAG_NULL) { + datalen -= log_hdr_len; + continue; + } + /* Check argument count (for non-ecounter events only), + * any event log should contain at least + * one argument (4 bytes) for arm cycle count and up to 16 + * arguments except EVENT_LOG_TAG_STATS which could use the + * whole payload of 256 words + */ + if (prcd_log_hdr.count == 0) { + break; + } + if ((prcd_log_hdr.tag != EVENT_LOG_TAG_STATS) && + (prcd_log_hdr.count > MAX_NO_OF_ARG)) { + break; + } + + log_pyld_len = (prcd_log_hdr.count + prcd_log_hdr.ext_event_log_hdr) * + DATA_UNIT_FOR_LOG_CNT; + /* log data should not cross the event data boundary */ + if ((uint32)((char *)log_hdr - data) < log_pyld_len) { + break; + } + /* skip 4 bytes time stamp packet */ + if (prcd_log_hdr.tag == EVENT_LOG_TAG_TS) { + datalen -= (log_pyld_len + log_hdr_len); + continue; + } + if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) { + DHD_ERROR(("%s allocating log list item failed\n", + __FUNCTION__)); + break; + } + + log_item->prcd_log_hdr.tag = prcd_log_hdr.tag; + log_item->prcd_log_hdr.count = prcd_log_hdr.count; + log_item->prcd_log_hdr.fmt_num = prcd_log_hdr.fmt_num; + log_item->prcd_log_hdr.armcycle = prcd_log_hdr.armcycle; + log_item->prcd_log_hdr.log_ptr = prcd_log_hdr.log_ptr; + log_item->prcd_log_hdr.payload_len = prcd_log_hdr.payload_len; + log_item->prcd_log_hdr.ext_event_log_hdr = prcd_log_hdr.ext_event_log_hdr; + log_item->prcd_log_hdr.binary_payload = prcd_log_hdr.binary_payload; + + dll_insert(&log_item->list, &list_head); + datalen -= (log_pyld_len + log_hdr_len); + } + + while (!dll_empty(&list_head)) { + msg_processed = FALSE; + cur = 
dll_head_p(&list_head); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + plog_hdr = &log_item->prcd_log_hdr; + +#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP) + if ((plog_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) || + ((plog_hdr->tag == EVENT_LOG_TAG_STATS) && + (plog_hdr->binary_payload))) { + if (!ecntr_pushed && dhd_log_dump_ecntr_enabled()) { + /* + * check msg hdr len before pushing. + * FW msg_hdr.len includes length of event log hdr, + * logentry header and payload. + */ + len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) + + PAYLOAD_ECNTR_MAX_LEN); + /* account extended event log header(extended_event_log_hdr) */ + if (plog_hdr->ext_event_log_hdr) { + len_chk += sizeof(*log_hdr); + } + if (msg_hdr.len > len_chk) { + DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: " + "msg_hdr.len=%u, max allowed for ecntrs=%u\n", + __FUNCTION__, msg_hdr.len, len_chk)); + goto exit; + } + dhd_dbg_ring_push(dhdp->ecntr_dbg_ring, &msg_hdr, logbuf); + ecntr_pushed = TRUE; + } + } +#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */ + + if (!msg_processed) { + dhd_dbg_verboselog_handler(dhdp, plog_hdr, raw_event_ptr, + logset, block, (uint32 *)data); + } + dll_delete(cur); + MFREE(dhdp->osh, log_item, sizeof(*log_item)); + + } + BCM_REFERENCE(log_hdr); + +exit: + while (!dll_empty(&list_head)) { + cur = dll_head_p(&list_head); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + dll_delete(cur); + MFREE(dhdp->osh, log_item, 
sizeof(*log_item)); + } + VMFREE(dhdp->osh, logbuf, ring_data_len); +} +#else /* !SHOW_LOGTRACE */ +static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, + prcd_event_log_hdr_t *plog_hdr, void *raw_event_ptr, uint32 logset, uint16 block, + uint32 *data) {}; +INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, + void *event_data, void *raw_event_ptr, uint datalen, + bool msgtrace_hdr_present, uint32 msgtrace_seqnum) {}; +#endif /* SHOW_LOGTRACE */ +#ifndef MACOSX_DHD +void +dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen) +{ + msgtrace_hdr_t *hdr; + + hdr = (msgtrace_hdr_t *)event_data; + + if (hdr->version != MSGTRACE_VERSION) { + DHD_DBGIF(("%s unsupported MSGTRACE version, dhd %d, dongle %d\n", + __FUNCTION__, MSGTRACE_VERSION, hdr->version)); + return; + } + + if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG) + dhd_dbg_msgtrace_msg_parser(event_data); + else if (hdr->trace_type == MSGTRACE_HDR_TYPE_LOG) + dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen, + TRUE, 0); +} + +#endif /* MACOSX_DHD */ + +/* + * dhd_dbg_set_event_log_tag : modify the state of an event log tag + */ +void +dhd_dbg_set_event_log_tag(dhd_pub_t *dhdp, uint16 tag, uint8 set) +{ + wl_el_tag_params_t pars; + char *cmd = "event_log_tag_control"; + char iovbuf[WLC_IOCTL_SMLEN] = { 0 }; + int ret; + + memset(&pars, 0, sizeof(pars)); + pars.tag = tag; + pars.set = set; + pars.flags = EVENT_LOG_TAG_FLAG_LOG; + + if (!bcm_mkiovar(cmd, (char *)&pars, sizeof(pars), iovbuf, sizeof(iovbuf))) { + DHD_ERROR(("%s mkiovar failed\n", __FUNCTION__)); + return; + } + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (ret) { + DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret)); + } +} + +int +dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags, uint32 threshold) +{ + dhd_dbg_ring_t *ring; + uint8 set = 1; + int i, array_len = 0; + struct log_level_table 
*log_level_tbl = NULL; + + if (!dhdp || !dhdp->dbg) + return BCME_BADADDR; + + if (!VALID_RING(ring_id)) { + DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id)); + return BCME_RANGE; + } + + ring = &dhdp->dbg->dbg_rings[ring_id]; + dhd_dbg_ring_config(ring, log_level, threshold); + + if (log_level > 0) + set = TRUE; + + if (ring->id == FW_VERBOSE_RING_ID) { + log_level_tbl = fw_verbose_level_map; + array_len = ARRAYSIZE(fw_verbose_level_map); + } + + for (i = 0; i < array_len; i++) { + if (log_level == 0 || (log_level_tbl[i].log_level > log_level)) { + /* clear the reference per ring */ + ref_tag_tbl[log_level_tbl[i].tag] &= ~(1 << ring_id); + } else { + /* set the reference per ring */ + ref_tag_tbl[log_level_tbl[i].tag] |= (1 << ring_id); + } + set = (ref_tag_tbl[log_level_tbl[i].tag])? 1 : 0; + DHD_DBGIF(("%s TAG(%s) is %s for the ring(%s)\n", __FUNCTION__, + log_level_tbl[i].desc, (set)? "SET" : "CLEAR", ring->name)); + dhd_dbg_set_event_log_tag(dhdp, log_level_tbl[i].tag, set); + } + return BCME_OK; +} + +/* +* dhd_dbg_get_ring_status : get the ring status from the coresponding ring buffer +* Return: An error code or 0 on success. 
*/

int
dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status)
{
	int ret = BCME_OK;
	int id = 0;
	dhd_dbg_t *dbg;
	dhd_dbg_ring_t *dbg_ring;
	dhd_dbg_ring_status_t ring_status;
	if (!dhdp || !dhdp->dbg)
		return BCME_BADADDR;
	dbg = dhdp->dbg;

	/* Walk every ring slot and copy out the status of the one whose id
	 * matches ring_id; a local copy is filled first so the caller's
	 * struct is only written on a successful match.
	 */
	memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t));
	for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
		dbg_ring = &dbg->dbg_rings[id];
		if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
			RING_STAT_TO_STATUS(dbg_ring, ring_status);
			*dbg_ring_status = ring_status;
			break;
		}
	}
	/* loop index ran past the last valid ring => no match was found */
	if (!VALID_RING(id)) {
		DHD_ERROR(("%s : cannot find the ring_id : %d\n", __FUNCTION__, ring_id));
		ret = BCME_NOTFOUND;
	}
	return ret;
}

/*
* dhd_dbg_find_ring_id : return ring_id based on ring_name
* Return: An invalid ring id for failure or valid ring id on success.
*/

int
dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name)
{
	int id;
	dhd_dbg_t *dbg;
	dhd_dbg_ring_t *ring;

	if (!dhdp || !dhdp->dbg)
		return BCME_BADADDR;

	dbg = dhdp->dbg;
	/* NOTE(review): when no name matches, this returns DEBUG_RING_ID_MAX,
	 * which is NOT a valid id - callers must check with VALID_RING().
	 */
	for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
		ring = &dbg->dbg_rings[id];
		if (!strncmp((char *)ring->name, ring_name, sizeof(ring->name) - 1))
			break;
	}
	return id;
}

/*
* dhd_dbg_get_priv : get the private data of dhd dbugability module
* Return : An NULL on failure or valid data address
*/
void *
dhd_dbg_get_priv(dhd_pub_t *dhdp)
{
	if (!dhdp || !dhdp->dbg)
		return NULL;
	return dhdp->dbg->private;
}

/*
* dhd_dbg_start : start and stop All of Ring buffers
* Return: An error code or 0 on success.
+*/ +int +dhd_dbg_start(dhd_pub_t *dhdp, bool start) +{ + int ret = BCME_OK; + int ring_id; + dhd_dbg_t *dbg; + dhd_dbg_ring_t *dbg_ring; + if (!dhdp) + return BCME_BADARG; + dbg = dhdp->dbg; + + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + dbg_ring = &dbg->dbg_rings[ring_id]; + if (!start) { + if (VALID_RING(dbg_ring->id)) { + dhd_dbg_ring_start(dbg_ring); + } + } + } + return ret; +} + +/* + * dhd_dbg_send_urgent_evt: send the health check evt to Upper layer + * + * Return: An error code or 0 on success. + */ + +int +dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len) +{ + dhd_dbg_t *dbg; + int ret = BCME_OK; + if (!dhdp || !dhdp->dbg) + return BCME_BADADDR; + + dbg = dhdp->dbg; + if (dbg->urgent_notifier) { + dbg->urgent_notifier(dhdp, data, len); + } + return ret; +} + +#if defined(DBG_PKT_MON) +uint32 +__dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid) +{ + uint32 __pkt; + uint32 __pktid; + + __pkt = ((int)pkt) >= 0 ? (2 * pkt) : (-2 * pkt - 1); + __pktid = ((int)pktid) >= 0 ? (2 * pktid) : (-2 * pktid - 1); + + return (__pkt >= __pktid ? 
	    (__pkt * __pkt + __pkt + __pktid) :
	    (__pkt + __pktid * __pktid));
}

/* timespec -> microseconds, truncated to 32 bits */
#define __TIMESPEC_TO_US(ts) \
	(((uint32)(ts).tv_sec * USEC_PER_SEC) + ((ts).tv_nsec / NSEC_PER_USEC))

/* Driver-side timestamp in microseconds since boot (32-bit, wraps).
 * NOTE(review): get_monotonic_boottime() was removed in recent kernels
 * (replaced by ktime_get_boottime_ts64) - verify against the target kernel.
 */
uint32
__dhd_dbg_driver_ts_usec(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return ((uint32)(__TIMESPEC_TO_US(ts)));
}

/* Map a WLFC tx-status code from firmware onto the HAL's packet-fate enum. */
wifi_tx_packet_fate
__dhd_dbg_map_tx_status_to_pkt_fate(uint16 status)
{
	wifi_tx_packet_fate pkt_fate;

	switch (status) {
	case WLFC_CTL_PKTFLAG_DISCARD:
		pkt_fate = TX_PKT_FATE_ACKED;
		break;
	case WLFC_CTL_PKTFLAG_D11SUPPRESS:
		/* intentional fall through */
	case WLFC_CTL_PKTFLAG_WLSUPPRESS:
		pkt_fate = TX_PKT_FATE_FW_QUEUED;
		break;
	case WLFC_CTL_PKTFLAG_TOSSED_BYWLC:
		pkt_fate = TX_PKT_FATE_FW_DROP_INVALID;
		break;
	case WLFC_CTL_PKTFLAG_DISCARD_NOACK:
		pkt_fate = TX_PKT_FATE_SENT;
		break;
	case WLFC_CTL_PKTFLAG_EXPIRED:
		pkt_fate = TX_PKT_FATE_FW_DROP_EXPTIME;
		break;
	case WLFC_CTL_PKTFLAG_MKTFREE:
		pkt_fate = TX_PKT_FATE_FW_PKT_FREE;
		break;
	default:
		pkt_fate = TX_PKT_FATE_FW_DROP_OTHER;
		break;
	}

	return pkt_fate;
}
#endif // endif

#ifdef DBG_PKT_MON
/* Free the duplicated skbs held by the first pkt_count tx log entries.
 * The entries themselves are not freed; caller owns the array.
 */
static int
__dhd_dbg_free_tx_pkts(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkts,
	uint16 pkt_count)
{
	uint16 count;

	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
	count = 0;
	while ((count < pkt_count) && tx_pkts) {
		if (tx_pkts->info.pkt) {
			PKTFREE(dhdp->osh, tx_pkts->info.pkt, TRUE);
		}
		tx_pkts++;
		count++;
	}

	return BCME_OK;
}

/* Rx counterpart of __dhd_dbg_free_tx_pkts. */
static int
__dhd_dbg_free_rx_pkts(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkts,
	uint16 pkt_count)
{
	uint16 count;

	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
	count = 0;
	while ((count < pkt_count) && rx_pkts) {
		if (rx_pkts->info.pkt) {
			PKTFREE(dhdp->osh, rx_pkts->info.pkt, TRUE);
		}
		rx_pkts++;
		count++;
	}

	return BCME_OK;
}

/* Dump one monitored packet's metadata (and hexdump of its payload when
 * full packet monitoring is on) to the debug log.
 */
void
__dhd_dbg_dump_pkt_info(dhd_pub_t *dhdp, dhd_dbg_pkt_info_t *info)
{
	if (DHD_PKT_MON_DUMP_ON()) {
		DHD_PKT_MON(("payload type = %d\n", info->payload_type));
		DHD_PKT_MON(("driver ts = %u\n", info->driver_ts));
		DHD_PKT_MON(("firmware ts = %u\n", info->firmware_ts));
		DHD_PKT_MON(("packet hash = %u\n", info->pkt_hash));
		DHD_PKT_MON(("packet length = %zu\n", info->pkt_len));
		DHD_PKT_MON(("packet address = %p\n", info->pkt));
		DHD_PKT_MON(("packet data = \n"));
		if (DHD_PKT_MON_ON()) {
			prhex(NULL, PKTDATA(dhdp->osh, info->pkt), info->pkt_len);
		}
	}
}

/* Dump one tx log entry (fate + metadata). */
void
__dhd_dbg_dump_tx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkt,
	uint16 count)
{
	if (DHD_PKT_MON_DUMP_ON()) {
		DHD_PKT_MON(("\nTX (count: %d)\n", ++count));
		DHD_PKT_MON(("packet fate = %d\n", tx_pkt->fate));
		__dhd_dbg_dump_pkt_info(dhdp, &tx_pkt->info);
	}
}

/* Dump one rx log entry (fate + metadata). */
void
__dhd_dbg_dump_rx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkt,
	uint16 count)
{
	if (DHD_PKT_MON_DUMP_ON()) {
		DHD_PKT_MON(("\nRX (count: %d)\n", ++count));
		DHD_PKT_MON(("packet fate = %d\n", rx_pkt->fate));
		__dhd_dbg_dump_pkt_info(dhdp, &rx_pkt->info);
	}
}

/* Allocate the tx/rx packet-fate reports and register the monitor
 * callbacks; idempotent (returns BCME_OK if already attached).
 */
int
dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
	dbg_mon_tx_pkts_t tx_pkt_mon,
	dbg_mon_tx_status_t tx_status_mon,
	dbg_mon_rx_pkts_t rx_pkt_mon)
{

	dhd_dbg_tx_report_t *tx_report = NULL;
	dhd_dbg_rx_report_t *rx_report = NULL;
	dhd_dbg_tx_info_t *tx_pkts = NULL;
	dhd_dbg_rx_info_t *rx_pkts = NULL;
	dhd_dbg_pkt_mon_state_t tx_pkt_state;
	dhd_dbg_pkt_mon_state_t tx_status_state;
	dhd_dbg_pkt_mon_state_t rx_pkt_state;
	uint32 alloc_len;
	int ret = BCME_OK;
	unsigned long flags;

	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ?
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_pkt_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_ATTACHED(tx_pkt_state) || PKT_MON_ATTACHED(tx_status_state) || + PKT_MON_ATTACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is already attached, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + /* return success as the intention was to initialize packet monitor */ + return BCME_OK; + } + + /* allocate and initialize tx packet monitoring */ + alloc_len = sizeof(*tx_report); + tx_report = (dhd_dbg_tx_report_t *)MALLOCZ(dhdp->osh, alloc_len); + if (unlikely(!tx_report)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_tx_report_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN); + tx_pkts = (dhd_dbg_tx_info_t *)MALLOCZ(dhdp->osh, alloc_len); + if (unlikely(!tx_pkts)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_tx_info_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + dhdp->dbg->pkt_mon.tx_report = tx_report; + dhdp->dbg->pkt_mon.tx_report->tx_pkts = tx_pkts; + dhdp->dbg->pkt_mon.tx_pkt_mon = tx_pkt_mon; + dhdp->dbg->pkt_mon.tx_status_mon = tx_status_mon; + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_ATTACHED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_ATTACHED; + + /* allocate and initialze rx packet monitoring */ + alloc_len = sizeof(*rx_report); + rx_report = (dhd_dbg_rx_report_t *)MALLOCZ(dhdp->osh, alloc_len); + if (unlikely(!rx_report)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_rx_report_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN); + rx_pkts = (dhd_dbg_rx_info_t 
*)MALLOCZ(dhdp->osh, alloc_len); + if (unlikely(!rx_pkts)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_rx_info_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + dhdp->dbg->pkt_mon.rx_report = rx_report; + dhdp->dbg->pkt_mon.rx_report->rx_pkts = rx_pkts; + dhdp->dbg->pkt_mon.rx_pkt_mon = rx_pkt_mon; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_ATTACHED; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + DHD_PKT_MON(("%s(): packet monitor attach succeeded\n", __FUNCTION__)); + return ret; + +fail: + /* tx packet monitoring */ + if (tx_pkts) { + alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN); + MFREE(dhdp->osh, tx_pkts, alloc_len); + } + if (tx_report) { + alloc_len = sizeof(*tx_report); + MFREE(dhdp->osh, tx_report, alloc_len); + } + dhdp->dbg->pkt_mon.tx_report = NULL; + dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL; + dhdp->dbg->pkt_mon.tx_pkt_mon = NULL; + dhdp->dbg->pkt_mon.tx_status_mon = NULL; + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED; + + /* rx packet monitoring */ + if (rx_pkts) { + alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN); + MFREE(dhdp->osh, rx_pkts, alloc_len); + } + if (rx_report) { + alloc_len = sizeof(*rx_report); + MFREE(dhdp->osh, rx_report, alloc_len); + } + dhdp->dbg->pkt_mon.rx_report = NULL; + dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL; + dhdp->dbg->pkt_mon.rx_pkt_mon = NULL; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + DHD_ERROR(("%s(): packet monitor attach failed\n", __FUNCTION__)); + return ret; +} + +int +dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + if (!dhdp || !dhdp->dbg) { + 
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;

	/* a start only makes sense after dhd_dbg_attach_pkt_monitor() */
	if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
		PKT_MON_DETACHED(rx_pkt_state)) {
		DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
			"tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
			__FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
		return -EINVAL;
	}

	/* STARTING is an intermediate state: loggers are paused while the
	 * previously captured packets are released below.
	 */
	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTING;
	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTING;
	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTING;

	tx_report = dhdp->dbg->pkt_mon.tx_report;
	rx_report = dhdp->dbg->pkt_mon.rx_report;
	if (!tx_report || !rx_report) {
		DHD_PKT_MON(("%s(): tx_report=%p, rx_report=%p\n",
			__FUNCTION__, tx_report, rx_report));
		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
		return -EINVAL;
	}

	/* NOTE(review): these re-reads are dead stores - the states were just
	 * written to PKT_MON_STARTING above and the values are never used.
	 */
	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;

	/* Safe to free packets as state pkt_state is STARTING */
	__dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, tx_report->pkt_pos);

	__dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, rx_report->pkt_pos);

	/* reset array position */
	tx_report->pkt_pos = 0;
	tx_report->status_pos = 0;
	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTED;
	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTED;

	rx_report->pkt_pos = 0;
	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTED;
	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);

	DHD_PKT_MON(("%s(): packet monitor started\n", __FUNCTION__));
	return BCME_OK;
}

/* Log one outgoing packet into the tx fate report (duplicates the skb). */
int
dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void
 *pkt, uint32 pktid)
{
	dhd_dbg_tx_report_t *tx_report;
	dhd_dbg_tx_info_t *tx_pkts;
	dhd_dbg_pkt_mon_state_t tx_pkt_state;
	uint32 pkt_hash, driver_ts;
	uint16 pkt_pos;
	unsigned long flags;

	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
	if (PKT_MON_STARTED(tx_pkt_state)) {
		tx_report = dhdp->dbg->pkt_mon.tx_report;
		pkt_pos = tx_report->pkt_pos;

		if (!PKT_MON_PKT_FULL(pkt_pos)) {
			tx_pkts = tx_report->tx_pkts;
			/* hash of (pkt, pktid) lets the later tx status be
			 * matched back to this entry
			 */
			pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
			driver_ts = __dhd_dbg_driver_ts_usec();

			tx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
			tx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
			tx_pkts[pkt_pos].info.pkt_hash = pkt_hash;
			tx_pkts[pkt_pos].info.driver_ts = driver_ts;
			tx_pkts[pkt_pos].info.firmware_ts = 0U;
			tx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
			tx_pkts[pkt_pos].fate = TX_PKT_FATE_DRV_QUEUED;

			tx_report->pkt_pos++;
		} else {
			/* log is full; stop logging instead of overwriting */
			dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED;
			DHD_PKT_MON(("%s(): tx pkt logging stopped, reached "
				"max limit\n", __FUNCTION__));
		}
	}

	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
	return BCME_OK;
}

/* Record the firmware tx status for a previously logged packet: the entry
 * is located by hash, first scanning forward from status_pos (in-order
 * completion fast path), then backward from status_pos-1 (out-of-order).
 */
int
dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
	uint16 status)
{
	dhd_dbg_tx_report_t *tx_report;
	dhd_dbg_tx_info_t *tx_pkt;
	dhd_dbg_pkt_mon_state_t tx_status_state;
	wifi_tx_packet_fate pkt_fate;
	uint32 pkt_hash, temp_hash;
	uint16 pkt_pos, status_pos;
	int16 count;
	bool found = FALSE;
	unsigned long flags;

	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
	if (PKT_MON_STARTED(tx_status_state)) {
		tx_report = dhdp->dbg->pkt_mon.tx_report;
		pkt_pos = tx_report->pkt_pos;
		status_pos = tx_report->status_pos;

		if (!PKT_MON_STATUS_FULL(pkt_pos, status_pos)) {
			pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
			pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status);

			/* best bet (in-order tx completion) */
			count = status_pos;
			tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + status_pos);
			while ((count < pkt_pos) && tx_pkt) {
				temp_hash = tx_pkt->info.pkt_hash;
				if (temp_hash == pkt_hash) {
					tx_pkt->fate = pkt_fate;
					tx_report->status_pos++;
					found = TRUE;
					break;
				}
				tx_pkt++;
				count++;
			}

			/* search until beginning (handles out-of-order completion) */
			if (!found) {
				/* NOTE(review): when status_pos == 0, count
				 * becomes -1 and tx_pkt points one element
				 * before the array; the (count >= 0) guard
				 * prevents any dereference, but the pointer
				 * arithmetic is technically out of bounds.
				 */
				count = status_pos - 1;
				tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + count);
				while ((count >= 0) && tx_pkt) {
					temp_hash = tx_pkt->info.pkt_hash;
					if (temp_hash == pkt_hash) {
						tx_pkt->fate = pkt_fate;
						tx_report->status_pos++;
						found = TRUE;
						break;
					}
					tx_pkt--;
					count--;
				}

				if (!found) {
					/* still couldn't match tx_status */
					DHD_ERROR(("%s(): couldn't match tx_status, pkt_pos=%u, "
						"status_pos=%u, pkt_fate=%u\n", __FUNCTION__,
						pkt_pos, status_pos, pkt_fate));
				}
			}
		} else {
			dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED;
			DHD_PKT_MON(("%s(): tx_status logging stopped, reached "
				"max limit\n", __FUNCTION__));
		}
	}

	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
	return BCME_OK;
}

/* Log one incoming packet into the rx fate report (duplicates the skb). */
int
dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt)
{
	dhd_dbg_rx_report_t *rx_report;
	dhd_dbg_rx_info_t *rx_pkts;
	dhd_dbg_pkt_mon_state_t rx_pkt_state;
	uint32 driver_ts;
	uint16 pkt_pos;
	unsigned long flags;

	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
	if (PKT_MON_STARTED(rx_pkt_state)) {
		rx_report = dhdp->dbg->pkt_mon.rx_report;
		pkt_pos = rx_report->pkt_pos;

		if (!PKT_MON_PKT_FULL(pkt_pos)) {
			rx_pkts = rx_report->rx_pkts;
			driver_ts = __dhd_dbg_driver_ts_usec();

			/* rx entries carry no hash: there is no later status
			 * report to match against
			 */
			rx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
			rx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
			rx_pkts[pkt_pos].info.pkt_hash = 0U;
			rx_pkts[pkt_pos].info.driver_ts = driver_ts;
			rx_pkts[pkt_pos].info.firmware_ts = 0U;
			rx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
			rx_pkts[pkt_pos].fate = RX_PKT_FATE_SUCCESS;

			rx_report->pkt_pos++;
		} else {
			dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED;
			DHD_PKT_MON(("%s(): rx pkt logging stopped, reached "
				"max limit\n", __FUNCTION__));
		}
	}

	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
	return BCME_OK;
}

/* Move all three monitor state machines to PKT_MON_STOPPED; captured
 * packets are kept until the next start or a detach.
 */
int
dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp)
{
	dhd_dbg_pkt_mon_state_t tx_pkt_state;
	dhd_dbg_pkt_mon_state_t tx_status_state;
	dhd_dbg_pkt_mon_state_t rx_pkt_state;
	unsigned long flags;

	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ?
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) || + PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is not yet enabled, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED; + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + + DHD_PKT_MON(("%s(): packet monitor stopped\n", __FUNCTION__)); + return BCME_OK; +} + +#define __COPY_TO_USER(to, from, n) \ + do { \ + int __ret; \ + __ret = copy_to_user((void __user *)(to), (void *)(from), \ + (unsigned long)(n)); \ + if (unlikely(__ret)) { \ + DHD_ERROR(("%s():%d: copy_to_user failed, ret=%d\n", \ + __FUNCTION__, __LINE__, __ret)); \ + return __ret; \ + } \ + } while (0); + +int +dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_tx_info_t *tx_pkt; + wifi_tx_report_t *ptr; + compat_wifi_tx_report_t *cptr; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + uint16 pkt_count, count; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + BCM_REFERENCE(ptr); + BCM_REFERENCE(cptr); + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state)) { + DHD_PKT_MON(("%s(): packet monitor is not yet enabled, " + "tx_pkt_state=%d, tx_status_state=%d\n", __FUNCTION__, + tx_pkt_state, tx_status_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + count = 0; + tx_report = dhdp->dbg->pkt_mon.tx_report; + tx_pkt = tx_report->tx_pkts; + pkt_count = MIN(req_count, tx_report->status_pos); + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { + cptr = (compat_wifi_tx_report_t *)user_buf; + while ((count < pkt_count) && tx_pkt && cptr) { + compat_wifi_tx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr); + compat_dhd_dbg_pkt_info_t compat_tx_pkt; + __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count); + __COPY_TO_USER(&comp_ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate)); + + compat_tx_pkt.payload_type = tx_pkt->info.payload_type; + compat_tx_pkt.pkt_len = tx_pkt->info.pkt_len; + compat_tx_pkt.driver_ts = tx_pkt->info.driver_ts; + compat_tx_pkt.firmware_ts = tx_pkt->info.firmware_ts; + compat_tx_pkt.pkt_hash = tx_pkt->info.pkt_hash; + __COPY_TO_USER(&comp_ptr->frame_inf.payload_type, + &compat_tx_pkt.payload_type, + OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len); + + cptr++; + tx_pkt++; + count++; + } + } else +#endif /* CONFIG_COMPAT */ + { + ptr = (wifi_tx_report_t *)user_buf; + while ((count < pkt_count) && tx_pkt && ptr) { + __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count); + __COPY_TO_USER(&ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate)); + __COPY_TO_USER(&ptr->frame_inf.payload_type, + 
&tx_pkt->info.payload_type, + OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len); + + ptr++; + tx_pkt++; + count++; + } + } + *resp_count = pkt_count; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + if (!pkt_count) { + DHD_ERROR(("%s(): no tx_status in tx completion messages, " + "make sure that 'd11status' is enabled in firmware, " + "status_pos=%u\n", __FUNCTION__, pkt_count)); + } + + return BCME_OK; +} + +int +dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_rx_info_t *rx_pkt; + wifi_rx_report_t *ptr; + compat_wifi_rx_report_t *cptr; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + uint16 pkt_count, count; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + BCM_REFERENCE(ptr); + BCM_REFERENCE(cptr); + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + if (PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet fetch is not allowed , " + "rx_pkt_state=%d\n", __FUNCTION__, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + count = 0; + rx_report = dhdp->dbg->pkt_mon.rx_report; + rx_pkt = rx_report->rx_pkts; + pkt_count = MIN(req_count, rx_report->pkt_pos); + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { + cptr = (compat_wifi_rx_report_t *)user_buf; + while ((count < pkt_count) && rx_pkt && cptr) { + compat_wifi_rx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr); + compat_dhd_dbg_pkt_info_t compat_rx_pkt; + __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count); + __COPY_TO_USER(&comp_ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate)); + + compat_rx_pkt.payload_type = rx_pkt->info.payload_type; + compat_rx_pkt.pkt_len = rx_pkt->info.pkt_len; + compat_rx_pkt.driver_ts = rx_pkt->info.driver_ts; + compat_rx_pkt.firmware_ts = rx_pkt->info.firmware_ts; + compat_rx_pkt.pkt_hash = rx_pkt->info.pkt_hash; + __COPY_TO_USER(&comp_ptr->frame_inf.payload_type, + &compat_rx_pkt.payload_type, + OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len); + + cptr++; + rx_pkt++; + count++; + } + } else +#endif /* CONFIG_COMPAT */ + { + ptr = (wifi_rx_report_t *)user_buf; + while ((count < pkt_count) && rx_pkt && ptr) { + __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count); + + __COPY_TO_USER(&ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate)); + __COPY_TO_USER(&ptr->frame_inf.payload_type, + &rx_pkt->info.payload_type, + OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, 
rx_pkt->info.pkt), rx_pkt->info.pkt_len); + + ptr++; + rx_pkt++; + count++; + } + } + + *resp_count = pkt_count; + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + + return BCME_OK; +} + +int +dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) || + PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is already detached, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + tx_report = dhdp->dbg->pkt_mon.tx_report; + rx_report = dhdp->dbg->pkt_mon.rx_report; + + /* free and de-initalize tx packet monitoring */ + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED; + if (tx_report) { + if (tx_report->tx_pkts) { + __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, + tx_report->pkt_pos); + MFREE(dhdp->osh, tx_report->tx_pkts, + (sizeof(*tx_report->tx_pkts) * MAX_FATE_LOG_LEN)); + dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL; + } + MFREE(dhdp->osh, tx_report, sizeof(*tx_report)); + dhdp->dbg->pkt_mon.tx_report = NULL; + } + dhdp->dbg->pkt_mon.tx_pkt_mon = NULL; + dhdp->dbg->pkt_mon.tx_status_mon = NULL; + + /* free and de-initalize rx packet monitoring */ + 
dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED; + if (rx_report) { + if (rx_report->rx_pkts) { + __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, + rx_report->pkt_pos); + MFREE(dhdp->osh, rx_report->rx_pkts, + (sizeof(*rx_report->rx_pkts) * MAX_FATE_LOG_LEN)); + dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL; + } + MFREE(dhdp->osh, rx_report, sizeof(*rx_report)); + dhdp->dbg->pkt_mon.rx_report = NULL; + } + dhdp->dbg->pkt_mon.rx_pkt_mon = NULL; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__)); + return BCME_OK; +} +bool +dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, + uint16 status) +{ + bool pkt_fate = TRUE; + if (dhdp->d11_tx_status) { + pkt_fate = (status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE; + DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status); + } + return pkt_fate; +} + +#else /* DBG_PKT_MON */ + +bool +dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status) +{ + return TRUE; +} + +#endif /* DBG_PKT_MON */ + +/* + * dhd_dbg_attach: initialization of dhd debuggability module + * + * Return: An error code or 0 on success. 
+ */ +int +dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, + dbg_urgent_noti_t os_urgent_notifier, void *os_priv) +{ + dhd_dbg_t *dbg = NULL; + dhd_dbg_ring_t *ring = NULL; + int ret = BCME_ERROR, ring_id = 0; + void *buf = NULL; + + dbg = MALLOCZ(dhdp->osh, sizeof(dhd_dbg_t)); + if (!dbg) + return BCME_NOMEM; + +#ifdef CONFIG_DHD_USE_STATIC_BUF + buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE); +#else + buf = MALLOCZ(dhdp->osh, FW_VERBOSE_RING_SIZE); +#endif + if (!buf) + goto error; + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID, + (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf); + if (ret) + goto error; + +#ifdef CONFIG_DHD_USE_STATIC_BUF + buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE); +#else + buf = MALLOCZ(dhdp->osh, DHD_EVENT_RING_SIZE); +#endif + if (!buf) + goto error; + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID, + (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf); + if (ret) + goto error; + + dbg->private = os_priv; + dbg->pullreq = os_pullreq; + dbg->urgent_notifier = os_urgent_notifier; + dhdp->dbg = dbg; + + return BCME_OK; + +error: + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + if (VALID_RING(dbg->dbg_rings[ring_id].id)) { + ring = &dbg->dbg_rings[ring_id]; + dhd_dbg_ring_deinit(dhdp, ring); + if (ring->ring_buf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhdp->osh, ring->ring_buf, ring->ring_size); +#endif + ring->ring_buf = NULL; + } + ring->ring_size = 0; + } + } + MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t)); + + return ret; +} + +/* + * dhd_dbg_detach: clean up dhd dbugability module + */ +void +dhd_dbg_detach(dhd_pub_t *dhdp) +{ + int ring_id; + dhd_dbg_ring_t *ring = NULL; + dhd_dbg_t *dbg; + + if (!dhdp->dbg) + return; + dbg = dhdp->dbg; + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + if 
(VALID_RING(dbg->dbg_rings[ring_id].id)) { + ring = &dbg->dbg_rings[ring_id]; + dhd_dbg_ring_deinit(dhdp, ring); + if (ring->ring_buf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhdp->osh, ring->ring_buf, ring->ring_size); +#endif + ring->ring_buf = NULL; + } + ring->ring_size = 0; + } + } + MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t)); +} diff --git a/bcmdhd.100.10.315.x/dhd_debug.h b/bcmdhd.100.10.315.x/dhd_debug.h new file mode 100644 index 0000000..f850efc --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_debug.h @@ -0,0 +1,845 @@ +/* + * DHD debugability header file + * + * <> + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_debug.h 771435 2018-07-10 05:35:24Z $ + */ + +#ifndef _dhd_debug_h_ +#define _dhd_debug_h_ +#include +#include +#include + +enum { + DEBUG_RING_ID_INVALID = 0, + FW_VERBOSE_RING_ID, + DHD_EVENT_RING_ID, + /* add new id here */ + DEBUG_RING_ID_MAX +}; + +enum { + /* Feature set */ + DBG_MEMORY_DUMP_SUPPORTED = (1 << (0)), /* Memory dump of FW */ + DBG_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), /* PKT Status */ + DBG_CONNECT_EVENT_SUPPORTED = (1 << (2)), /* Connectivity Event */ + DBG_POWER_EVENT_SUPOORTED = (1 << (3)), /* POWER of Driver */ + DBG_WAKE_LOCK_SUPPORTED = (1 << (4)), /* WAKE LOCK of Driver */ + DBG_VERBOSE_LOG_SUPPORTED = (1 << (5)), /* verbose log of FW */ + DBG_HEALTH_CHECK_SUPPORTED = (1 << (6)), /* monitor the health of FW */ + DBG_DRIVER_DUMP_SUPPORTED = (1 << (7)), /* dumps driver state */ + DBG_PACKET_FATE_SUPPORTED = (1 << (8)), /* tracks connection packets' fate */ + DBG_NAN_EVENT_SUPPORTED = (1 << (9)), /* NAN Events */ +}; + +enum { + /* set for binary entries */ + DBG_RING_ENTRY_FLAGS_HAS_BINARY = (1 << (0)), + /* set if 64 bits timestamp is present */ + DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP = (1 << (1)) +}; + +/* firmware verbose ring, ring id 1 */ +#define FW_VERBOSE_RING_NAME "fw_verbose" +#define FW_VERBOSE_RING_SIZE (256 * 1024) +/* firmware event ring, ring id 2 */ +#define FW_EVENT_RING_NAME "fw_event" +#define FW_EVENT_RING_SIZE (64 * 1024) +/* DHD connection event ring, ring id 3 */ +#define DHD_EVENT_RING_NAME "dhd_event" +#define DHD_EVENT_RING_SIZE (64 * 1024) +/* NAN event ring, ring id 4 */ +#define NAN_EVENT_RING_NAME "nan_event" +#define NAN_EVENT_RING_SIZE (64 * 1024) + +#define TLV_LOG_SIZE(tlv) ((tlv) ? (sizeof(tlv_log) + (tlv)->len) : 0) + +#define TLV_LOG_NEXT(tlv) \ + ((tlv) ? 
((tlv_log *)((uint8 *)tlv + TLV_LOG_SIZE(tlv))) : 0) + +#define VALID_RING(id) \ + ((id > DEBUG_RING_ID_INVALID) && (id < DEBUG_RING_ID_MAX)) + +#ifdef DEBUGABILITY +#define DBG_RING_ACTIVE(dhdp, ring_id) \ + ((dhdp)->dbg->dbg_rings[(ring_id)].state == RING_ACTIVE) +#else +#define DBG_RING_ACTIVE(dhdp, ring_id) 0 +#endif /* DEBUGABILITY */ + +enum { + /* driver receive association command from kernel */ + WIFI_EVENT_ASSOCIATION_REQUESTED = 0, + WIFI_EVENT_AUTH_COMPLETE, + WIFI_EVENT_ASSOC_COMPLETE, + /* received firmware event indicating auth frames are sent */ + WIFI_EVENT_FW_AUTH_STARTED, + /* received firmware event indicating assoc frames are sent */ + WIFI_EVENT_FW_ASSOC_STARTED, + /* received firmware event indicating reassoc frames are sent */ + WIFI_EVENT_FW_RE_ASSOC_STARTED, + WIFI_EVENT_DRIVER_SCAN_REQUESTED, + WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, + WIFI_EVENT_DRIVER_SCAN_COMPLETE, + WIFI_EVENT_G_SCAN_STARTED, + WIFI_EVENT_G_SCAN_COMPLETE, + WIFI_EVENT_DISASSOCIATION_REQUESTED, + WIFI_EVENT_RE_ASSOCIATION_REQUESTED, + WIFI_EVENT_ROAM_REQUESTED, + /* received beacon from AP (event enabled only in verbose mode) */ + WIFI_EVENT_BEACON_RECEIVED, + /* firmware has triggered a roam scan (not g-scan) */ + WIFI_EVENT_ROAM_SCAN_STARTED, + /* firmware has completed a roam scan (not g-scan) */ + WIFI_EVENT_ROAM_SCAN_COMPLETE, + /* firmware has started searching for roam candidates (with reason =xx) */ + WIFI_EVENT_ROAM_SEARCH_STARTED, + /* firmware has stopped searching for roam candidates (with reason =xx) */ + WIFI_EVENT_ROAM_SEARCH_STOPPED, + WIFI_EVENT_UNUSED_0, + /* received channel switch anouncement from AP */ + WIFI_EVENT_CHANNEL_SWITCH_ANOUNCEMENT, + /* fw start transmit eapol frame, with EAPOL index 1-4 */ + WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, + /* fw gives up eapol frame, with rate, success/failure and number retries */ + WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP, + /* kernel queue EAPOL for transmission in driver with EAPOL index 1-4 */ + 
WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, + /* with rate, regardless of the fact that EAPOL frame is accepted or + * rejected by firmware + */ + WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, + WIFI_EVENT_UNUSED_1, + /* with rate, and eapol index, driver has received */ + /* EAPOL frame and will queue it up to wpa_supplicant */ + WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, + /* with success/failure, parameters */ + WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE, + WIFI_EVENT_BT_COEX_BT_SCO_START, + WIFI_EVENT_BT_COEX_BT_SCO_STOP, + /* for paging/scan etc..., when BT starts transmiting twice per BT slot */ + WIFI_EVENT_BT_COEX_BT_SCAN_START, + WIFI_EVENT_BT_COEX_BT_SCAN_STOP, + WIFI_EVENT_BT_COEX_BT_HID_START, + WIFI_EVENT_BT_COEX_BT_HID_STOP, + /* firmware sends auth frame in roaming to next candidate */ + WIFI_EVENT_ROAM_AUTH_STARTED, + /* firmware receive auth confirm from ap */ + WIFI_EVENT_ROAM_AUTH_COMPLETE, + /* firmware sends assoc/reassoc frame in */ + WIFI_EVENT_ROAM_ASSOC_STARTED, + /* firmware receive assoc/reassoc confirm from ap */ + WIFI_EVENT_ROAM_ASSOC_COMPLETE, + /* firmware sends stop G_SCAN */ + WIFI_EVENT_G_SCAN_STOP, + /* firmware indicates G_SCAN scan cycle started */ + WIFI_EVENT_G_SCAN_CYCLE_STARTED, + /* firmware indicates G_SCAN scan cycle completed */ + WIFI_EVENT_G_SCAN_CYCLE_COMPLETED, + /* firmware indicates G_SCAN scan start for a particular bucket */ + WIFI_EVENT_G_SCAN_BUCKET_STARTED, + /* firmware indicates G_SCAN scan completed for particular bucket */ + WIFI_EVENT_G_SCAN_BUCKET_COMPLETED, + /* Event received from firmware about G_SCAN scan results being available */ + WIFI_EVENT_G_SCAN_RESULTS_AVAILABLE, + /* Event received from firmware with G_SCAN capabilities */ + WIFI_EVENT_G_SCAN_CAPABILITIES, + /* Event received from firmware when eligible candidate is found */ + WIFI_EVENT_ROAM_CANDIDATE_FOUND, + /* Event received from firmware when roam scan configuration gets enabled or disabled */ + WIFI_EVENT_ROAM_SCAN_CONFIG, + /* firmware/driver 
timed out authentication */ + WIFI_EVENT_AUTH_TIMEOUT, + /* firmware/driver timed out association */ + WIFI_EVENT_ASSOC_TIMEOUT, + /* firmware/driver encountered allocation failure */ + WIFI_EVENT_MEM_ALLOC_FAILURE, + /* driver added a PNO network in firmware */ + WIFI_EVENT_DRIVER_PNO_ADD, + /* driver removed a PNO network in firmware */ + WIFI_EVENT_DRIVER_PNO_REMOVE, + /* driver received PNO networks found indication from firmware */ + WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, + /* driver triggered a scan for PNO networks */ + WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, + /* driver received scan results of PNO networks */ + WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, + /* driver updated scan results from PNO candidates to cfg */ + WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE +}; + +enum { + WIFI_TAG_VENDOR_SPECIFIC = 0, /* take a byte stream as parameter */ + WIFI_TAG_BSSID, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_SSID, /* takes a 32 bytes SSID address as parameter */ + WIFI_TAG_STATUS, /* takes an integer as parameter */ + WIFI_TAG_CHANNEL_SPEC, /* takes one or more wifi_channel_spec as parameter */ + WIFI_TAG_WAKE_LOCK_EVENT, /* takes a wake_lock_event struct as parameter */ + WIFI_TAG_ADDR1, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR2, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR3, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR4, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_TSF, /* take a 64 bits TSF value as parameter */ + WIFI_TAG_IE, + /* take one or more specific 802.11 IEs parameter, IEs are in turn + * indicated in TLV format as per 802.11 spec + */ + WIFI_TAG_INTERFACE, /* take interface name as parameter */ + WIFI_TAG_REASON_CODE, /* take a reason code as per 802.11 as parameter */ + WIFI_TAG_RATE_MBPS, /* take a wifi rate in 0.5 mbps */ + WIFI_TAG_REQUEST_ID, /* take an integer as parameter */ + WIFI_TAG_BUCKET_ID, /* take an 
integer as parameter */ + WIFI_TAG_GSCAN_PARAMS, /* takes a wifi_scan_cmd_params struct as parameter */ + WIFI_TAG_GSCAN_CAPABILITIES, /* takes a wifi_gscan_capabilities struct as parameter */ + WIFI_TAG_SCAN_ID, /* take an integer as parameter */ + WIFI_TAG_RSSI, /* takes s16 as parameter */ + WIFI_TAG_CHANNEL, /* takes u16 as parameter */ + WIFI_TAG_LINK_ID, /* take an integer as parameter */ + WIFI_TAG_LINK_ROLE, /* take an integer as parameter */ + WIFI_TAG_LINK_STATE, /* take an integer as parameter */ + WIFI_TAG_LINK_TYPE, /* take an integer as parameter */ + WIFI_TAG_TSCO, /* take an integer as parameter */ + WIFI_TAG_RSCO, /* take an integer as parameter */ + WIFI_TAG_EAPOL_MESSAGE_TYPE /* take an integer as parameter */ +}; + +/* NAN events */ +typedef enum { + NAN_EVENT_INVALID = 0, + NAN_EVENT_CLUSTER_STARTED = 1, + NAN_EVENT_CLUSTER_JOINED = 2, + NAN_EVENT_CLUSTER_MERGED = 3, + NAN_EVENT_ROLE_CHANGED = 4, + NAN_EVENT_SCAN_COMPLETE = 5, + NAN_EVENT_STATUS_CHNG = 6, + /* ADD new events before this line */ + NAN_EVENT_MAX +} nan_event_id_t; + +typedef struct { + uint16 tag; + uint16 len; /* length of value */ + uint8 value[0]; +} tlv_log; + +typedef struct per_packet_status_entry { + uint8 flags; + uint8 tid; /* transmit or received tid */ + uint16 MCS; /* modulation and bandwidth */ + /* + * TX: RSSI of ACK for that packet + * RX: RSSI of packet + */ + uint8 rssi; + uint8 num_retries; /* number of attempted retries */ + uint16 last_transmit_rate; /* last transmit rate in .5 mbps */ + /* transmit/reeive sequence for that MPDU packet */ + uint16 link_layer_transmit_sequence; + /* + * TX: firmware timestamp (us) when packet is queued within firmware buffer + * for SDIO/HSIC or into PCIe buffer + * RX : firmware receive timestamp + */ + uint64 firmware_entry_timestamp; + /* + * firmware timestamp (us) when packet start contending for the + * medium for the first time, at head of its AC queue, + * or as part of an MPDU or A-MPDU. 
This timestamp is not updated + * for each retry, only the first transmit attempt. + */ + uint64 start_contention_timestamp; + /* + * firmware timestamp (us) when packet is successfully transmitted + * or aborted because it has exhausted its maximum number of retries + */ + uint64 transmit_success_timestamp; + /* + * packet data. The length of packet data is determined by the entry_size field of + * the wifi_ring_buffer_entry structure. It is expected that first bytes of the + * packet, or packet headers only (up to TCP or RTP/UDP headers) will be copied into the ring + */ + uint8 *data; +} per_packet_status_entry_t; + +#define PACKED_STRUCT __attribute__ ((packed)) + +typedef struct log_conn_event { + uint16 event; + tlv_log *tlvs; + /* + * separate parameter structure per event to be provided and optional data + * the event_data is expected to include an official android part, with some + * parameter as transmit rate, num retries, num scan result found etc... + * as well, event_data can include a vendor proprietary part which is + * understood by the developer only. + */ +} PACKED_STRUCT log_conn_event_t; + +/* + * Ring buffer name for power events ring. 
note that power event are extremely frequents + * and thus should be stored in their own ring/file so as not to clobber connectivity events + */ + +typedef struct wake_lock_event { + uint32 status; /* 0 taken, 1 released */ + uint32 reason; /* reason why this wake lock is taken */ + char *name; /* null terminated */ +} wake_lock_event_t; + +typedef struct wifi_power_event { + uint16 event; + tlv_log *tlvs; +} wifi_power_event_t; + +#define NAN_EVENT_VERSION 1 +typedef struct log_nan_event { + uint8 version; + uint8 pad; + uint16 event; + tlv_log *tlvs; +} log_nan_event_t; + +/* entry type */ +enum { + DBG_RING_ENTRY_EVENT_TYPE = 1, + DBG_RING_ENTRY_PKT_TYPE, + DBG_RING_ENTRY_WAKE_LOCK_EVENT_TYPE, + DBG_RING_ENTRY_POWER_EVENT_TYPE, + DBG_RING_ENTRY_DATA_TYPE, + DBG_RING_ENTRY_NAN_EVENT_TYPE +}; + +struct log_level_table { + int log_level; + uint16 tag; + char *desc; +}; + +/* + * Assuming that the Ring lock is mutex, bailing out if the + * callers are from atomic context. On a long term, one has to + * schedule a job to execute in sleepable context so that + * contents are pushed to the ring. + */ +#define DBG_EVENT_LOG(dhdp, connect_state) \ +{ \ + do { \ + uint16 state = connect_state; \ + if (CAN_SLEEP() && DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \ + dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID, \ + &state, sizeof(state)); \ + } while (0); \ +} + +#define MD5_PREFIX_LEN 4 +#define MAX_FATE_LOG_LEN 32 +#define MAX_FRAME_LEN_ETHERNET 1518 +#define MAX_FRAME_LEN_80211_MGMT 2352 /* 802.11-2012 Fig. 8-34 */ + +typedef enum { + /* Sent over air and ACKed. */ + TX_PKT_FATE_ACKED, + + /* Sent over air but not ACKed. (Normal for broadcast/multicast.) */ + TX_PKT_FATE_SENT, + + /* Queued within firmware, but not yet sent over air. */ + TX_PKT_FATE_FW_QUEUED, + + /* + * Dropped by firmware as invalid. E.g. bad source address, + * bad checksum, or invalid for current state. + */ + TX_PKT_FATE_FW_DROP_INVALID, + + /* Dropped by firmware due to lifetime expiration. 
*/ + TX_PKT_FATE_FW_DROP_EXPTIME, + + /* + * Dropped by firmware for any other reason. Includes + * frames that were sent by driver to firmware, but + * unaccounted for by firmware. + */ + TX_PKT_FATE_FW_DROP_OTHER, + + /* Queued within driver, not yet sent to firmware. */ + TX_PKT_FATE_DRV_QUEUED, + + /* + * Dropped by driver as invalid. E.g. bad source address, + * or invalid for current state. + */ + TX_PKT_FATE_DRV_DROP_INVALID, + + /* Dropped by driver due to lack of buffer space. */ + TX_PKT_FATE_DRV_DROP_NOBUFS, + + /* Dropped by driver for any other reason. */ + TX_PKT_FATE_DRV_DROP_OTHER, + + /* Packet free by firmware. */ + TX_PKT_FATE_FW_PKT_FREE, + + } wifi_tx_packet_fate; + +typedef enum { + /* Valid and delivered to network stack (e.g., netif_rx()). */ + RX_PKT_FATE_SUCCESS, + + /* Queued within firmware, but not yet sent to driver. */ + RX_PKT_FATE_FW_QUEUED, + + /* Dropped by firmware due to host-programmable filters. */ + RX_PKT_FATE_FW_DROP_FILTER, + + /* + * Dropped by firmware as invalid. E.g. bad checksum, + * decrypt failed, or invalid for current state. + */ + RX_PKT_FATE_FW_DROP_INVALID, + + /* Dropped by firmware due to lack of buffer space. */ + RX_PKT_FATE_FW_DROP_NOBUFS, + + /* Dropped by firmware for any other reason. */ + RX_PKT_FATE_FW_DROP_OTHER, + + /* Queued within driver, not yet delivered to network stack. */ + RX_PKT_FATE_DRV_QUEUED, + + /* Dropped by driver due to filter rules. */ + RX_PKT_FATE_DRV_DROP_FILTER, + + /* Dropped by driver as invalid. E.g. not permitted in current state. */ + RX_PKT_FATE_DRV_DROP_INVALID, + + /* Dropped by driver due to lack of buffer space. */ + RX_PKT_FATE_DRV_DROP_NOBUFS, + + /* Dropped by driver for any other reason. */ + RX_PKT_FATE_DRV_DROP_OTHER, + + } wifi_rx_packet_fate; + +typedef enum { + FRAME_TYPE_UNKNOWN, + FRAME_TYPE_ETHERNET_II, + FRAME_TYPE_80211_MGMT, + } frame_type; + +typedef struct wifi_frame_info { + /* + * The type of MAC-layer frame that this frame_info holds. 
+ * - For data frames, use FRAME_TYPE_ETHERNET_II. + * - For management frames, use FRAME_TYPE_80211_MGMT. + * - If the type of the frame is unknown, use FRAME_TYPE_UNKNOWN. + */ + frame_type payload_type; + + /* + * The number of bytes included in |frame_content|. If the frame + * contents are missing (e.g. RX frame dropped in firmware), + * |frame_len| should be set to 0. + */ + size_t frame_len; + + /* + * Host clock when this frame was received by the driver (either + * outbound from the host network stack, or inbound from the + * firmware). + * - The timestamp should be taken from a clock which includes time + * the host spent suspended (e.g. ktime_get_boottime()). + * - If no host timestamp is available (e.g. RX frame was dropped in + * firmware), this field should be set to 0. + */ + uint32 driver_timestamp_usec; + + /* + * Firmware clock when this frame was received by the firmware + * (either outbound from the host, or inbound from a remote + * station). + * - The timestamp should be taken from a clock which includes time + * firmware spent suspended (if applicable). + * - If no firmware timestamp is available (e.g. TX frame was + * dropped by driver), this field should be set to 0. + * - Consumers of |frame_info| should _not_ assume any + * synchronization between driver and firmware clocks. + */ + uint32 firmware_timestamp_usec; + + /* + * Actual frame content. + * - Should be provided for TX frames originated by the host. + * - Should be provided for RX frames received by the driver. + * - Optionally provided for TX frames originated by firmware. (At + * discretion of HAL implementation.) + * - Optionally provided for RX frames dropped in firmware. (At + * discretion of HAL implementation.) + * - If frame content is not provided, |frame_len| should be set + * to 0. 
+ */ + union { + char ethernet_ii[MAX_FRAME_LEN_ETHERNET]; + char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT]; + } frame_content; +} wifi_frame_info_t; + +typedef struct wifi_tx_report { + /* + * Prefix of MD5 hash of |frame_inf.frame_content|. If frame + * content is not provided, prefix of MD5 hash over the same data + * that would be in frame_content, if frame content were provided. + */ + char md5_prefix[MD5_PREFIX_LEN]; + wifi_tx_packet_fate fate; + wifi_frame_info_t frame_inf; +} wifi_tx_report_t; + +typedef struct wifi_rx_report { + /* + * Prefix of MD5 hash of |frame_inf.frame_content|. If frame + * content is not provided, prefix of MD5 hash over the same data + * that would be in frame_content, if frame content were provided. + */ + char md5_prefix[MD5_PREFIX_LEN]; + wifi_rx_packet_fate fate; + wifi_frame_info_t frame_inf; +} wifi_rx_report_t; + +typedef struct compat_wifi_frame_info { + frame_type payload_type; + + uint32 frame_len; + + uint32 driver_timestamp_usec; + + uint32 firmware_timestamp_usec; + + union { + char ethernet_ii[MAX_FRAME_LEN_ETHERNET]; + char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT]; + } frame_content; +} compat_wifi_frame_info_t; + +typedef struct compat_wifi_tx_report { + char md5_prefix[MD5_PREFIX_LEN]; + wifi_tx_packet_fate fate; + compat_wifi_frame_info_t frame_inf; +} compat_wifi_tx_report_t; + +typedef struct compat_wifi_rx_report { + char md5_prefix[MD5_PREFIX_LEN]; + wifi_rx_packet_fate fate; + compat_wifi_frame_info_t frame_inf; +} compat_wifi_rx_report_t; + +/* + * Packet logging - internal data + */ + +typedef enum dhd_dbg_pkt_mon_state { + PKT_MON_INVALID = 0, + PKT_MON_ATTACHED, + PKT_MON_STARTING, + PKT_MON_STARTED, + PKT_MON_STOPPING, + PKT_MON_STOPPED, + PKT_MON_DETACHED, + } dhd_dbg_pkt_mon_state_t; + +typedef struct dhd_dbg_pkt_info { + frame_type payload_type; + size_t pkt_len; + uint32 driver_ts; + uint32 firmware_ts; + uint32 pkt_hash; + void *pkt; +} dhd_dbg_pkt_info_t; + +typedef struct compat_dhd_dbg_pkt_info 
{ + frame_type payload_type; + uint32 pkt_len; + uint32 driver_ts; + uint32 firmware_ts; + uint32 pkt_hash; + void *pkt; +} compat_dhd_dbg_pkt_info_t; + +typedef struct dhd_dbg_tx_info +{ + wifi_tx_packet_fate fate; + dhd_dbg_pkt_info_t info; +} dhd_dbg_tx_info_t; + +typedef struct dhd_dbg_rx_info +{ + wifi_rx_packet_fate fate; + dhd_dbg_pkt_info_t info; +} dhd_dbg_rx_info_t; + +typedef struct dhd_dbg_tx_report +{ + dhd_dbg_tx_info_t *tx_pkts; + uint16 pkt_pos; + uint16 status_pos; +} dhd_dbg_tx_report_t; + +typedef struct dhd_dbg_rx_report +{ + dhd_dbg_rx_info_t *rx_pkts; + uint16 pkt_pos; +} dhd_dbg_rx_report_t; + +typedef void (*dbg_pullreq_t)(void *os_priv, const int ring_id); +typedef void (*dbg_urgent_noti_t) (dhd_pub_t *dhdp, const void *data, const uint32 len); +typedef int (*dbg_mon_tx_pkts_t) (dhd_pub_t *dhdp, void *pkt, uint32 pktid); +typedef int (*dbg_mon_tx_status_t) (dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +typedef int (*dbg_mon_rx_pkts_t) (dhd_pub_t *dhdp, void *pkt); + +typedef struct dhd_dbg_pkt_mon +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + + /* call backs */ + dbg_mon_tx_pkts_t tx_pkt_mon; + dbg_mon_tx_status_t tx_status_mon; + dbg_mon_rx_pkts_t rx_pkt_mon; +} dhd_dbg_pkt_mon_t; + +typedef struct dhd_dbg { + dhd_dbg_ring_t dbg_rings[DEBUG_RING_ID_MAX]; + void *private; /* os private_data */ + dhd_dbg_pkt_mon_t pkt_mon; + void *pkt_mon_lock; /* spin lock for packet monitoring */ + dbg_pullreq_t pullreq; + dbg_urgent_noti_t urgent_notifier; +} dhd_dbg_t; + +#define PKT_MON_ATTACHED(state) \ + (((state) > PKT_MON_INVALID) && ((state) < PKT_MON_DETACHED)) +#define PKT_MON_DETACHED(state) \ + (((state) == PKT_MON_INVALID) || ((state) == PKT_MON_DETACHED)) +#define PKT_MON_STARTED(state) ((state) == PKT_MON_STARTED) +#define PKT_MON_STOPPED(state) ((state) == PKT_MON_STOPPED) 
+#define PKT_MON_NOT_OPERATIONAL(state) \ + (((state) != PKT_MON_STARTED) && ((state) != PKT_MON_STOPPED)) +#define PKT_MON_SAFE_TO_FREE(state) \ + (((state) == PKT_MON_STARTING) || ((state) == PKT_MON_STOPPED)) +#define PKT_MON_PKT_FULL(pkt_count) ((pkt_count) >= MAX_FATE_LOG_LEN) +#define PKT_MON_STATUS_FULL(pkt_count, status_count) \ + (((status_count) >= (pkt_count)) || ((status_count) >= MAX_FATE_LOG_LEN)) + +#ifdef DBG_PKT_MON +#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_pkt_mon && (pkt)) { \ + (dhdp)->dbg->pkt_mon.tx_pkt_mon((dhdp), (pkt), (pktid)); \ + } \ + } while (0); +#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_status_mon && (pkt)) { \ + (dhdp)->dbg->pkt_mon.tx_status_mon((dhdp), (pkt), (pktid), (status)); \ + } \ + } while (0); +#define DHD_DBG_PKT_MON_RX(dhdp, pkt) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.rx_pkt_mon && (pkt)) { \ + if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \ + (dhdp)->dbg->pkt_mon.rx_pkt_mon((dhdp), (pkt)); \ + } \ + } \ + } while (0); + +#define DHD_DBG_PKT_MON_START(dhdp) \ + dhd_os_dbg_start_pkt_monitor((dhdp)); +#define DHD_DBG_PKT_MON_STOP(dhdp) \ + dhd_os_dbg_stop_pkt_monitor((dhdp)); +#else +#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) +#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) +#define DHD_DBG_PKT_MON_RX(dhdp, pkt) +#define DHD_DBG_PKT_MON_START(dhdp) +#define DHD_DBG_PKT_MON_STOP(dhdp) +#endif /* DBG_PKT_MON */ + +#ifdef DUMP_IOCTL_IOV_LIST +typedef struct dhd_iov_li { + dll_t list; + uint32 cmd; /* command number */ + char buff[100]; /* command name */ +} dhd_iov_li_t; +#endif /* DUMP_IOCTL_IOV_LIST */ + +#define IOV_LIST_MAX_LEN 5 + +#ifdef DHD_DEBUG +typedef struct { + dll_t list; + uint32 id; /* wasted chunk id */ + uint32 handle; /* wasted chunk handle */ + uint32 size; /* wasted chunk size */ +} dhd_dbg_mwli_t; +#endif /* 
DHD_DEBUG */ + +/* LSB 2 bits of format number to identify the type of event log */ +#define DHD_EVENT_LOG_HDR_MASK 0x3 + +#define DHD_EVENT_LOG_FMT_NUM_OFFSET 2 +#define DHD_EVENT_LOG_FMT_NUM_MASK 0x3FFF +/** + * OW:- one word + * TW:- two word + * NB:- non binary + * BI:- binary + */ +#define DHD_OW_NB_EVENT_LOG_HDR 0 +#define DHD_TW_NB_EVENT_LOG_HDR 1 +#define DHD_BI_EVENT_LOG_HDR 3 +#define DHD_INVALID_EVENT_LOG_HDR 2 + +#define DHD_TW_VALID_TAG_BITS_MASK 0xF +#define DHD_OW_BI_EVENT_FMT_NUM 0x3FFF +#define DHD_TW_BI_EVENT_FMT_NUM 0x3FFE + +#define DHD_TW_EVENT_LOG_TAG_OFFSET 8 + +#define EVENT_TAG_TIMESTAMP_OFFSET 1 +#define EVENT_TAG_TIMESTAMP_EXT_OFFSET 2 + +typedef struct prcd_event_log_hdr { + uint32 tag; /* Event_log entry tag */ + uint32 count; /* Count of 4-byte entries */ + uint32 fmt_num_raw; /* Format number */ + uint32 fmt_num; /* Format number >> 2 */ + uint32 armcycle; /* global ARM CYCLE for TAG */ + uint32 *log_ptr; /* start of payload */ + uint32 payload_len; + /* Extended event log header info + * 0 - legacy, 1 - extended event log header present + */ + bool ext_event_log_hdr; + bool binary_payload; /* 0 - non binary payload, 1 - binary payload */ +} prcd_event_log_hdr_t; /* Processed event log header */ + +/* dhd_dbg functions */ +extern void dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen); +void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present, + uint32 msgtrace_seqnum); + +extern int dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, + dbg_urgent_noti_t os_urgent_notifier, void *os_priv); +extern void dhd_dbg_detach(dhd_pub_t *dhdp); +extern int dhd_dbg_start(dhd_pub_t *dhdp, bool start); +extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, + int log_level, int flags, uint32 threshold); +extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name); +extern void *dhd_dbg_get_priv(dhd_pub_t 
*dhdp); +extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len); +extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr, + void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block); +int dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len); +int dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len, + bool strip_header); +int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, + void *data); +int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, + dhd_dbg_ring_status_t *dbg_ring_status); + +#ifdef DBG_PKT_MON +extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp, + dbg_mon_tx_pkts_t tx_pkt_mon, + dbg_mon_tx_status_t tx_status_mon, + dbg_mon_rx_pkts_t rx_pkt_mon); +extern int dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid); +extern int dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +extern int dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt); +extern int dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count); +extern int dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count); +extern int dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp); +#endif /* DBG_PKT_MON */ + +extern bool dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); + +/* os wrapper function */ +extern int dhd_os_dbg_attach(dhd_pub_t *dhdp); +extern void dhd_os_dbg_detach(dhd_pub_t *dhdp); +extern int dhd_os_dbg_register_callback(int ring_id, + void (*dbg_ring_sub_cb)(void *ctx, const int ring_id, const void *data, + const uint32 len, const dhd_dbg_ring_status_t dbg_ring_status)); +extern int dhd_os_dbg_register_urgent_notifier(dhd_pub_t 
*dhdp, + void (*urgent_noti)(void *ctx, const void *data, const uint32 len, const uint32 fw_len)); + +extern int dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level, + int flags, int time_intval, int threshold); +extern int dhd_os_reset_logging(dhd_pub_t *dhdp); +extern int dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress); + +extern int dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, + dhd_dbg_ring_status_t *dbg_ring_status); +extern int dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name); +extern int dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len); +extern int dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features); + +#ifdef DBG_PKT_MON +extern int dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, + uint32 pktid); +extern int dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +extern int dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt); +extern int dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, + void __user *user_buf, uint16 req_count, uint16 *resp_count); +extern int dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, + void __user *user_buf, uint16 req_count, uint16 *resp_count); +extern int dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp); +#endif /* DBG_PKT_MON */ + +#ifdef DUMP_IOCTL_IOV_LIST +extern void dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node); +extern void dhd_iov_li_print(dll_t *list_head); +extern void dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head); +#endif /* DUMP_IOCTL_IOV_LIST */ + +#ifdef DHD_DEBUG +extern void dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head); +#endif /* DHD_DEBUG */ +#endif /* _dhd_debug_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_debug_linux.c b/bcmdhd.100.10.315.x/dhd_debug_linux.c new file mode 100644 
index 0000000..386fe8a --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_debug_linux.c @@ -0,0 +1,509 @@ +/* + * DHD debugability Linux os layer + * + * <> + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_debug_linux.c 769272 2018-06-25 09:23:27Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +typedef void (*dbg_ring_send_sub_t)(void *ctx, const int ring_id, const void *data, + const uint32 len, const dhd_dbg_ring_status_t ring_status); +typedef void (*dbg_urgent_noti_sub_t)(void *ctx, const void *data, + const uint32 len, const uint32 fw_len); + +static dbg_ring_send_sub_t ring_send_sub_cb[DEBUG_RING_ID_MAX]; +static dbg_urgent_noti_sub_t urgent_noti_sub_cb; +typedef struct dhd_dbg_os_ring_info { + dhd_pub_t *dhdp; + int ring_id; + int log_level; + unsigned long interval; + struct delayed_work work; + uint64 tsoffset; +} linux_dbgring_info_t; + +struct log_level_table dhd_event_map[] = { + {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, "DRIVER EAPOL TX REQ"}, + {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, "DRIVER EAPOL RX"}, + {2, WIFI_EVENT_DRIVER_SCAN_REQUESTED, "SCAN_REQUESTED"}, + {2, WIFI_EVENT_DRIVER_SCAN_COMPLETE, "SCAN COMPELETE"}, + {3, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, "SCAN RESULT FOUND"}, + {2, WIFI_EVENT_DRIVER_PNO_ADD, "PNO ADD"}, + {2, WIFI_EVENT_DRIVER_PNO_REMOVE, "PNO REMOVE"}, + {2, WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, "PNO NETWORK FOUND"}, + {2, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, "PNO SCAN_REQUESTED"}, + {1, WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, "PNO SCAN RESULT FOUND"}, + {1, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE, "PNO SCAN COMPELETE"} +}; + +static void +debug_data_send(dhd_pub_t *dhdp, int ring_id, const void *data, const uint32 len, + const dhd_dbg_ring_status_t ring_status) +{ + struct net_device *ndev; + dbg_ring_send_sub_t ring_sub_send; + ndev = dhd_linux_get_primary_netdev(dhdp); + if (!ndev) + return; + if (!VALID_RING(ring_id)) + return; + if (ring_send_sub_cb[ring_id]) { + ring_sub_send = ring_send_sub_cb[ring_id]; + ring_sub_send(ndev, ring_id, data, len, ring_status); + } +} + +static void +dhd_os_dbg_urgent_notifier(dhd_pub_t *dhdp, 
const void *data, const uint32 len) +{ + struct net_device *ndev; + ndev = dhd_linux_get_primary_netdev(dhdp); + if (!ndev) + return; + if (urgent_noti_sub_cb) { + urgent_noti_sub_cb(ndev, data, len, dhdp->soc_ram_length); + } +} + +static void +dbg_ring_poll_worker(struct work_struct *work) +{ + struct delayed_work *d_work = to_delayed_work(work); + bool sched = TRUE; + dhd_dbg_ring_t *ring; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + linux_dbgring_info_t *ring_info = + container_of(d_work, linux_dbgring_info_t, work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + dhd_pub_t *dhdp = ring_info->dhdp; + int ringid = ring_info->ring_id; + dhd_dbg_ring_status_t ring_status; + void *buf; + dhd_dbg_ring_entry_t *hdr; + uint32 buflen, rlen; + unsigned long flags; + + ring = &dhdp->dbg->dbg_rings[ringid]; + DHD_DBG_RING_LOCK(ring->lock, flags); + dhd_dbg_get_ring_status(dhdp, ringid, &ring_status); + + if (ring->wp > ring->rp) { + buflen = ring->wp - ring->rp; + } else if (ring->wp < ring->rp) { + buflen = ring->ring_size - ring->rp + ring->wp; + } else { + goto exit; + } + + if (buflen > ring->ring_size) { + goto exit; + } + + buf = MALLOCZ(dhdp->osh, buflen); + if (!buf) { + DHD_ERROR(("%s failed to allocate read buf\n", __FUNCTION__)); + sched = FALSE; + goto exit; + } + + rlen = dhd_dbg_pull_from_ring(dhdp, ringid, buf, buflen); + + if (!ring->sched_pull) { + ring->sched_pull = TRUE; + } + + hdr = (dhd_dbg_ring_entry_t *)buf; + while (rlen > 0) { + ring_status.read_bytes += ENTRY_LENGTH(hdr); + /* offset fw ts to host ts */ + hdr->timestamp += ring_info->tsoffset; + debug_data_send(dhdp, ringid, hdr, ENTRY_LENGTH(hdr), + ring_status); + rlen -= ENTRY_LENGTH(hdr); + hdr = (dhd_dbg_ring_entry_t *)((char *)hdr + ENTRY_LENGTH(hdr)); + } + MFREE(dhdp->osh, buf, buflen); + +exit: + if (sched) { + /* retrigger the 
work at same interval */ + if ((ring_status.written_bytes == ring_status.read_bytes) && + (ring_info->interval)) { + schedule_delayed_work(d_work, ring_info->interval); + } + } + + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + return; +} + +int +dhd_os_dbg_register_callback(int ring_id, dbg_ring_send_sub_t callback) +{ + if (!VALID_RING(ring_id)) + return BCME_RANGE; + + ring_send_sub_cb[ring_id] = callback; + return BCME_OK; +} + +int +dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp, dbg_urgent_noti_sub_t urgent_noti_sub) +{ + if (!dhdp || !urgent_noti_sub) + return BCME_BADARG; + urgent_noti_sub_cb = urgent_noti_sub; + + return BCME_OK; +} + +int +dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level, + int flags, int time_intval, int threshold) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + + ring_id = dhd_dbg_find_ring_id(dhdp, ring_name); + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + + DHD_DBGIF(("%s , log_level : %d, time_intval : %d, threshod %d Bytes\n", + __FUNCTION__, log_level, time_intval, threshold)); + + /* change the configuration */ + ret = dhd_dbg_set_configuration(dhdp, ring_id, log_level, flags, threshold); + if (ret) { + DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret)); + return ret; + } + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + ring_info = &os_priv[ring_id]; + ring_info->log_level = log_level; + + if (time_intval == 0 || log_level == 0) { + ring_info->interval = 0; + cancel_delayed_work_sync(&ring_info->work); + } else { + ring_info->interval = msecs_to_jiffies(time_intval * MSEC_PER_SEC); + cancel_delayed_work_sync(&ring_info->work); + schedule_delayed_work(&ring_info->work, ring_info->interval); + } + + return ret; +} + +int +dhd_os_reset_logging(dhd_pub_t *dhdp) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + + /* Stop 
all rings */ + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + DHD_DBGIF(("%s: Stop ring buffer %d\n", __FUNCTION__, ring_id)); + + ring_info = &os_priv[ring_id]; + /* cancel any pending work */ + cancel_delayed_work_sync(&ring_info->work); + /* log level zero makes stop logging on that ring */ + ring_info->log_level = 0; + ring_info->interval = 0; + /* change the configuration */ + ret = dhd_dbg_set_configuration(dhdp, ring_id, 0, 0, 0); + if (ret) { + DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret)); + return ret; + } + } + return ret; +} + +#define SUPPRESS_LOG_LEVEL 1 +int +dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress) +{ + int ret = BCME_OK; + int max_log_level; + int enable = (suppress) ? 0 : 1; + linux_dbgring_info_t *os_priv; + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + + max_log_level = os_priv[FW_VERBOSE_RING_ID].log_level; + + if (max_log_level == SUPPRESS_LOG_LEVEL) { + /* suppress the logging in FW not to wake up host while device in suspend mode */ + ret = dhd_iovar(dhdp, 0, "logtrace", (char *)&enable, sizeof(enable), NULL, 0, + TRUE); + if (ret < 0 && (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("logtrace is failed : %d\n", ret)); + } + } + + return ret; +} + +int +dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status) +{ + return dhd_dbg_get_ring_status(dhdp, ring_id, dbg_ring_status); +} + +int +dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + ring_id = dhd_dbg_find_ring_id(dhdp, ring_name); + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + os_priv = dhd_dbg_get_priv(dhdp); + if (os_priv) { + ring_info = &os_priv[ring_id]; + if (ring_info->interval) { + cancel_delayed_work_sync(&ring_info->work); + } + schedule_delayed_work(&ring_info->work, 0); + } else { + DHD_ERROR(("%s : os_priv is NULL\n", __FUNCTION__)); + ret = 
BCME_ERROR; + } + return ret; +} + +int +dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len) +{ + int ret = BCME_OK, i; + dhd_dbg_ring_entry_t msg_hdr; + log_conn_event_t *event_data = (log_conn_event_t *)data; + linux_dbgring_info_t *os_priv, *ring_info = NULL; + + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + os_priv = dhd_dbg_get_priv(dhdp); + + if (os_priv) { + ring_info = &os_priv[ring_id]; + } else + return BCME_NORESOURCE; + + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + + if (ring_id == DHD_EVENT_RING_ID) { + msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY; + msg_hdr.timestamp = local_clock(); + /* convert to ms */ + msg_hdr.timestamp = DIV_U64_BY_U32(msg_hdr.timestamp, NSEC_PER_MSEC); + msg_hdr.len = data_len; + /* filter the event for higher log level with current log level */ + for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) { + if ((dhd_event_map[i].tag == event_data->event) && + dhd_event_map[i].log_level > ring_info->log_level) { + return ret; + } + } + } + ret = dhd_dbg_push_to_ring(dhdp, ring_id, &msg_hdr, event_data); + if (ret) { + DHD_ERROR(("%s : failed to push data into the ring (%d) with ret(%d)\n", + __FUNCTION__, ring_id, ret)); + } + + return ret; +} + +#ifdef DBG_PKT_MON +int +dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_attach_pkt_monitor(dhdp, dhd_os_dbg_monitor_tx_pkts, + dhd_os_dbg_monitor_tx_status, dhd_os_dbg_monitor_rx_pkts); +} + +int +dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_start_pkt_monitor(dhdp); +} + +int +dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid) +{ + return dhd_dbg_monitor_tx_pkts(dhdp, pkt, pktid); +} + +int +dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, + uint16 status) +{ + return dhd_dbg_monitor_tx_status(dhdp, pkt, pktid, status); +} + +int 
+dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt) +{ + return dhd_dbg_monitor_rx_pkts(dhdp, pkt); +} + +int +dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_stop_pkt_monitor(dhdp); +} + +int +dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + return dhd_dbg_monitor_get_tx_pkts(dhdp, user_buf, req_count, resp_count); +} + +int +dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + return dhd_dbg_monitor_get_rx_pkts(dhdp, user_buf, req_count, resp_count); +} + +int +dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_detach_pkt_monitor(dhdp); +} +#endif /* DBG_PKT_MON */ + +int +dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features) +{ + int ret = BCME_OK; + *features = 0; +#ifdef DEBUGABILITY + *features |= DBG_MEMORY_DUMP_SUPPORTED; + if (FW_SUPPORTED(dhdp, logtrace)) { + *features |= DBG_CONNECT_EVENT_SUPPORTED; + *features |= DBG_VERBOSE_LOG_SUPPORTED; + } + if (FW_SUPPORTED(dhdp, hchk)) { + *features |= DBG_HEALTH_CHECK_SUPPORTED; + } +#ifdef DBG_PKT_MON + if (FW_SUPPORTED(dhdp, d11status)) { + *features |= DBG_PACKET_FATE_SUPPORTED; + } +#endif /* DBG_PKT_MON */ +#endif /* DEBUGABILITY */ + return ret; +} + +static void +dhd_os_dbg_pullreq(void *os_priv, int ring_id) +{ + linux_dbgring_info_t *ring_info; + + ring_info = &((linux_dbgring_info_t *)os_priv)[ring_id]; + cancel_delayed_work(&ring_info->work); + schedule_delayed_work(&ring_info->work, 0); +} + +int +dhd_os_dbg_attach(dhd_pub_t *dhdp) +{ + int ret = BCME_OK; + linux_dbgring_info_t *os_priv, *ring_info; + int ring_id; + + /* os_dbg data */ + os_priv = MALLOCZ(dhdp->osh, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + if (!os_priv) + return BCME_NOMEM; + + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; + ring_id++) { + ring_info = &os_priv[ring_id]; + INIT_DELAYED_WORK(&ring_info->work, dbg_ring_poll_worker); + 
ring_info->dhdp = dhdp; + ring_info->ring_id = ring_id; + } + + ret = dhd_dbg_attach(dhdp, dhd_os_dbg_pullreq, dhd_os_dbg_urgent_notifier, os_priv); + if (ret) + MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + + return ret; +} + +void +dhd_os_dbg_detach(dhd_pub_t *dhdp) +{ + linux_dbgring_info_t *os_priv, *ring_info; + int ring_id; + /* free os_dbg data */ + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return; + /* abort pending any job */ + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + ring_info = &os_priv[ring_id]; + if (ring_info->interval) { + ring_info->interval = 0; + cancel_delayed_work_sync(&ring_info->work); + } + } + MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + + return dhd_dbg_detach(dhdp); +} diff --git a/bcmdhd.100.10.315.x/dhd_flowring.c b/bcmdhd.100.10.315.x/dhd_flowring.c new file mode 100644 index 0000000..ce9a1cf --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_flowring.c @@ -0,0 +1,1157 @@ +/* + * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level + * + * Flow rings are transmit traffic (=propagating towards antenna) related entities + * + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_flowring.c 765578 2018-06-04 17:10:24Z $ + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include <802.1d.h> +#include +#include +#include + +static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue); + +static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da); + +static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da); + +static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da, uint16 *flowid); +int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt); + +#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p) +#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x)) + +#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING) +const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 }; +#else +const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }; +#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */ +const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + +/** Queue overflow throttle. 
Return value: TRUE if throttle needs to be applied */ +static INLINE int +dhd_flow_queue_throttle(flow_queue_t *queue) +{ + return DHD_FLOW_QUEUE_FULL(queue); +} + +int BCMFASTPATH +dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt) +{ + return BCME_NORESOURCE; +} + +/** Returns flow ring given a flowid */ +flow_ring_node_t * +dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node; + + ASSERT(dhdp != (dhd_pub_t*)NULL); + ASSERT(flowid < dhdp->num_flow_rings); + if (flowid >= dhdp->num_flow_rings) { + return NULL; + } + + flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]); + + ASSERT(flow_ring_node->flowid == flowid); + return flow_ring_node; +} + +/** Returns 'backup' queue given a flowid */ +flow_queue_t * +dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node = NULL; + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + if (flow_ring_node) + return &flow_ring_node->queue; + else + return NULL; +} + +/* Flow ring's queue management functions */ + +/** Reinitialize a flow ring's queue. */ +void +dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max) +{ + ASSERT((queue != NULL) && (max > 0)); + + queue->head = queue->tail = NULL; + queue->len = 0; + + /* Set queue's threshold and queue's parent cummulative length counter */ + ASSERT(max > 1); + DHD_FLOW_QUEUE_SET_MAX(queue, max); + DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max); + DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr); + DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr); + + queue->failures = 0U; + queue->cb = &dhd_flow_queue_overflow; +} + +/** Initialize a flow ring's queue, called on driver initialization. 
*/ +void +dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) +{ + ASSERT((queue != NULL) && (max > 0)); + + dll_init(&queue->list); + dhd_flow_queue_reinit(dhdp, queue, max); +} + +/** Register an enqueue overflow callback handler */ +void +dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb) +{ + ASSERT(queue != NULL); + queue->cb = cb; +} + +/** + * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on + * to the flow ring itself. + */ +int BCMFASTPATH +dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) +{ + int ret = BCME_OK; + + ASSERT(queue != NULL); + + if (dhd_flow_queue_throttle(queue)) { + queue->failures++; + ret = (*queue->cb)(queue, pkt); + goto done; + } + + if (queue->head) { + FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt); + } else { + queue->head = pkt; + } + + FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); + + queue->tail = pkt; /* at tail */ + + queue->len++; + /* increment parent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + /* increment grandparent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue)); + +done: + return ret; +} + +/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */ +void * BCMFASTPATH +dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue) +{ + void * pkt; + + ASSERT(queue != NULL); + + pkt = queue->head; /* from head */ + + if (pkt == NULL) { + ASSERT((queue->len == 0) && (queue->tail == NULL)); + goto done; + } + + queue->head = FLOW_QUEUE_PKT_NEXT(pkt); + if (queue->head == NULL) + queue->tail = NULL; + + queue->len--; + /* decrement parent's cummulative length */ + DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + /* decrement grandparent's cummulative length */ + DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue)); + + FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */ + +done: + return pkt; +} + +/** Reinsert a dequeued 802.3 packet back at the 
head */
void BCMFASTPATH
dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
	/* Put a previously-dequeued packet back at the head; the queue was not
	 * empty before the dequeue, so no throttle check is performed here. */
	if (queue->head == NULL) {
		queue->tail = pkt;
	}

	FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
	queue->head = pkt;
	queue->len++;
	/* increment parent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
	/* increment grandparent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
}

/** Fetch the backup queue for a flowring, and assign flow control thresholds */
void
dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
	int queue_budget, int cumm_threshold, void *cumm_ctr,
	int l2cumm_threshold, void *l2cumm_ctr)
{
	flow_queue_t * queue = NULL;

	ASSERT(dhdp != (dhd_pub_t*)NULL);
	ASSERT(queue_budget > 1);
	ASSERT(cumm_threshold > 1);
	ASSERT(cumm_ctr != (void*)NULL);
	ASSERT(l2cumm_threshold > 1);
	ASSERT(l2cumm_ctr != (void*)NULL);

	queue = dhd_flow_queue(dhdp, flowid);
	if (queue) {
		DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */

		/* Set the queue's parent threshold and cumulative counter */
		DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
		DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);

		/* Set the queue's grandparent threshold and cumulative counter */
		DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
		DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
	}
}

/** Initializes data structures of multiple flow rings */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
{
	uint32 idx;
	uint32 flow_ring_table_sz;
	uint32 if_flow_lkup_sz = 0;
	void * flowid_allocator;
	flow_ring_table_t *flow_ring_table = NULL;
	if_flow_lkup_t *if_flow_lkup = NULL;
	void *lock = NULL;
	void *list_lock = NULL;
	unsigned long flags;

	DHD_INFO(("%s\n", __FUNCTION__));

	/* Construct a 16bit flowid allocator; common (H2D) rings are excluded
	 * from the allocatable range and FLOWID_RESERVED ids are held back. */
	flowid_allocator = id16_map_init(dhdp->osh,
		num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
	if (flowid_allocator == NULL) {
		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* Allocate a flow ring table, comprising of requested number of rings */
	flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
	flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
	if (flow_ring_table == NULL) {
		DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
		goto fail;
	}

	/* Initialize flow ring table state */
	DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
	DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
	/* NOTE(review): flow_ring_table came from MALLOCZ above, so this bzero
	 * is redundant (harmless). */
	bzero((uchar *)flow_ring_table, flow_ring_table_sz);
	for (idx = 0; idx < num_flow_rings; idx++) {
		flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
		flow_ring_table[idx].flowid = (uint16)idx;
		flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
#ifdef IDLE_TX_FLOW_MGMT
		flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
		if (flow_ring_table[idx].lock == NULL) {
			DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
			goto fail;
		}

		dll_init(&flow_ring_table[idx].list);

		/* Initialize the per flow ring backup queue */
		dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
			FLOW_RING_QUEUE_THRESHOLD);
	}

	/* Allocate per interface hash table (for fast lookup from interface to flow ring) */
	if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
	if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
		DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
	if (if_flow_lkup == NULL) {
		DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
		goto fail;
	}

	/* Initialize per interface hash table */
	for (idx = 0; idx < DHD_MAX_IFS; idx++) {
		int hash_ix;
		if_flow_lkup[idx].status = 0;
		if_flow_lkup[idx].role = 0;
		for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
			if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
	}

	lock = dhd_os_spin_lock_init(dhdp->osh);
	if (lock == NULL)
		goto fail;

	list_lock = dhd_os_spin_lock_init(dhdp->osh);
	if (list_lock == NULL)
		goto lock_fail;

	dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
	bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
#ifdef DHD_LOSSLESS_ROAMING
	dhdp->dequeue_prec_map = ALLPRIO;
#endif // endif
	/* Now populate into dhd pub; done under the flowid lock so readers see a
	 * consistent set of pointers once flow_rings_inited flips to TRUE. */
	DHD_FLOWID_LOCK(lock, flags);
	dhdp->num_flow_rings = num_flow_rings;
	dhdp->flowid_allocator = (void *)flowid_allocator;
	dhdp->flow_ring_table = (void *)flow_ring_table;
	dhdp->if_flow_lkup = (void *)if_flow_lkup;
	dhdp->flowid_lock = lock;
	dhdp->flow_rings_inited = TRUE;
	dhdp->flowring_list_lock = list_lock;
	DHD_FLOWID_UNLOCK(lock, flags);

	DHD_INFO(("%s done\n", __FUNCTION__));
	return BCME_OK;

lock_fail:
	/* deinit the spinlock */
	dhd_os_spin_lock_deinit(dhdp->osh, lock);

fail:
	/* Unwind everything allocated above, in reverse order. */
	/* Destruct the per interface flow lkup table */
	if (if_flow_lkup != NULL) {
		DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
	}
	if (flow_ring_table != NULL) {
		for (idx = 0; idx < num_flow_rings; idx++) {
			if (flow_ring_table[idx].lock != NULL)
				dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
		}
		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
	}
	id16_map_fini(dhdp->osh, flowid_allocator);

	return BCME_NOMEM;
}

/** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
	uint16 idx;
	uint32 flow_ring_table_sz;
	uint32 if_flow_lkup_sz;
	flow_ring_table_t *flow_ring_table;
	unsigned long flags;
	void *lock;

	DHD_INFO(("dhd_flow_rings_deinit\n"));

	if (!(dhdp->flow_rings_inited)) {
		DHD_ERROR(("dhd_flow_rings not initialized!\n"));
		return;
	}

	if (dhdp->flow_ring_table != NULL) {

		ASSERT(dhdp->num_flow_rings > 0);

		/* Detach the table pointer under the lock first so no new lookups
		 * race with the per-ring teardown below. */
		DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
		flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
		dhdp->flow_ring_table = NULL;
		DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
		for (idx = 0; idx <
dhdp->num_flow_rings; idx++) {
			if (flow_ring_table[idx].active) {
				dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
			}
			ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));

			/* Deinit flow ring queue locks before destroying flow ring table */
			if (flow_ring_table[idx].lock != NULL) {
				dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
			}
			flow_ring_table[idx].lock = NULL;

		}

		/* Destruct the flow ring table */
		flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
	}

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);

	/* Destruct the per interface flow lkup table */
	if (dhdp->if_flow_lkup != NULL) {
		if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
		bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
		DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
		dhdp->if_flow_lkup = NULL;
	}

	/* Destruct the flowid allocator */
	if (dhdp->flowid_allocator != NULL)
		dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);

	dhdp->num_flow_rings = 0U;
	bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

	/* Publish the NULL flowid_lock before unlocking/destroying it, so later
	 * users see the lock gone rather than a dangling pointer. */
	lock = dhdp->flowid_lock;
	dhdp->flowid_lock = NULL;

	if (lock) {
		DHD_FLOWID_UNLOCK(lock, flags);
		dhd_os_spin_lock_deinit(dhdp->osh, lock);
	}

	dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
	dhdp->flowring_list_lock = NULL;

	ASSERT(dhdp->if_flow_lkup == NULL);
	ASSERT(dhdp->flowid_allocator == NULL);
	ASSERT(dhdp->flow_ring_table == NULL);
	dhdp->flow_rings_inited = FALSE;
}

/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
uint8
dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
{
	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
	ASSERT(if_flow_lkup);
	return if_flow_lkup[ifindex].role;
}

#ifdef WLTDLS
/** Returns TRUE iff 'da' matches an entry in the TDLS peer table. */
bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
{
	unsigned long flags;
	tdls_peer_node_t *cur = NULL;

	DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
	cur = dhdp->peer_tbl.node;

	while (cur != NULL) {
		if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
			DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
			return TRUE;
		}
		cur = cur->next;
	}
	DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
	return FALSE;
}
#endif /* WLTDLS */

/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
static INLINE uint16
dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
	int hash;
	bool ismcast = FALSE;
	flow_hash_info_t *cur;
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return FLOWID_INVALID;

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	ASSERT(if_flow_lkup);

	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
		(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
#ifdef WLTDLS
		/* TDLS peers get per-destination rings even on a STA interface. */
		if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
			is_tdls_destination(dhdp, da)) {
			hash = DHD_FLOWRING_HASHINDEX(da, prio);
			cur = if_flow_lkup[ifindex].fl_hash[hash];
			while (cur != NULL) {
				if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
					DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
					return cur->flowid;
				}
				cur = cur->next;
			}
			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
			return FLOWID_INVALID;
		}
#endif /* WLTDLS */
		/* For STA non TDLS dest and WDS dest flow ring id is mapped based on prio only */
		cur = if_flow_lkup[ifindex].fl_hash[prio];
		if (cur) {
			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
			return cur->flowid;
		}
	} else {

		/* AP-like roles: bcast/mcast traffic shares bucket 0. */
		if (ETHER_ISMULTI(da)) {
			ismcast = TRUE;
			hash = 0;
		} else {
			hash = DHD_FLOWRING_HASHINDEX(da, prio);
		}

		cur = if_flow_lkup[ifindex].fl_hash[hash];

		while (cur) {
			if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
				(!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
				(cur->flow_info.tid ==
prio))) {
				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
				return cur->flowid;
			}
			cur = cur->next;
		}
	}
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
	return FLOWID_INVALID;
} /* dhd_flowid_find */

/** Create unique Flow ID, called when a flow ring is created. */
static INLINE uint16
dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
	flow_hash_info_t *fl_hash_node, *cur;
	if_flow_lkup_t *if_flow_lkup;
	int hash;
	uint16 flowid;
	unsigned long flags;

	fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
	if (fl_hash_node == NULL) {
		DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__));
		return FLOWID_INVALID;
	}
	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	ASSERT(dhdp->flowid_allocator != NULL);
	flowid = id16_map_alloc(dhdp->flowid_allocator);
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	if (flowid == FLOWID_INVALID) {
		MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
		DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
		return FLOWID_INVALID;
	}

	fl_hash_node->flowid = flowid;
	fl_hash_node->flow_info.tid = prio;
	fl_hash_node->flow_info.ifindex = ifindex;
	fl_hash_node->next = NULL;

	/* Link the new node into the interface hash table; bucket choice mirrors
	 * dhd_flowid_find (prio bucket for STA/WDS, da+prio hash otherwise). */
	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
		(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
		/* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
#ifdef WLTDLS
		if (dhdp->peer_tbl.tdls_peer_count &&
			(is_tdls_destination(dhdp, da))) {
			hash = DHD_FLOWRING_HASHINDEX(da, prio);
			cur = if_flow_lkup[ifindex].fl_hash[hash];
			if (cur) {
				while (cur->next) {
					cur = cur->next;
				}
				cur->next = fl_hash_node;
			} else {
				if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
			}
		} else
#endif /* WLTDLS */
			if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
	} else {

		/* For bcast/mcast assign first slot in in interface */
		hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
		cur = if_flow_lkup[ifindex].fl_hash[hash];
		if (cur) {
			while (cur->next) {
				cur = cur->next;
			}
			cur->next = fl_hash_node;
		} else
			if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
	}
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));

	/* NOTE(review): on this out-of-range error path the node stays linked in
	 * the hash and the id stays allocated, yet FLOWID_INVALID is returned —
	 * the state is inconsistent; also 'role' is read outside the lock here.
	 * Left as-is pending confirmation against upstream. */
	if (fl_hash_node->flowid >= dhdp->num_flow_rings) {
		DHD_ERROR(("%s: flowid=%d num_flow_rings=%d ifindex=%d prio=%d role=%d\n",
			__FUNCTION__, fl_hash_node->flowid, dhdp->num_flow_rings,
			ifindex, prio, if_flow_lkup[ifindex].role));
		dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
		dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
		return FLOWID_INVALID;
	}

	return fl_hash_node->flowid;
} /* dhd_flowid_alloc */

/** Get flow ring ID, if not present try to create one */
static INLINE int
dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
	uint8 prio, char *sa, char *da, uint16 *flowid)
{
	uint16 id;
	flow_ring_node_t *flow_ring_node;
	flow_ring_table_t *flow_ring_table;
	unsigned long flags;
	int ret;
	bool is_sta_assoc;

	DHD_TRACE(("%s\n", __FUNCTION__));

	if (!dhdp->flow_ring_table) {
		return BCME_ERROR;
	}

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return BCME_BADARG;

	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);

	if (id == FLOWID_INVALID) {

		if_flow_lkup_t *if_flow_lkup;
		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

		if (!if_flow_lkup[ifindex].status)
			return BCME_ERROR;
		BCM_REFERENCE(is_sta_assoc);
#if defined(PCIE_FULL_DONGLE)
		is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da);
		DHD_ERROR_RLMT(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__,
			ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role,
			is_sta_assoc));
		/* Refuse unicast tx on AP-like roles until the peer/scb exists. */
		if (!ETHER_ISMULTI(da) &&
			((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) ||
			(FALSE) ||
#ifdef WL_NAN
			(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_NAN) ||
#endif /* WL_NAN */
			(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) &&
			(!is_sta_assoc)) {
			DHD_ERROR_RLMT(("Attempt to send pkt with out peer/scb addition\n"));
			return BCME_ERROR;
		}
#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */

		id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
		if (id == FLOWID_INVALID) {
			DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
				__FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
			return BCME_ERROR;
		}

		ASSERT(id < dhdp->num_flow_rings);

		/* register this flowid in dhd_pub */
		dhd_add_flowid(dhdp, ifindex, prio, da, id);

		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];

		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		/* Init Flow info */
		memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
		memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
		flow_ring_node->flow_info.tid = prio;
		flow_ring_node->flow_info.ifindex = ifindex;
		flow_ring_node->active = TRUE;
		flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;

#ifdef TX_STATUS_LATENCY_STATS
		flow_ring_node->flow_info.num_tx_status = 0;
		flow_ring_node->flow_info.cum_tx_status_latency = 0;
		flow_ring_node->flow_info.num_tx_pkts = 0;
#endif /* TX_STATUS_LATENCY_STATS */
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

		/* Create and inform device about the new flow */
		if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
			!= BCME_OK) {
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
			flow_ring_node->active = FALSE;
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
			return BCME_ERROR;
		}

		*flowid = id;
		return BCME_OK;
	} else {
		/* if the Flow id was found in the hash */
		ASSERT(id < dhdp->num_flow_rings);

		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		/*
		 * If the flow_ring_node is in Open State or Status pending state then
		 * we can return the Flow id to the caller.If the flow_ring_node is in
		 * FLOW_RING_STATUS_PENDING this means the creation is in progress and
		 * hence the packets should be queued.
		 *
		 * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or
		 * FLOW_RING_STATUS_CLOSED, then we should return Error.
		 * Note that if the flowring is being deleted we would mark it as
		 * FLOW_RING_STATUS_DELETE_PENDING. Now before Dongle could respond and
		 * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets.
		 * We should drop the packets in that case.
		 * The decision to return OK should NOT be based on 'active' variable, because
		 * active is made TRUE when a flow_ring_node gets allocated and is made
		 * FALSE when the flow ring gets removed and does not reflect the True state
		 * of the Flow ring.
		 * In case if IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
		 * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
		 * is to be returned and from dhd_bus_txdata, the flowring would be resumed again.
		 * The status FLOW_RING_STATUS_RESUME_PENDING, is equivalent to
		 * FLOW_RING_STATUS_CREATE_PENDING.
		 */
		if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
			flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
			*flowid = FLOWID_INVALID;
			ret = BCME_ERROR;
		} else {
			*flowid = id;
			ret = BCME_OK;
		}

		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
		return ret;
	} /* Flow Id found in the hash */
} /* dhd_flowid_lookup */

/** Returns BCME_OK iff 'flowid' is present in ifindex's hash table. */
int
dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
	int hashidx = 0;
	bool found = FALSE;
	flow_hash_info_t *cur;
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;

	if (!dhdp->flow_ring_table) {
		DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
	for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
		cur = if_flow_lkup[ifindex].fl_hash[hashidx];
		if (cur) {
			/* NOTE(review): this head check duplicates the first iteration
			 * of the while loop below — redundant but harmless. */
			if (cur->flowid == flowid) {
				found = TRUE;
			}

			while (!found && cur) {
				if (cur->flowid == flowid) {
					found = TRUE;
					break;
				}
				cur = cur->next;
			}

			if (found) {
				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
				return BCME_OK;
			}
		}
	}
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	return BCME_ERROR;
}

/** Debug wrapper: force creation/lookup of a flowid for the given tuple. */
int
dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
	uint8 prio, char *sa, char *da, uint16 *flowid)
{
	return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
}

/**
 * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
 * select the flowring to send the packet to the dongle.
 */
int BCMFASTPATH
dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
{
	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
	struct ether_header *eh = (struct ether_header *)pktdata;
	uint16 flowid = 0;

	ASSERT(ifindex < DHD_MAX_IFS);

	if (ifindex >= DHD_MAX_IFS) {
		return BCME_BADARG;
	}

	if (!dhdp->flowid_allocator) {
		DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
		&flowid) != BCME_OK) {
		return BCME_ERROR;
	}

	DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));

	/* Tag the packet with flowid */
	DHD_PKT_SET_FLOWID(pktbuf, flowid);
	return BCME_OK;
}

/** Release a flowid: unlink its hash node, free the id, free the node. */
void
dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
	int hashix;
	bool found = FALSE;
	flow_hash_info_t *cur, *prev;
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {

		cur = if_flow_lkup[ifindex].fl_hash[hashix];

		if (cur) {
			if (cur->flowid == flowid) {
				found = TRUE;
			}

			prev = NULL;
			while (!found && cur) {
				if (cur->flowid == flowid) {
					found = TRUE;
					break;
				}
				prev = cur;
				cur = cur->next;
			}
			if (found) {
				/* Unlink the node (head or interior) from the bucket. */
				if (!prev) {
					if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
				} else {
					prev->next = cur->next;
				}

				/* deregister flowid from dhd_pub.
 */
				dhd_del_flowid(dhdp, ifindex, flowid);

				id16_map_free(dhdp->flowid_allocator, flowid);
				/* MFREE only after dropping the lock. */
				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
				MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));

				return;
			}
		}
	}

	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
	DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
		__FUNCTION__, flowid));
} /* dhd_flowid_free */

/**
 * Delete all Flow rings associated with the given interface. Is called when eg the dongle
 * indicates that a wireless link has gone down.
 */
void
dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
{
	uint32 id;
	flow_ring_table_t *flow_ring_table;

	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	if (!dhdp->flow_ring_table)
		return;

	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
	for (id = 0; id < dhdp->num_flow_rings; id++) {
		if (flow_ring_table[id].active &&
			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
			dhd_bus_flow_ring_delete_request(dhdp->bus,
				(void *) &flow_ring_table[id]);
		}
	}
}

/** Request a flush of every open flow ring belonging to 'ifindex'. */
void
dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
{
	uint32 id;
	flow_ring_table_t *flow_ring_table;

	DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	if (!dhdp->flow_ring_table)
		return;
	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

	for (id = 0; id < dhdp->num_flow_rings; id++) {
		if (flow_ring_table[id].active &&
			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
			dhd_bus_flow_ring_flush_request(dhdp->bus,
				(void *) &flow_ring_table[id]);
		}
	}
}

/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality.
*/ +void +dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) +{ + uint32 id; + flow_ring_table_t *flow_ring_table; + + DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex)); + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + if (!dhdp->flow_ring_table) + return; + + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + for (id = 0; id < dhdp->num_flow_rings; id++) { + if (flow_ring_table[id].active && + (flow_ring_table[id].flow_info.ifindex == ifindex) && + (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) && + (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) { + DHD_ERROR(("%s: deleting flowid %d\n", + __FUNCTION__, flow_ring_table[id].flowid)); + dhd_bus_flow_ring_delete_request(dhdp->bus, + (void *) &flow_ring_table[id]); + } + } +} + +/** Handles interface ADD, CHANGE, DEL indications from the dongle */ +void +dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, + uint8 op, uint8 role) +{ + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + DHD_INFO(("%s: ifindex %u op %u role is %u \n", + __FUNCTION__, ifindex, op, role)); + if (!dhdp->flowid_allocator) { + DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); + return; + } + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) { + + if_flow_lkup[ifindex].role = role; + + if (role == WLC_E_IF_ROLE_WDS) { + /** + * WDS role does not send WLC_E_LINK event after interface is up. + * So to create flowrings for WDS, make status as TRUE in WLC_E_IF itself. + * same is true while making the status as FALSE. + * TODO: Fix FW to send WLC_E_LINK for WDS role aswell. So that all the + * interfaces are handled uniformly. 
+ */ + if_flow_lkup[ifindex].status = TRUE; + DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n", + __FUNCTION__, ifindex, role)); + } + } else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) { + if_flow_lkup[ifindex].status = FALSE; + DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n", + __FUNCTION__, ifindex, role)); + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); +} + +/** Handles a STA 'link' indication from the dongle */ +int +dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status) +{ + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return BCME_BADARG; + + DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status)); + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + if (status) { + if_flow_lkup[ifindex].status = TRUE; + } else { + if_flow_lkup[ifindex].status = FALSE; + } + + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + return BCME_OK; +} + +/** Update flow priority mapping, called on IOVAR */ +int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) +{ + uint16 flowid; + flow_ring_node_t *flow_ring_node; + + if (map > DHD_FLOW_PRIO_LLR_MAP) + return BCME_BADOPTION; + + /* Check if we need to change prio map */ + if (map == dhdp->flow_prio_map_type) + return BCME_OK; + + /* If any ring is active we cannot change priority mapping for flow rings */ + for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + if (flow_ring_node->active) + return BCME_EPERM; + } + + /* Inform firmware about new mapping type */ + if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE)) + return BCME_ERROR; + + /* update internal structures */ + dhdp->flow_prio_map_type = map; + if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP) + bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + else + bcopy(prio2ac, 
dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + + return BCME_OK; +} + +/** Inform firmware on updated flow priority mapping, called on IOVAR */ +int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set) +{ + uint8 iovbuf[24]; + int len; + if (!set) { + memset(&iovbuf, 0, sizeof(iovbuf)); + len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf)); + if (len == 0) { + return BCME_BUFTOOSHORT; + } + if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) { + DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__)); + return BCME_ERROR; + } + *map = iovbuf[0]; + return BCME_OK; + } + len = bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf)); + if (len == 0) { + return BCME_BUFTOOSHORT; + } + if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) { + DHD_ERROR(("%s: failed to set fl_prio_map \n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} diff --git a/bcmdhd.100.10.315.x/dhd_flowring.h b/bcmdhd.100.10.315.x/dhd_flowring.h new file mode 100644 index 0000000..a2eb4b7 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_flowring.h @@ -0,0 +1,272 @@ +/* + * @file Header file describing the flow rings DHD interfaces. + * + * Flow rings are transmit traffic (=propagating towards antenna) related entities. + * + * Provides type definitions and function prototypes used to create, delete and manage flow rings at + * high level. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_flowring.h 761412 2018-05-08 05:34:36Z $ + */ + +/**************** + * Common types * + */ + +#ifndef _dhd_flowrings_h_ +#define _dhd_flowrings_h_ + +/* Max pkts held in a flow ring's backup queue */ +#define FLOW_RING_QUEUE_THRESHOLD (2048) + +/* Number of H2D common rings */ +#define FLOW_RING_COMMON BCMPCIE_H2D_COMMON_MSGRINGS + +#define FLOWID_INVALID (ID16_INVALID) +#define FLOWID_RESERVED (FLOW_RING_COMMON) + +#define FLOW_RING_STATUS_OPEN 0 +#define FLOW_RING_STATUS_CREATE_PENDING 1 +#define FLOW_RING_STATUS_CLOSED 2 +#define FLOW_RING_STATUS_DELETE_PENDING 3 +#define FLOW_RING_STATUS_FLUSH_PENDING 4 + +#ifdef IDLE_TX_FLOW_MGMT +#define FLOW_RING_STATUS_SUSPENDED 5 +#define FLOW_RING_STATUS_RESUME_PENDING 6 +#endif /* IDLE_TX_FLOW_MGMT */ +#define FLOW_RING_STATUS_STA_FREEING 7 + +#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048 +/* Maximum Mu MIMO frame size */ +#ifdef WL_MONITOR +#define DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ 4096 +#endif /* WL_MONITOR */ + +#define DHD_FLOW_PRIO_AC_MAP 0 +#define DHD_FLOW_PRIO_TID_MAP 1 +/* Flow ring prority map for lossless roaming */ +#define DHD_FLOW_PRIO_LLR_MAP 2 + +/* Hashing a MacAddress for lkup into a per interface flow hash table */ +#define DHD_FLOWRING_HASH_SIZE 256 +#define DHD_FLOWRING_HASHINDEX(ea, prio) \ + ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \ + % DHD_FLOWRING_HASH_SIZE) + +#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role) +#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP) +#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA) +#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO) +#define DHD_IF_ROLE_WDS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS) +#define DHD_FLOW_RING(dhdp, flowid) \ + (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid]) + +struct flow_queue; + +/* Flow Ring Queue 
Enqueue overflow callback */ +typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt); + +/** + * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred + * between queue and ring. A packet from the host stack is first added to the queue, and in a later + * stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets in the + * flow ring are device owned. + */ +typedef struct flow_queue { + dll_t list; /* manage a flowring queue in a double linked list */ + void * head; /* first packet in the queue */ + void * tail; /* last packet in the queue */ + uint16 len; /* number of packets in the queue */ + uint16 max; /* maximum or min budget (used in cumm) */ + uint32 threshold; /* parent's cummulative length threshold */ + void * clen_ptr; /* parent's cummulative length counter */ + uint32 failures; /* enqueue failures due to queue overflow */ + flow_queue_cb_t cb; /* callback invoked on threshold crossing */ + uint32 l2threshold; /* grandparent's (level 2) cummulative length threshold */ + void * l2clen_ptr; /* grandparent's (level 2) cummulative length counter */ +} flow_queue_t; + +#define DHD_FLOW_QUEUE_LEN(queue) ((int)(queue)->len) +#define DHD_FLOW_QUEUE_MAX(queue) ((int)(queue)->max) +#define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold) +#define DHD_FLOW_QUEUE_L2THRESHOLD(queue) ((int)(queue)->l2threshold) +#define DHD_FLOW_QUEUE_EMPTY(queue) ((queue)->len == 0) +#define DHD_FLOW_QUEUE_FAILURES(queue) ((queue)->failures) + +#define DHD_FLOW_QUEUE_AVAIL(queue) ((int)((queue)->max - (queue)->len)) +#define DHD_FLOW_QUEUE_FULL(queue) ((queue)->len >= (queue)->max) + +#define DHD_FLOW_QUEUE_OVFL(queue, budget) \ + (((queue)->len) > budget) + +#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \ + ((queue)->max) = ((budget) - 1) + +/* Queue's cummulative threshold. 
*/ +#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \ + ((queue)->threshold) = ((cumm_threshold) - 1) + +/* Queue's cummulative length object accessor. */ +#define DHD_FLOW_QUEUE_CLEN_PTR(queue) ((queue)->clen_ptr) + +/* Set a queue's cumm_len point to a parent's cumm_ctr_t cummulative length */ +#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \ + ((queue)->clen_ptr) = (void *)(parent_clen_ptr) + +/* Queue's level 2 cummulative threshold. */ +#define DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold) \ + ((queue)->l2threshold) = ((l2cumm_threshold) - 1) + +/* Queue's level 2 cummulative length object accessor. */ +#define DHD_FLOW_QUEUE_L2CLEN_PTR(queue) ((queue)->l2clen_ptr) + +/* Set a queue's level 2 cumm_len point to a grandparent's cumm_ctr_t cummulative length */ +#define DHD_FLOW_QUEUE_SET_L2CLEN(queue, grandparent_clen_ptr) \ + ((queue)->l2clen_ptr) = (void *)(grandparent_clen_ptr) + +#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_pkttag_fr { + uint16 flowid; + uint16 ifid; +#ifdef DHD_LB_TXC + int dataoff; + dmaaddr_t physaddr; + uint32 pa_len; +#endif /* DHD_LB_TXC */ +} dhd_pkttag_fr_t; + +#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx)) +#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa)) +#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen)) +#define DHD_PKTTAG_IFID(tag) ((tag)->ifid) +#define DHD_PKTTAG_PA(tag) ((tag)->physaddr) +#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len) + +/** each flow ring is dedicated to a tid/sa/da combination */ +typedef struct flow_info { + uint8 tid; + uint8 ifindex; + uchar sa[ETHER_ADDR_LEN]; + uchar da[ETHER_ADDR_LEN]; +#ifdef TX_STATUS_LATENCY_STATS + /* total number of tx_status received on this flowid */ + uint64 num_tx_status; + /* cumulative tx_status latency for this flowid */ + uint64 cum_tx_status_latency; + /* num tx packets sent on this flowring */ + 
uint64 num_tx_pkts; +#endif /* TX_STATUS_LATENCY_STATS */ +} flow_info_t; + +/** a flow ring is used for outbound (towards antenna) 802.3 packets */ +typedef struct flow_ring_node { + dll_t list; /* manage a constructed flowring in a dll, must be at first place */ + flow_queue_t queue; /* queues packets before they enter the flow ring, flow control */ + bool active; + uint8 status; + /* + * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For + * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation. + */ + uint16 flowid; + flow_info_t flow_info; + void *prot_info; + void *lock; /* lock for flowring access protection */ + +#ifdef IDLE_TX_FLOW_MGMT + uint64 last_active_ts; /* contains last active timestamp */ +#endif /* IDLE_TX_FLOW_MGMT */ +} flow_ring_node_t; + +typedef flow_ring_node_t flow_ring_table_t; + +typedef struct flow_hash_info { + uint16 flowid; + flow_info_t flow_info; + struct flow_hash_info *next; +} flow_hash_info_t; + +typedef struct if_flow_lkup { + bool status; + uint8 role; /* Interface role: STA/AP */ + flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */ +} if_flow_lkup_t; + +static INLINE flow_ring_node_t * +dhd_constlist_to_flowring(dll_t *item) +{ + return ((flow_ring_node_t *)item); +} + +/* Exported API */ + +/* Flow ring's queue management functions */ +extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid); +extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid); + +extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max); +extern void dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max); +extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb); +extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); +extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue); +extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, 
flow_queue_t *queue, void *pkt); + +extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, + int queue_budget, int cumm_threshold, void *cumm_ctr, + int l2cumm_threshold, void *l2cumm_ctr); +extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings); + +extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp); + +extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, + void *pktbuf); +extern int dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da, uint16 *flowid); +extern int dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifidex, uint16 flowid); + +extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid); + +extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex); +extern void dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex); + +extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, + char *addr); + +/* Handle Interface ADD, DEL operations */ +extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, + uint8 op, uint8 role); + +/* Handle a STA interface link status update */ +extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, + uint8 status); +extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set); +extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map); + +extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex); +#endif /* _dhd_flowrings_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_gpio.c b/bcmdhd.100.10.315.x/dhd_gpio.c new file mode 100644 index 0000000..54d02d4 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_gpio.c @@ -0,0 +1,386 @@ + +#include +#include +#include + +#ifdef CUSTOMER_HW_PLATFORM +#include +#define sdmmc_channel sdmmc_device_mmc0 +#endif /* CUSTOMER_HW_PLATFORM */ + +#if defined(BUS_POWER_RESTORE) && defined(BCMSDIO) +#include +#include +#include +#include +#endif /* defined(BUS_POWER_RESTORE) && defined(BCMSDIO) */ + +#ifdef 
CONFIG_DHD_USE_STATIC_BUF +extern void *bcmdhd_mem_prealloc(int section, unsigned long size); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +static int gpio_wl_reg_on = -1; // WL_REG_ON is input pin of WLAN module +#ifdef CUSTOMER_OOB +static int gpio_wl_host_wake = -1; // WL_HOST_WAKE is output pin of WLAN module +#endif + +#ifdef CUSTOMER_HW_AMLOGIC +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) +#include +extern int wifi_irq_trigger_level(void); +extern u8 *wifi_get_mac(void); +#endif +extern void sdio_reinit(void); +extern void extern_wifi_set_enable(int is_on); +extern void pci_remove_reinit(unsigned int vid, unsigned int pid, int delBus); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +extern int wifi_irq_num(void); +#endif +#endif + +static int +dhd_wlan_set_power(int on +#ifdef BUS_POWER_RESTORE +, wifi_adapter_info_t *adapter +#endif /* BUS_POWER_RESTORE */ +) +{ + int err = 0; + + if (on) { + printf("======== PULL WL_REG_ON(%d) HIGH! ========\n", gpio_wl_reg_on); + if (gpio_wl_reg_on >= 0) { + err = gpio_direction_output(gpio_wl_reg_on, 1); + if (err) { + printf("%s: WL_REG_ON didn't output high\n", __FUNCTION__); + return -EIO; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +#ifdef BCMSDIO + extern_wifi_set_enable(0); + mdelay(200); + extern_wifi_set_enable(1); + mdelay(200); +// sdio_reinit(); +#endif +#endif +#if defined(BUS_POWER_RESTORE) +#if defined(BCMSDIO) + if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) { + printf("======== mmc_power_restore_host! ========\n"); + mmc_power_restore_host(adapter->sdio_func->card->host); + } +#elif defined(BCMPCIE) + OSL_SLEEP(50); /* delay needed to be able to restore PCIe configuration registers */ + if (adapter->pci_dev) { + printf("======== pci_set_power_state PCI_D0! 
========\n"); + pci_set_power_state(adapter->pci_dev, PCI_D0); + if (adapter->pci_saved_state) + pci_load_and_free_saved_state(adapter->pci_dev, &adapter->pci_saved_state); + pci_restore_state(adapter->pci_dev); + err = pci_enable_device(adapter->pci_dev); + if (err < 0) + printf("%s: PCI enable device failed", __FUNCTION__); + pci_set_master(adapter->pci_dev); + } +#endif /* BCMPCIE */ +#endif /* BUS_POWER_RESTORE */ + /* Lets customer power to get stable */ + mdelay(100); + } else { +#if defined(BUS_POWER_RESTORE) +#if defined(BCMSDIO) + if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) { + printf("======== mmc_power_save_host! ========\n"); + mmc_power_save_host(adapter->sdio_func->card->host); + } +#elif defined(BCMPCIE) + if (adapter->pci_dev) { + printf("======== pci_set_power_state PCI_D3hot! ========\n"); + pci_save_state(adapter->pci_dev); + adapter->pci_saved_state = pci_store_saved_state(adapter->pci_dev); + if (pci_is_enabled(adapter->pci_dev)) + pci_disable_device(adapter->pci_dev); + pci_set_power_state(adapter->pci_dev, PCI_D3hot); + } +#endif /* BCMPCIE */ +#endif /* BUS_POWER_RESTORE */ + printf("======== PULL WL_REG_ON(%d) LOW! ========\n", gpio_wl_reg_on); + if (gpio_wl_reg_on >= 0) { + err = gpio_direction_output(gpio_wl_reg_on, 0); + if (err) { + printf("%s: WL_REG_ON didn't output low\n", __FUNCTION__); + return -EIO; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +// extern_wifi_set_enable(0); +// mdelay(200); +#endif + } + + return err; +} + +static int dhd_wlan_set_reset(int onoff) +{ + return 0; +} + +static int dhd_wlan_set_carddetect(int present) +{ + int err = 0; + +#if !defined(BUS_POWER_RESTORE) + if (present) { +#if defined(BCMSDIO) + printf("======== Card detection to detect SDIO card! 
========\n"); +#ifdef CUSTOMER_HW_PLATFORM + err = sdhci_force_presence_change(&sdmmc_channel, 1); +#endif /* CUSTOMER_HW_PLATFORM */ +#ifdef CUSTOMER_HW_AMLOGIC + sdio_reinit(); +#endif +#endif + } else { +#if defined(BCMSDIO) + printf("======== Card detection to remove SDIO card! ========\n"); +#ifdef CUSTOMER_HW_PLATFORM + err = sdhci_force_presence_change(&sdmmc_channel, 0); +#endif /* CUSTOMER_HW_PLATFORM */ +#ifdef CUSTOMER_HW_AMLOGIC + extern_wifi_set_enable(0); + mdelay(200); +#endif +#elif defined(BCMPCIE) + printf("======== Card detection to remove PCIE card! ========\n"); + extern_wifi_set_enable(0); + mdelay(200); +#endif + } +#endif /* BUS_POWER_RESTORE */ + + return err; +} + +static int dhd_wlan_get_mac_addr(unsigned char *buf) +{ + int err = 0; + + printf("======== %s ========\n", __FUNCTION__); +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ +#ifdef EXAMPLE_GET_MAC_VER2 + /* EXAMPLE code */ + { + char macpad[56]= { + 0x00,0xaa,0x9c,0x84,0xc7,0xbc,0x9b,0xf6, + 0x02,0x33,0xa9,0x4d,0x5c,0xb4,0x0a,0x5d, + 0xa8,0xef,0xb0,0xcf,0x8e,0xbf,0x24,0x8a, + 0x87,0x0f,0x6f,0x0d,0xeb,0x83,0x6a,0x70, + 0x4a,0xeb,0xf6,0xe6,0x3c,0xe7,0x5f,0xfc, + 0x0e,0xa7,0xb3,0x0f,0x00,0xe4,0x4a,0xaf, + 0x87,0x08,0x16,0x6d,0x3a,0xe3,0xc7,0x80}; + bcopy(macpad, buf+6, sizeof(macpad)); + } +#endif /* EXAMPLE_GET_MAC_VER2 */ + + return err; +} + +static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = { + /* Table should be filled out based on custom platform regulatory requirement */ +#ifdef EXAMPLE_TABLE + {"", "XT", 49}, /* Universal if Country code is unknown or empty */ + {"US", "US", 0}, +#endif /* EXMAPLE_TABLE */ +}; + +#ifdef CUSTOM_FORCE_NODFS_FLAG +struct cntry_locales_custom brcm_wlan_translate_nodfs_table[] = { +#ifdef EXAMPLE_TABLE + {"", "XT", 50}, /* Universal if Country code is unknown 
or empty */ + {"US", "US", 0}, +#endif /* EXMAPLE_TABLE */ +}; +#endif + +static void *dhd_wlan_get_country_code(char *ccode +#ifdef CUSTOM_FORCE_NODFS_FLAG + , u32 flags +#endif +) +{ + struct cntry_locales_custom *locales; + int size; + int i; + + if (!ccode) + return NULL; + +#ifdef CUSTOM_FORCE_NODFS_FLAG + if (flags & WLAN_PLAT_NODFS_FLAG) { + locales = brcm_wlan_translate_nodfs_table; + size = ARRAY_SIZE(brcm_wlan_translate_nodfs_table); + } else { +#endif + locales = brcm_wlan_translate_custom_table; + size = ARRAY_SIZE(brcm_wlan_translate_custom_table); +#ifdef CUSTOM_FORCE_NODFS_FLAG + } +#endif + + for (i = 0; i < size; i++) + if (strcmp(ccode, locales[i].iso_abbrev) == 0) + return &locales[i]; + return NULL; +} + +struct resource dhd_wlan_resources[] = { + [0] = { + .name = "bcmdhd_wlan_irq", + .start = 0, /* Dummy */ + .end = 0, /* Dummy */ + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE + | IORESOURCE_IRQ_HIGHLEVEL, /* Dummy */ + }, +}; + +struct wifi_platform_data dhd_wlan_control = { + .set_power = dhd_wlan_set_power, + .set_reset = dhd_wlan_set_reset, + .set_carddetect = dhd_wlan_set_carddetect, + .get_mac_addr = dhd_wlan_get_mac_addr, +#ifdef CONFIG_DHD_USE_STATIC_BUF + .mem_prealloc = bcmdhd_mem_prealloc, +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + .get_country_code = dhd_wlan_get_country_code, +}; + +int dhd_wlan_init_gpio(void) +{ + int err = 0; +#ifdef CUSTOMER_OOB + int host_oob_irq = -1; + uint host_oob_irq_flags = 0; +#endif + + /* Please check your schematic and fill right GPIO number which connected to + * WL_REG_ON and WL_HOST_WAKE. + */ + gpio_wl_reg_on = -1; +#ifdef CUSTOMER_OOB + gpio_wl_host_wake = -1; +#endif + +#ifdef CUSTOMER_HW_AMLOGIC +#if defined(BCMPCIE) + printf("======== Card detection to detect PCIE card! 
========\n"); + pci_remove_reinit(0x14e4, 0x43ec, 1); +#endif +#endif + + if (gpio_wl_reg_on >= 0) { + err = gpio_request(gpio_wl_reg_on, "WL_REG_ON"); + if (err < 0) { + printf("%s: gpio_request(%d) for WL_REG_ON failed\n", + __FUNCTION__, gpio_wl_reg_on); + gpio_wl_reg_on = -1; + } + } + +#ifdef CUSTOMER_OOB + if (gpio_wl_host_wake >= 0) { + err = gpio_request(gpio_wl_host_wake, "bcmdhd"); + if (err < 0) { + printf("%s: gpio_request(%d) for WL_HOST_WAKE failed\n", + __FUNCTION__, gpio_wl_host_wake); + return -1; + } + err = gpio_direction_input(gpio_wl_host_wake); + if (err < 0) { + printf("%s: gpio_direction_input(%d) for WL_HOST_WAKE failed\n", + __FUNCTION__, gpio_wl_host_wake); + gpio_free(gpio_wl_host_wake); + return -1; + } + host_oob_irq = gpio_to_irq(gpio_wl_host_wake); + if (host_oob_irq < 0) { + printf("%s: gpio_to_irq(%d) for WL_HOST_WAKE failed\n", + __FUNCTION__, gpio_wl_host_wake); + gpio_free(gpio_wl_host_wake); + return -1; + } + } +#ifdef CUSTOMER_HW_AMLOGIC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + host_oob_irq = INT_GPIO_4; +#else + host_oob_irq = wifi_irq_num(); +#endif +#endif + +#ifdef HW_OOB +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + if (wifi_irq_trigger_level() == GPIO_IRQ_LOW) + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE; + else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else +#ifdef HW_OOB_LOW_LEVEL + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#endif +#endif +#else + host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_SHAREABLE; +#endif + + dhd_wlan_resources[0].start = dhd_wlan_resources[0].end = host_oob_irq; + dhd_wlan_resources[0].flags = host_oob_irq_flags; + printf("%s: WL_HOST_WAKE=%d, oob_irq=%d, oob_irq_flags=0x%x\n", __FUNCTION__, + 
gpio_wl_host_wake, host_oob_irq, host_oob_irq_flags); +#endif /* CUSTOMER_OOB */ + printf("%s: WL_REG_ON=%d\n", __FUNCTION__, gpio_wl_reg_on); + + return 0; +} + +static void dhd_wlan_deinit_gpio(void) +{ + if (gpio_wl_reg_on >= 0) { + printf("%s: gpio_free(WL_REG_ON %d)\n", __FUNCTION__, gpio_wl_reg_on); + gpio_free(gpio_wl_reg_on); + gpio_wl_reg_on = -1; + } +#ifdef CUSTOMER_OOB + if (gpio_wl_host_wake >= 0) { + printf("%s: gpio_free(WL_HOST_WAKE %d)\n", __FUNCTION__, gpio_wl_host_wake); + gpio_free(gpio_wl_host_wake); + gpio_wl_host_wake = -1; + } +#endif /* CUSTOMER_OOB */ +} + +int dhd_wlan_init_plat_data(void) +{ + int err = 0; + + printf("======== %s ========\n", __FUNCTION__); + err = dhd_wlan_init_gpio(); + return err; +} + +void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter) +{ + printf("======== %s ========\n", __FUNCTION__); + dhd_wlan_deinit_gpio(); +} + diff --git a/bcmdhd.100.10.315.x/dhd_ip.c b/bcmdhd.100.10.315.x/dhd_ip.c new file mode 100644 index 0000000..403ad9b --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_ip.c @@ -0,0 +1,1391 @@ +/* + * IP Packet Parser Module. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_ip.c 729083 2017-10-30 10:33:47Z $ + */ +#include +#include + +#include +#include +#include <802.3.h> +#include +#include + +#include + +#include + +#ifdef DHDTCPACK_SUPPRESS +#include +#include +#include +#endif /* DHDTCPACK_SUPPRESS */ + +/* special values */ +/* 802.3 llc/snap header */ +static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; + +pkt_frag_t pkt_frag_info(osl_t *osh, void *p) +{ + uint8 *frame; + int length; + uint8 *pt; /* Pointer to type field */ + uint16 ethertype; + struct ipv4_hdr *iph; /* IP frame pointer */ + int ipl; /* IP frame length */ + uint16 iph_frag; + + ASSERT(osh && p); + + frame = PKTDATA(osh, p); + length = PKTLEN(osh, p); + + /* Process Ethernet II or SNAP-encapsulated 802.3 frames */ + if (length < ETHER_HDR_LEN) { + DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) { + /* Frame is Ethernet II */ + pt = frame + ETHER_TYPE_OFFSET; + } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN && + !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) { + pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN; + } else { + DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + + /* Skip VLAN tag, if any */ + if (ethertype == ETHER_TYPE_8021Q) { + pt += VLAN_TAG_LEN; + + if (pt + ETHER_TYPE_LEN > frame + length) { + DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + } + + if (ethertype 
!= ETHER_TYPE_IP) { + DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n", + __FUNCTION__, ethertype, length)); + return DHD_PKT_FRAG_NONE; + } + + iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN); + ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame)); + + /* We support IPv4 only */ + if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) { + DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl)); + return DHD_PKT_FRAG_NONE; + } + + iph_frag = ntoh16(iph->frag); + + if (iph_frag & IPV4_FRAG_DONT) { + return DHD_PKT_FRAG_NONE; + } else if ((iph_frag & IPV4_FRAG_MORE) == 0) { + return DHD_PKT_FRAG_LAST; + } else { + return (iph_frag & IPV4_FRAG_OFFSET_MASK)? DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST; + } +} + +#ifdef DHDTCPACK_SUPPRESS + +typedef struct { + void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */ + void *pkt_ether_hdr; /* Ethernet header pointer of pkt_in_q */ + int ifidx; + uint8 supp_cnt; + dhd_pub_t *dhdp; +#ifndef TCPACK_SUPPRESS_HOLD_HRT + struct timer_list timer; +#else + struct tasklet_hrtimer timer; +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ +} tcpack_info_t; + +typedef struct _tdata_psh_info_t { + uint32 end_seq; /* end seq# of a received TCP PSH DATA pkt */ + struct _tdata_psh_info_t *next; /* next pointer of the link chain */ +} tdata_psh_info_t; + +typedef struct { + struct { + uint8 src[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */ + uint8 dst[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */ + } ip_addr; + struct { + uint8 src[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */ + uint8 dst[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */ + } tcp_port; + tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */ + tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */ + uint32 last_used_time; /* The last time this tcpdata_info was used(in ms) */ +} tcpdata_info_t; + +/* TCPACK SUPPRESS module */ +typedef struct { + int tcpack_info_cnt; + 
tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM]; /* Info of TCP ACK to send */ + int tcpdata_info_cnt; + tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM]; /* Info of received TCP DATA */ + tdata_psh_info_t *tdata_psh_info_pool; /* Pointer to tdata_psh_info elements pool */ + tdata_psh_info_t *tdata_psh_info_free; /* free tdata_psh_info elements chain in pool */ +#ifdef DHDTCPACK_SUP_DBG + int psh_info_enq_num; /* Number of free TCP PSH DATA info elements in pool */ +#endif /* DHDTCPACK_SUP_DBG */ +} tcpack_sup_module_t; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) +counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1}; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + +static void +_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod, + tdata_psh_info_t *tdata_psh_info) +{ + if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) { + DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod, tdata_psh_info)); + return; + } + + ASSERT(tdata_psh_info->next == NULL); + tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free; + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num++; +#endif // endif +} + +static tdata_psh_info_t* +_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info = NULL; + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod)); + return NULL; + } + + tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free; + if (tdata_psh_info == NULL) + DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__)); + else { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num--; +#endif /* DHDTCPACK_SUP_DBG */ + } + + return tdata_psh_info; +} + +#ifdef BCMSDIO +static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp, + tcpack_sup_module_t 
*tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info_pool = NULL; + uint i; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) + return BCME_ERROR; + + ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL); + ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL); + + tdata_psh_info_pool = + MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); + + if (tdata_psh_info_pool == NULL) + return BCME_NOMEM; + bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num = 0; +#endif /* DHDTCPACK_SUP_DBG */ + + /* Enqueue newly allocated tcpdata psh info elements to the pool */ + for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++) + _tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]); + + ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL); + tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool; + + return BCME_OK; +} + +static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp, + tcpack_sup_module_t *tcpack_sup_mod) +{ + uint i; + tdata_psh_info_t *tdata_psh_info; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n", + __FUNCTION__, __LINE__)); + return; + } + + for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) { + tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + /* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */ + while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) { + tcpdata_info->tdata_psh_info_head = tdata_psh_info->next; + tdata_psh_info->next = NULL; + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info); + } + tcpdata_info->tdata_psh_info_tail = NULL; + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + i = 0; + /* Be sure we recollected all 
tdata_psh_info elements */ + while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; + i++; + } + ASSERT(i == TCPDATA_PSH_INFO_MAXNUM); + MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool, + sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); + tcpack_sup_mod->tdata_psh_info_pool = NULL; + + return; +} +#endif /* BCMSDIO */ + +#ifdef BCMPCIE +#ifndef TCPACK_SUPPRESS_HOLD_HRT +static void dhd_tcpack_send( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + struct timer_list *t +#else + ulong data +#endif +) +#else +static enum hrtimer_restart dhd_tcpack_send(struct hrtimer *timer) +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ +{ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *cur_tbl; + dhd_pub_t *dhdp; + int ifidx; + void* pkt; + unsigned long flags; + +#ifndef TCPACK_SUPPRESS_HOLD_HRT +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + cur_tbl = from_timer(cur_tbl, t, timer); +#else + cur_tbl = (tcpack_info_t *)data; +#endif +#else + cur_tbl = container_of(timer, tcpack_info_t, timer.timer); +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ + + if (!cur_tbl) { + goto done; + } + + dhdp = cur_tbl->dhdp; + if (!dhdp) { + goto done; + } + + flags = dhd_os_tcpacklock(dhdp); + + if (unlikely(dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD)) { + dhd_os_tcpackunlock(dhdp, flags); + goto done; + } + + tcpack_sup_mod = dhdp->tcpack_sup_module; + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + goto done; + } + pkt = cur_tbl->pkt_in_q; + ifidx = cur_tbl->ifidx; + if (!pkt) { + dhd_os_tcpackunlock(dhdp, flags); + goto done; + } + cur_tbl->pkt_in_q = NULL; + cur_tbl->pkt_ether_hdr = NULL; + cur_tbl->ifidx = 0; + cur_tbl->supp_cnt = 0; + if (--tcpack_sup_mod->tcpack_info_cnt < 0) { + DHD_ERROR(("%s %d: ERROR!!! 
tcp_ack_info_cnt %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt)); + } + + dhd_os_tcpackunlock(dhdp, flags); + + dhd_sendpkt(dhdp, ifidx, pkt); + +done: +#ifndef TCPACK_SUPPRESS_HOLD_HRT + return; +#else + return HRTIMER_NORESTART; +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ +} +#endif /* BCMPCIE */ + +int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) +{ + int ret = BCME_OK; + unsigned long flags; + tcpack_sup_module_t *tcpack_sup_module; + uint8 invalid_mode = FALSE; + int prev_mode; + int i = 0; + + flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_module = dhdp->tcpack_sup_module; + prev_mode = dhdp->tcpack_sup_mode; + + if (prev_mode == mode) { + DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode)); + goto exit; + } + + invalid_mode |= (mode >= TCPACK_SUP_LAST_MODE); +#ifdef BCMSDIO + invalid_mode |= (mode == TCPACK_SUP_HOLD); +#endif /* BCMSDIO */ +#ifdef BCMPCIE + invalid_mode |= ((mode == TCPACK_SUP_REPLACE) || (mode == TCPACK_SUP_DELAYTX)); +#endif /* BCMPCIE */ + + if (invalid_mode) { + DHD_ERROR(("%s %d: Invalid TCP ACK Suppress mode %d\n", + __FUNCTION__, __LINE__, mode)); + ret = BCME_BADARG; + goto exit; + } + + printf("%s: TCP ACK Suppress mode %d -> mode %d\n", + __FUNCTION__, dhdp->tcpack_sup_mode, mode); + + /* Pre-process routines to change a new mode as per previous mode */ + switch (prev_mode) { + case TCPACK_SUP_OFF: + if (tcpack_sup_module == NULL) { + tcpack_sup_module = MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t)); + if (tcpack_sup_module == NULL) { + DHD_ERROR(("%s[%d]: Failed to allocate the new memory for " + "tcpack_sup_module\n", __FUNCTION__, __LINE__)); + dhdp->tcpack_sup_mode = TCPACK_SUP_OFF; + ret = BCME_NOMEM; + goto exit; + } + dhdp->tcpack_sup_module = tcpack_sup_module; + } + bzero(tcpack_sup_module, sizeof(tcpack_sup_module_t)); + break; +#ifdef BCMSDIO + case TCPACK_SUP_DELAYTX: + if (tcpack_sup_module) { + /* We won't need tdata_psh_info pool and + * tcpddata_info_tbl anymore + */ + 
_tdata_psh_info_pool_deinit(dhdp, tcpack_sup_module); + tcpack_sup_module->tcpdata_info_cnt = 0; + bzero(tcpack_sup_module->tcpdata_info_tbl, + sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM); + } + + /* For half duplex bus interface, tx precedes rx by default */ + if (dhdp->bus) { + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + } + + if (tcpack_sup_module == NULL) { + DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n", + __FUNCTION__, __LINE__)); + dhdp->tcpack_sup_mode = TCPACK_SUP_OFF; + goto exit; + } + break; +#endif /* BCMSDIO */ + } + + /* Update a new mode */ + dhdp->tcpack_sup_mode = mode; + + /* Process for a new mode */ + switch (mode) { + case TCPACK_SUP_OFF: + ASSERT(tcpack_sup_module != NULL); + /* Clean up timer/data structure for + * any remaining/pending packet or timer. + */ + if (tcpack_sup_module) { + /* Check if previous mode is TCAPACK_SUP_HOLD */ + if (prev_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + tcpack_info_t *tcpack_info_tbl = + &tcpack_sup_module->tcpack_info_tbl[i]; +#ifndef TCPACK_SUPPRESS_HOLD_HRT + del_timer(&tcpack_info_tbl->timer); +#else + hrtimer_cancel(&tcpack_info_tbl->timer.timer); +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ + if (tcpack_info_tbl->pkt_in_q) { + PKTFREE(dhdp->osh, + tcpack_info_tbl->pkt_in_q, TRUE); + tcpack_info_tbl->pkt_in_q = NULL; + } + } + } + MFREE(dhdp->osh, tcpack_sup_module, sizeof(tcpack_sup_module_t)); + dhdp->tcpack_sup_module = NULL; + } else { + DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n", + __FUNCTION__, __LINE__)); + } + break; +#ifdef BCMSDIO + case TCPACK_SUP_REPLACE: + /* There is nothing to configure for this mode */ + break; + case TCPACK_SUP_DELAYTX: + ret = _tdata_psh_info_pool_init(dhdp, tcpack_sup_module); + if (ret != BCME_OK) { + DHD_ERROR(("%s %d: pool init fail with %d\n", + __FUNCTION__, __LINE__, ret)); + break; + } + if (dhdp->bus) { + dhd_bus_set_dotxinrx(dhdp->bus, FALSE); + } + break; +#endif /* BCMSDIO */ +#ifdef BCMPCIE + case 
TCPACK_SUP_HOLD: + dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; + dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + tcpack_info_t *tcpack_info_tbl = + &tcpack_sup_module->tcpack_info_tbl[i]; + tcpack_info_tbl->dhdp = dhdp; +#ifndef TCPACK_SUPPRESS_HOLD_HRT +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + timer_setup(&tcpack_info_tbl->timer, dhd_tcpack_send, 0); +#else + init_timer(&tcpack_info_tbl->timer); + tcpack_info_tbl->timer.data = (ulong)tcpack_info_tbl; + tcpack_info_tbl->timer.function = dhd_tcpack_send; +#endif +#else + tasklet_hrtimer_init(&tcpack_info_tbl->timer, + dhd_tcpack_send, CLOCK_MONOTONIC, HRTIMER_MODE_REL); +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ + } + break; +#endif /* BCMPCIE */ + } + +exit: + dhd_os_tcpackunlock(dhdp, flags); + return ret; +} + +void +dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp) +{ + tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; + int i; + unsigned long flags; + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + flags = dhd_os_tcpacklock(dhdp); + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + if (tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q) { + PKTFREE(dhdp->osh, tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q, + TRUE); + tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q = NULL; + tcpack_sup_mod->tcpack_info_tbl[i].pkt_ether_hdr = NULL; + tcpack_sup_mod->tcpack_info_tbl[i].ifidx = 0; + tcpack_sup_mod->tcpack_info_tbl[i].supp_cnt = 0; + } + } + } else { + tcpack_sup_mod->tcpack_info_cnt = 0; + bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM); + } + + dhd_os_tcpackunlock(dhdp, flags); + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { +#ifndef 
TCPACK_SUPPRESS_HOLD_HRT + del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer); +#else + hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer); +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ + } + } + +exit: + return; +} + +inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt) +{ + uint8 i; + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int tbl_cnt; + int ret = BCME_OK; + void *pdata; + uint32 pktlen; + unsigned long flags; + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + pdata = PKTDATA(dhdp->osh, pkt); + pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata); + + if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, pktlen)); + goto exit; + } + + flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tbl_cnt = tcpack_sup_mod->tcpack_info_cnt; + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM); + + for (i = 0; i < tbl_cnt; i++) { + if (tcpack_info_tbl[i].pkt_in_q == pkt) { + DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n", + __FUNCTION__, __LINE__, pkt, i, tbl_cnt)); + /* This pkt is being transmitted so remove the tcp_ack_info of it. */ + if (i < tbl_cnt - 1) { + bcopy(&tcpack_info_tbl[tbl_cnt - 1], + &tcpack_info_tbl[i], sizeof(tcpack_info_t)); + } + bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t)); + if (--tcpack_sup_mod->tcpack_info_cnt < 0) { + DHD_ERROR(("%s %d: ERROR!!! 
tcp_ack_info_cnt %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt)); + ret = BCME_ERROR; + } + break; + } + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return ret; +} + +static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr, + uint8 *tcp_hdr, uint32 tcp_ack_num) +{ + tcpack_sup_module_t *tcpack_sup_mod; + int i; + tcpdata_info_t *tcpdata_info = NULL; + tdata_psh_info_t *tdata_psh_info = NULL; + bool ret = FALSE; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) + goto exit; + + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + goto exit; + } + + DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]), + tcp_ack_num)); + + for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) { + tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i]; + DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, i, + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tcpdata_info_tmp->tcp_port.src), + ntoh16_ua(tcpdata_info_tmp->tcp_port.dst))); + + /* If either IP address or TCP port number does not match, skip. 
*/ + if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], + tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 && + memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET], + tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 && + memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], + tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 && + memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET], + tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) { + tcpdata_info = tcpdata_info_tmp; + break; + } + } + + if (tcpdata_info == NULL) { + DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__)); + goto exit; + } + + if (tcpdata_info->tdata_psh_info_head == NULL) { + DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__)); + } + + while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) { + if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) { + DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n", + __FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq)); + tcpdata_info->tdata_psh_info_head = tdata_psh_info->next; + tdata_psh_info->next = NULL; + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info); + ret = TRUE; + } else + break; + } + if (tdata_psh_info == NULL) + tcpdata_info->tdata_psh_info_tail = NULL; + +#ifdef DHDTCPACK_SUP_DBG + DHD_TRACE(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + +exit: + return ret; +} + +bool +dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *new_ether_hdr; /* Ethernet header of the new packet */ + uint16 new_ether_type; /* Ethernet type of the new packet */ + uint8 *new_ip_hdr; /* IP header of the new packet */ + uint8 *new_tcp_hdr; /* TCP header of the new packet */ + uint32 new_ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */ + uint16 new_ip_total_len; /* Total length of IP packet for the new packet */ + uint32 new_tcp_hdr_len; /* TCP header length of the new packet */ + 
tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int i; + bool ret = FALSE; + bool set_dotxinrx = TRUE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + new_ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, cur_framelen)); + goto exit; + } + + new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13]; + + if (new_ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, new_ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type)); + + new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + new_ip_hdr_len = IPV4_HLEN(new_ip_hdr); + if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr))); + goto exit; + } + + new_tcp_hdr = new_ip_hdr + new_ip_hdr_len; + cur_framelen -= new_ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + /* is it an ack ? Allow only ACK flag, not to suppress others. 
*/ + if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) { + DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n", + __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET])); + goto exit; + } + + new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]); + new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet has TCP data, so just send */ + if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len); + + new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + DHD_TRACE(("%s %d: TCP ACK with zero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ + flags = dhd_os_tcpacklock(dhdp); +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + counter_printlog(&tack_tbl); + tack_tbl.cnt[0]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + + tcpack_sup_mod = dhdp->tcpack_sup_module; + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) { + /* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[5]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else + set_dotxinrx = FALSE; + + for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) { + void *oldpkt; /* TCPACK packet 
that is already in txq or DelayQ */ + uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr; + uint32 old_ip_hdr_len, old_tcp_hdr_len; + uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */ + + if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) { + DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt)); + break; + } + + if (PKTDATA(dhdp->osh, oldpkt) == NULL) { + DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt)); + break; + } + + old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr; + old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN; + old_ip_hdr_len = IPV4_HLEN(old_ip_hdr); + old_tcp_hdr = old_ip_hdr + old_ip_hdr_len; + old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]); + + DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i, + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* If either of IP address or TCP port number does not match, skip. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
+ */ + if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET], + &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) || + memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET], + &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) + continue; + + old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) { + /* New packet has higher TCP ACK number, so it replaces the old packet */ + if (new_ip_hdr_len == old_ip_hdr_len && + new_tcp_hdr_len == old_tcp_hdr_len) { + ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0); + bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len); + PKTFREE(dhdp->osh, pkt, FALSE); + DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n", + __FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num)); +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[2]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + ret = TRUE; + } else { +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[6]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + DHD_TRACE(("%s %d: lenth mismatch %d != %d || %d != %d" + " ACK %u -> %u\n", __FUNCTION__, __LINE__, + new_ip_hdr_len, old_ip_hdr_len, + new_tcp_hdr_len, old_tcp_hdr_len, + old_tcpack_num, new_tcp_ack_num)); + } + } else if (new_tcp_ack_num == old_tcpack_num) { + set_dotxinrx = TRUE; + /* TCPACK retransmission */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[3]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else { + DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n", + __FUNCTION__, __LINE__, old_tcpack_num, oldpkt, + new_tcp_ack_num, pkt)); + } + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) { + /* No TCPACK packet with the same IP addr and TCP port is found + * in tcp_ack_info_tbl. So add this packet to the table. 
+ */ + DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n", + __FUNCTION__, __LINE__, pkt, new_ether_hdr, + tcpack_sup_mod->tcpack_info_cnt)); + + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt; + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr; + tcpack_sup_mod->tcpack_info_cnt++; +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[1]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else { + ASSERT(i == tcpack_sup_mod->tcpack_info_cnt); + DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", + __FUNCTION__, __LINE__)); + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + /* Unless TCPACK_SUP_DELAYTX, dotxinrx is alwasy TRUE, so no need to set here */ + if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx) + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + + return ret; +} + +bool +dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *ether_hdr; /* Ethernet header of the new packet */ + uint16 ether_type; /* Ethernet type of the new packet */ + uint8 *ip_hdr; /* IP header of the new packet */ + uint8 *tcp_hdr; /* TCP header of the new packet */ + uint32 ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint16 ip_total_len; /* Total length of IP packet for the new packet */ + uint32 tcp_hdr_len; /* TCP header length of the new packet */ + uint32 tcp_seq_num; /* TCP sequence number of the new packet */ + uint16 tcp_data_len; /* TCP DATA length that excludes IP and TCP headers */ + uint32 end_tcp_seq_num; /* TCP seq number of the last byte in the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpdata_info_t *tcpdata_info = NULL; + tdata_psh_info_t *tdata_psh_info; + + int i; + bool ret = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) + goto exit; + + ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + ether_type = ether_hdr[12] << 8 | ether_hdr[13]; + + if (ether_type != 
ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type)); + + ip_hdr = ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + ip_hdr_len = IPV4_HLEN(ip_hdr); + if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr))); + goto exit; + } + + tcp_hdr = ip_hdr + ip_hdr_len; + cur_framelen -= ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]); + tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet is mere TCP ACK, so do nothing */ + if (ip_total_len == ip_hdr_len + tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len); + + if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) { + DHD_TRACE(("%s %d: Not interested TCP DATA packet\n", __FUNCTION__, __LINE__)); + goto exit; + } + + DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]), + tcp_hdr[TCP_FLAGS_OFFSET])); + + flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + /* Look for tcpdata_info that has the same ip src/dst addrs 
and tcp src/dst ports */ + i = 0; + while (i < tcpack_sup_mod->tcpdata_info_cnt) { + tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i]; + uint32 now_in_ms = OSL_SYSUPTIME(); + DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, i, + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tdata_info_tmp->tcp_port.src), + ntoh16_ua(tdata_info_tmp->tcp_port.dst))); + + /* If both IP address and TCP port number match, we found it so break. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. + */ + if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], + (void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 && + memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], + (void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) { + tcpdata_info = tdata_info_tmp; + tcpdata_info->last_used_time = now_in_ms; + break; + } + + if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) { + tdata_psh_info_t *tdata_psh_info_tmp; + tcpdata_info_t *last_tdata_info; + + while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) { + tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next; + tdata_psh_info_tmp->next = NULL; + DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n", + __FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq)); + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp); + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + tcpack_sup_mod->tcpdata_info_cnt--; + ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0); + + last_tdata_info = + &tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt]; + if (i < tcpack_sup_mod->tcpdata_info_cnt) { + ASSERT(last_tdata_info != 
tdata_info_tmp); + bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t)); + } + bzero(last_tdata_info, sizeof(tcpdata_info_t)); + DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt)); + /* Don't increase "i" here, so that the prev last tcpdata_info is checked */ + } else + i++; + } + + tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]); + tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len; + end_tcp_seq_num = tcp_seq_num + tcp_data_len; + + if (tcpdata_info == NULL) { + ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt); + if (i >= TCPDATA_INFO_MAXNUM) { + DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + + /* No TCP flow with the same IP addr and TCP port is found + * in tcp_data_info_tbl. So add this flow to the table. + */ + DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + /* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
+ */ + bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr, + IPV4_ADDR_LEN * 2); + bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port, + TCP_PORT_LEN * 2); + + tcpdata_info->last_used_time = OSL_SYSUPTIME(); + tcpack_sup_mod->tcpdata_info_cnt++; + } + + ASSERT(tcpdata_info != NULL); + + tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod); +#ifdef DHDTCPACK_SUP_DBG + DHD_TRACE(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + if (tdata_psh_info == NULL) { + DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tdata_psh_info->end_seq = end_tcp_seq_num; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[4]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + + DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n", + __FUNCTION__, __LINE__, tdata_psh_info->end_seq)); + + ASSERT(tdata_psh_info->next == NULL); + + if (tcpdata_info->tdata_psh_info_head == NULL) + tcpdata_info->tdata_psh_info_head = tdata_psh_info; + else { + ASSERT(tcpdata_info->tdata_psh_info_tail); + tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info; + } + tcpdata_info->tdata_psh_info_tail = tdata_psh_info; + + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return ret; +} + +bool +dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + uint8 *new_ether_hdr; /* Ethernet header of the new packet */ + uint16 new_ether_type; /* Ethernet type of the new packet */ + uint8 *new_ip_hdr; /* IP header of the new packet */ + uint8 *new_tcp_hdr; /* TCP header of the new packet */ + uint32 new_ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */ + uint16 new_ip_total_len; /* Total length of IP packet for the new packet */ + uint32 new_tcp_hdr_len; /* TCP header length of the 
new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int i, free_slot = TCPACK_INFO_MAXNUM; + bool hold = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) { + goto exit; + } + + if (dhdp->tcpack_sup_ratio == 1) { + goto exit; + } + + new_ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, cur_framelen)); + goto exit; + } + + new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13]; + + if (new_ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, new_ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type)); + + new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + new_ip_hdr_len = IPV4_HLEN(new_ip_hdr); + if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr))); + goto exit; + } + + new_tcp_hdr = new_ip_hdr + new_ip_hdr_len; + cur_framelen -= new_ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + /* is it an ack ? Allow only ACK flag, not to suppress others. 
*/ + if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) { + DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n", + __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET])); + goto exit; + } + + new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]); + new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet has TCP data, so just send */ + if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len); + + new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + DHD_TRACE(("%s %d: TCP ACK with zero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ + flags = dhd_os_tcpacklock(dhdp); + + tcpack_sup_mod = dhdp->tcpack_sup_module; + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + hold = TRUE; + + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */ + uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr; + uint32 old_ip_hdr_len; + uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */ + + if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) { + if (free_slot == TCPACK_INFO_MAXNUM) { + free_slot = i; + } + continue; + } + + if (PKTDATA(dhdp->osh, oldpkt) == NULL) { + DHD_ERROR(("%s %d: oldpkt data NULL!! 
cur idx %d\n", + __FUNCTION__, __LINE__, i)); + hold = FALSE; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr; + old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN; + old_ip_hdr_len = IPV4_HLEN(old_ip_hdr); + old_tcp_hdr = old_ip_hdr + old_ip_hdr_len; + + DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i, + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* If either of IP address or TCP port number does not match, skip. */ + if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET], + &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) || + memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET], + &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) { + continue; + } + + old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + if (IS_TCPSEQ_GE(new_tcp_ack_num, old_tcpack_num)) { + tcpack_info_tbl[i].supp_cnt++; + if (tcpack_info_tbl[i].supp_cnt >= dhdp->tcpack_sup_ratio) { + tcpack_info_tbl[i].pkt_in_q = NULL; + tcpack_info_tbl[i].pkt_ether_hdr = NULL; + tcpack_info_tbl[i].ifidx = 0; + tcpack_info_tbl[i].supp_cnt = 0; + hold = FALSE; + } else { + tcpack_info_tbl[i].pkt_in_q = pkt; + tcpack_info_tbl[i].pkt_ether_hdr = new_ether_hdr; + tcpack_info_tbl[i].ifidx = ifidx; + } + PKTFREE(dhdp->osh, oldpkt, TRUE); + } else { + PKTFREE(dhdp->osh, pkt, TRUE); + } + dhd_os_tcpackunlock(dhdp, flags); + + if (!hold) { +#ifndef TCPACK_SUPPRESS_HOLD_HRT + del_timer_sync(&tcpack_info_tbl[i].timer); +#else + hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer); +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ + } + goto exit; + } + + if (free_slot < TCPACK_INFO_MAXNUM) { + /* No TCPACK packet with the same IP addr and TCP port is found + * in tcp_ack_info_tbl. So add this packet to the table. 
+ */ + DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n", + __FUNCTION__, __LINE__, pkt, new_ether_hdr, + free_slot)); + + tcpack_info_tbl[free_slot].pkt_in_q = pkt; + tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr; + tcpack_info_tbl[free_slot].ifidx = ifidx; + tcpack_info_tbl[free_slot].supp_cnt = 1; +#ifndef TCPACK_SUPPRESS_HOLD_HRT + mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer, + jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay)); +#else + tasklet_hrtimer_start(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer, + ktime_set(0, dhdp->tcpack_sup_delay*1000000), + HRTIMER_MODE_REL); +#endif /* TCPACK_SUPPRESS_HOLD_HRT */ + tcpack_sup_mod->tcpack_info_cnt++; + } else { + DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", + __FUNCTION__, __LINE__)); + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return hold; +} +#endif /* DHDTCPACK_SUPPRESS */ diff --git a/bcmdhd.100.10.315.x/dhd_ip.h b/bcmdhd.100.10.315.x/dhd_ip.h new file mode 100644 index 0000000..846eb4f --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_ip.h @@ -0,0 +1,85 @@ +/* + * Header file describing the common ip parser function. + * + * Provides type definitions and function prototypes used to parse ip packet. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_ip.h 536854 2015-02-24 13:17:29Z $ + */ + +#ifndef _dhd_ip_h_ +#define _dhd_ip_h_ + +#ifdef DHDTCPACK_SUPPRESS +#include +#include +#include +#endif /* DHDTCPACK_SUPPRESS */ + +typedef enum pkt_frag +{ + DHD_PKT_FRAG_NONE = 0, + DHD_PKT_FRAG_FIRST, + DHD_PKT_FRAG_CONT, + DHD_PKT_FRAG_LAST +} pkt_frag_t; + +extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p); + +#ifdef DHDTCPACK_SUPPRESS +#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN) +/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */ +#define TCPACKSZMAX (TCPACKSZMIN + 100) + +/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */ +#define TCPACK_INFO_MAXNUM 4 +#define TCPDATA_INFO_MAXNUM 4 +#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM) + +#define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */ + +#define DEFAULT_TCPACK_SUPP_RATIO 3 +#ifndef CUSTOM_TCPACK_SUPP_RATIO +#define CUSTOM_TCPACK_SUPP_RATIO DEFAULT_TCPACK_SUPP_RATIO +#endif /* CUSTOM_TCPACK_SUPP_RATIO */ + +#define DEFAULT_TCPACK_DELAY_TIME 10 /* ms */ +#ifndef CUSTOM_TCPACK_DELAY_TIME +#define CUSTOM_TCPACK_DELAY_TIME DEFAULT_TCPACK_DELAY_TIME +#endif /* CUSTOM_TCPACK_DELAY_TIME */ + +extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on); +extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp); +extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt); +extern bool 
dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx); +/* #define DHDTCPACK_SUP_DBG */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) +extern counter_tbl_t tack_tbl; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ +#endif /* DHDTCPACK_SUPPRESS */ + +#endif /* _dhd_ip_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_linux.c b/bcmdhd.100.10.315.x/dhd_linux.c new file mode 100644 index 0000000..e8914db --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux.c @@ -0,0 +1,22537 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux.c 771880 2018-07-12 07:25:59Z $ + */ + +#include +#include +#include +#include +#ifdef SHOW_LOGTRACE +#include +#include +#endif /* SHOW_LOGTRACE */ + +#if defined(PCIE_FULL_DONGLE) || defined(SHOW_LOGTRACE) +#include +#endif /* PCIE_FULL_DONGLE */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef ENABLE_ADAPTIVE_SCHED +#include +#endif /* ENABLE_ADAPTIVE_SCHED */ +#include +#ifdef DHD_DUMP_MNGR +#include +#endif /* DHD_DUMP_MNGR */ +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include <802.3.h> + +#include +#include +#include +#include +#ifdef DHD_WET +#include +#endif /* DHD_WET */ +#ifdef PCIE_FULL_DONGLE +#include +#endif // endif +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HAS_WAKELOCK +#include +#endif // endif +#if defined(WL_CFG80211) +#include +#endif /* WL_CFG80211 */ +#ifdef PNO_SUPPORT +#include +#endif // endif +#ifdef RTT_SUPPORT +#include +#endif // endif + +#ifdef CONFIG_COMPAT +#include +#endif // endif + +#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \ + defined(CONFIG_SOC_EXYNOS9820) +#include +#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */ + +#ifdef DHD_L2_FILTER +#include +#include +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + +#ifdef AMPDU_VO_ENABLE +#include <802.1d.h> +#endif /* AMPDU_VO_ENABLE */ + +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ +#include +#ifdef DHD_DEBUG_PAGEALLOC +typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); +void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); +extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); +#endif /* 
DHD_DEBUG_PAGEALLOC */ + +#define IP_PROT_RESERVED 0xFF + +#if defined(DHD_LB) +#if !defined(PCIE_FULL_DONGLE) +#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE" +#endif /* !PCIE_FULL_DONGLE */ +#endif /* DHD_LB */ + +#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \ + defined(DHD_LB_STATS) +#if !defined(DHD_LB) +#error "DHD loadbalance derivatives are supported only if DHD_LB is defined" +#endif /* !DHD_LB */ +#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */ + +#if defined(DHD_LB) +/* Dynamic CPU selection for load balancing */ +#include +#include +#include +#include +#include + +#if !defined(DHD_LB_PRIMARY_CPUS) +#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */ +#endif // endif +#if !defined(DHD_LB_SECONDARY_CPUS) +#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ +#endif // endif + +#define HIST_BIN_SIZE 9 + +static void dhd_rx_napi_dispatcher_fn(struct work_struct * work); + +#if defined(DHD_LB_TXP) +static void dhd_lb_tx_handler(unsigned long data); +static void dhd_tx_dispatcher_work(struct work_struct * work); +static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp); +static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp); + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_tx_lb_pkttag_fr { + struct net_device *net; + int ifidx; +} dhd_tx_lb_pkttag_fr_t; + +#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp) +#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net) + +#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx) +#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx) +#endif /* DHD_LB_TXP */ +#endif /* DHD_LB */ + +#ifdef WL_NATOE +#include +#endif /* WL_NATOE */ + +#ifdef WL_MONITOR +#include +#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */ +#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE) +#endif /* WL_MONITOR */ + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define 
dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#ifdef BLOCK_IPV6_PACKET +#define HEX_PREF_STR "0x" +#define UNI_FILTER_STR "010000000000" +#define ZERO_ADDR_STR "000000000000" +#define ETHER_TYPE_STR "0000" +#define IPV6_FILTER_STR "20" +#define ZERO_TYPE_STR "00" +#endif /* BLOCK_IPV6_PACKET */ + +#if defined(SOFTAP) +extern bool ap_cfg_running; +extern bool ap_fw_loaded; +#endif // endif + +extern void dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname, + char *dump_data, bool direction); + +#ifdef FIX_CPU_MIN_CLOCK +#include +#endif /* FIX_CPU_MIN_CLOCK */ + +#ifdef SET_RANDOM_MAC_SOFTAP +#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL +#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11 +#endif // endif +static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL; +#endif /* SET_RANDOM_MAC_SOFTAP */ + +#ifdef ENABLE_ADAPTIVE_SCHED +#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */ +#ifndef CUSTOM_CPUFREQ_THRESH +#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH +#endif /* CUSTOM_CPUFREQ_THRESH */ +#endif /* ENABLE_ADAPTIVE_SCHED */ + +/* enable HOSTIP cache update from the host side when an eth0:N is up */ +#define AOE_IP_ALIAS_SUPPORT 1 + +#ifdef PROP_TXSTATUS +#include +#include +#endif // endif + +#include +#ifdef WL_ESCAN +#include +#endif + +/* Maximum STA per radio */ +#define DHD_MAX_STA 32 + +#ifdef CUSTOMER_HW_AMLOGIC +#include +#endif + +const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; +const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; +#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]] + +#ifdef ARP_OFFLOAD_SUPPORT +void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx); +static int dhd_inetaddr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inetaddr_notifier = { + .notifier_call = dhd_inetaddr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be 
+ * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inetaddr_notifier_registered = FALSE; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +int dhd_inet6addr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inet6addr_notifier = { + .notifier_call = dhd_inet6addr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inet6addr_notifier_registered = FALSE; +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +volatile bool dhd_mmc_suspend = FALSE; +DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) +extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(void *dhd_info, void *event_data, u8 event); +#endif // endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +MODULE_LICENSE("GPL and additional rights"); +#endif /* LinuxVer */ + +#if defined(MULTIPLE_SUPPLICANT) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) +DEFINE_MUTEX(_dhd_mutex_lock_); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +#endif + +#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG +#define MAX_CONSECUTIVE_HANG_COUNTS 5 +#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ + +#include + +#ifdef DHD_ULP +#include +#endif /* DHD_ULP */ + +#ifndef PROP_TXSTATUS +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) +#else +#define DBUS_RX_BUFFER_SIZE_DHD(net) 
(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) +#endif // endif + +#ifdef PROP_TXSTATUS +extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx); +extern void dhd_wlfc_plat_init(void *dhd); +extern void dhd_wlfc_plat_deinit(void *dhd); +#endif /* PROP_TXSTATUS */ +extern uint sd_f2_blocksize; +extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size); + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) +const char * +print_tainted() +{ + return ""; +} +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ + +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +extern wl_iw_extra_params_t g_wl_iw_params; +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef CONFIG_PARTIALSUSPEND_SLP +#include +#define CONFIG_HAS_EARLYSUSPEND +#define DHD_USE_EARLYSUSPEND +#define register_early_suspend register_pre_suspend +#define unregister_early_suspend unregister_pre_suspend +#define early_suspend pre_suspend +#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50 +#else +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ +#endif /* CONFIG_PARTIALSUSPEND_SLP */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) +#include +#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ + +#if defined(BCMPCIE) +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval); +#else +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); +#endif /* OEM_ANDROID && BCMPCIE */ + +#ifdef PKT_FILTER_SUPPORT +extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); +#endif // endif + +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, + 
u8* program, uint32 program_len); +static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, + uint32 mode, uint32 enable); +static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id); +#endif /* PKT_FILTER_SUPPORT && APF */ + +#if defined(ARGOS_NOTIFY_CB) +int argos_register_notifier_init(struct net_device *net); +int argos_register_notifier_deinit(void); + +extern int sec_argos_register_notifier(struct notifier_block *n, char *label); +extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label); + +static int argos_status_notifier_wifi_cb(struct notifier_block *notifier, + unsigned long speed, void *v); +static int argos_status_notifier_p2p_cb(struct notifier_block *notifier, + unsigned long speed, void *v); + +/* PCIe interrupt affinity threshold (Mbps) */ +#define PCIE_IRQ_AFFINITY_THRESHOLD 300 + +/* ARGOS notifer data */ +static struct notifier_block argos_wifi; /* STA */ +static struct notifier_block argos_p2p; /* P2P */ + +typedef struct { + struct net_device *wlan_primary_netdev; + int argos_rps_cpus_enabled; +} argos_rps_ctrl; + +argos_rps_ctrl argos_rps_ctrl_data; +#define RPS_TPUT_THRESHOLD 300 +#define DELAY_TO_CLEAR_RPS_CPUS 300 +#endif // endif + +#if defined(BT_OVER_SDIO) +extern void wl_android_set_wifi_on_flag(bool enable); +#endif /* BT_OVER_SDIO */ + +#ifdef DHD_FW_COREDUMP +static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); +#endif /* DHD_FW_COREDUMP */ + +#ifdef DHD_LOG_DUMP +/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE +* 2: DLD_BUF_TYPE_SPECIAL +*/ +#define DLD_BUFFER_NUM 3 + +#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB +#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */ +#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */ + +#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_GENERAL_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * 
CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024) +#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (384 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB) +#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */ +#ifdef CONFIG_LOG_BUF_SHIFT +/* 15% of kernel log buf size, if for example klog buf size is 512KB +* 15% of 512KB ~= 80KB +*/ +#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \ + (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100)) +#endif /* CONFIG_LOG_BUF_SHIFT */ + +#define LOG_DUMP_COOKIE_BUFSIZE 1024u +struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM]; +static int dld_buf_size[DLD_BUFFER_NUM] = { + LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */ + LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */ + LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */ +}; + +typedef struct { + char *hdr_str; + log_dump_section_type_t sec_type; +} dld_hdr_t; + +/* Only header for log dump buffers is stored in array + * header for sections like 'dhd dump', 'ext trap' + * etc, is not in the array, because they are not log + * ring buffers + */ +dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = { + {GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL}, + {PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE}, + {SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL} +}; + +static void dhd_log_dump_init(dhd_pub_t *dhd); +static void dhd_log_dump_deinit(dhd_pub_t *dhd); +static void dhd_log_dump(void *handle, void *event_info, u8 event); +static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type); + +#define DHD_PRINT_BUF_NAME_LEN 30 +static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size); +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#include +#include +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef DHD_DEBUG_UART +#include +#define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu" +static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event); +static 
void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd); +#endif /* DHD_DEBUG_UART */ + +static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); +static struct notifier_block dhd_reboot_notifier = { + .notifier_call = dhd_reboot_callback, + .priority = 1, +}; + +#ifdef BCMPCIE +static int is_reboot = 0; +#endif /* BCMPCIE */ + +dhd_pub_t *g_dhd_pub = NULL; + +#if defined(BT_OVER_SDIO) +#include "dhd_bt_interface.h" +#endif /* defined (BT_OVER_SDIO) */ + +#ifdef SHOW_LOGTRACE +static int dhd_trace_open_proc(struct inode *inode, struct file *file); +ssize_t dhd_trace_read_proc(struct file *file, char *buffer, size_t tt, loff_t *loff); + +static const struct file_operations proc_file_fops = { + .read = dhd_trace_read_proc, + .open = dhd_trace_open_proc, + .release = seq_release, +}; +#endif // endif + +#ifdef WL_STATIC_IF +bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev); +#endif /* WL_STATIC_IF */ + +atomic_t exit_in_progress = ATOMIC_INIT(0); + +typedef struct dhd_if_event { + struct list_head list; + wl_event_data_if_t event; + char name[IFNAMSIZ+1]; + uint8 mac[ETHER_ADDR_LEN]; +} dhd_if_event_t; + +/* Interface control information */ +typedef struct dhd_if { + struct dhd_info *info; /* back pointer to dhd_info */ + /* OS/stack specifics */ + struct net_device *net; + int idx; /* iface idx in dongle */ + uint subunit; /* subunit */ + uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ + bool set_macaddress; + bool set_multicast; + uint8 bssidx; /* bsscfg index for the interface */ + bool attached; /* Delayed attachment when unset */ + bool txflowcontrol; /* Per interface flow control indicator */ + char name[IFNAMSIZ+1]; /* linux interface name */ + char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */ + struct net_device_stats stats; +#ifdef PCIE_FULL_DONGLE + struct list_head sta_list; /* sll of associated stations */ + spinlock_t sta_list_lock; /* lock for manipulating sll */ +#endif /* 
PCIE_FULL_DONGLE */ + uint32 ap_isolate; /* ap-isolation settings */ +#ifdef DHD_L2_FILTER + bool parp_enable; + bool parp_discard; + bool parp_allnode; + arp_table_t *phnd_arp_table; + /* for Per BSS modification */ + bool dhcp_unicast; + bool block_ping; + bool grat_arp; + bool block_tdls; +#endif /* DHD_L2_FILTER */ +#ifdef DHD_MCAST_REGEN + bool mcast_regen_bss_enable; +#endif // endif + bool rx_pkt_chainable; /* set all rx packet to chainable config by default */ + cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */ + uint8 tx_paths_active; + bool del_in_progress; + bool static_if; /* used to avoid some operations on static_if */ +} dhd_if_t; + +struct ipv6_work_info_t { + uint8 if_idx; + char ipv6_addr[IPV6_ADDR_LEN]; + unsigned long event; +}; +static void dhd_process_daemon_msg(struct sk_buff *skb); +static void dhd_destroy_to_notifier_skt(void); +static int dhd_create_to_notifier_skt(void); +static struct sock *nl_to_event_sk = NULL; +int sender_pid = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +struct netlink_kernel_cfg dhd_netlink_cfg = { + .groups = 1, + .input = dhd_process_daemon_msg, +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */ + +typedef struct dhd_dump { + uint8 *buf; + int bufsize; + uint8 *hscb_buf; + int hscb_bufsize; +} dhd_dump_t; + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +struct dhd_rx_tx_work { + struct work_struct work; + struct sk_buff *skb; + struct net_device *net; + struct dhd_pub *pub; +}; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +/* When Perimeter locks are deployed, any blocking calls must be preceeded + * with a PERIM UNLOCK and followed by a PERIM LOCK. + * Examples of blocking calls are: schedule_timeout(), down_interruptible(), + * wait_event_timeout(). 
+ */ + +/* Local private structure (extension of pub) */ +typedef struct dhd_info { +#if defined(WL_WIRELESS_EXT) + wl_iw_t iw; /* wireless extensions state (must be first) */ +#endif /* defined(WL_WIRELESS_EXT) */ + dhd_pub_t pub; + /* for supporting multiple interfaces. + * static_ifs hold the net ifaces without valid FW IF + */ + dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS]; + + wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */ + char fw_path[PATH_MAX]; /* path to firmware image */ + char nv_path[PATH_MAX]; /* path to nvram vars file */ + char clm_path[PATH_MAX]; /* path to clm vars file */ + char conf_path[PATH_MAX]; /* path to config vars file */ +#ifdef DHD_UCODE_DOWNLOAD + char uc_path[PATH_MAX]; /* path to ucode image */ +#endif /* DHD_UCODE_DOWNLOAD */ + + /* serialize dhd iovars */ + struct mutex dhd_iovar_mutex; + + struct semaphore proto_sem; +#ifdef PROP_TXSTATUS + spinlock_t wlfc_spinlock; + +#ifdef BCMDBUS + ulong wlfc_lock_flags; + ulong wlfc_pub_lock_flags; +#endif /* BCMDBUS */ +#endif /* PROP_TXSTATUS */ + wait_queue_head_t ioctl_resp_wait; + wait_queue_head_t d3ack_wait; + wait_queue_head_t dhd_bus_busy_state_wait; + wait_queue_head_t dmaxfer_wait; + uint32 default_wd_interval; + + struct timer_list timer; + bool wd_timer_valid; + struct tasklet_struct tasklet; + spinlock_t sdlock; + spinlock_t txqlock; + spinlock_t dhd_lock; +#ifdef BCMDBUS + ulong txqlock_flags; +#else + + struct semaphore sdsem; + tsk_ctl_t thr_dpc_ctl; + tsk_ctl_t thr_wdt_ctl; +#endif /* BCMDBUS */ + + tsk_ctl_t thr_rxf_ctl; + spinlock_t rxf_lock; + bool rxthread_enabled; + + /* Wakelocks */ +#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + struct wake_lock wl_wifi; /* Wifi wakelock */ + struct wake_lock wl_rxwake; /* Wifi rx wakelock */ + struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ + struct wake_lock wl_wdwake; /* Wifi wd wakelock */ + struct wake_lock wl_evtwake; /* Wifi event wakelock */ 
+ struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */ + struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */ +#ifdef BCMPCIE_OOB_HOST_WAKE + struct wake_lock wl_intrwake; /* Host wakeup wakelock */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + struct wake_lock wl_scanwake; /* Wifi scan wakelock */ +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + /* net_device interface lock, prevent race conditions among net_dev interface + * calls and wifi_on or wifi_off + */ + struct mutex dhd_net_if_mutex; + struct mutex dhd_suspend_mutex; +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + struct mutex dhd_apf_mutex; +#endif /* PKT_FILTER_SUPPORT && APF */ +#endif // endif + spinlock_t wakelock_spinlock; + spinlock_t wakelock_evt_spinlock; + uint32 wakelock_counter; + int wakelock_wd_counter; + int wakelock_rx_timeout_enable; + int wakelock_ctrl_timeout_enable; + bool waive_wakelock; + uint32 wakelock_before_waive; + + /* Thread to issue ioctl for multicast */ + wait_queue_head_t ctrl_wait; + atomic_t pend_8021x_cnt; + dhd_attach_states_t dhd_state; +#ifdef SHOW_LOGTRACE + dhd_event_log_t event_data; +#endif /* SHOW_LOGTRACE */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + u32 pend_ipaddr; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef DHDTCPACK_SUPPRESS + spinlock_t tcpack_lock; +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef FIX_CPU_MIN_CLOCK + bool cpufreq_fix_status; + struct mutex cpufreq_fix; + struct pm_qos_request dhd_cpu_qos; +#ifdef FIX_BUS_MIN_CLOCK + struct pm_qos_request dhd_bus_qos; +#endif /* FIX_BUS_MIN_CLOCK */ +#endif /* FIX_CPU_MIN_CLOCK */ + void *dhd_deferred_wq; +#ifdef DEBUG_CPU_FREQ + struct notifier_block freq_trans; + int __percpu *new_freq; +#endif 
// endif + unsigned int unit; + struct notifier_block pm_notifier; +#ifdef DHD_PSTA + uint32 psta_mode; /* PSTA or PSR */ +#endif /* DHD_PSTA */ +#ifdef DHD_WET + uint32 wet_mode; +#endif /* DHD_WET */ +#ifdef DHD_DEBUG + dhd_dump_t *dump; + struct timer_list join_timer; + u32 join_timeout_val; + bool join_timer_active; + uint scan_time_count; + struct timer_list scan_timer; + bool scan_timer_active; +#endif // endif +#if defined(DHD_LB) + /* CPU Load Balance dynamic CPU selection */ + + /* Variable that tracks the currect CPUs available for candidacy */ + cpumask_var_t cpumask_curr_avail; + + /* Primary and secondary CPU mask */ + cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */ + cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */ + + struct notifier_block cpu_notifier; + + /* Tasklet to handle Tx Completion packet freeing */ + struct tasklet_struct tx_compl_tasklet; + atomic_t tx_compl_cpu; + + /* Tasklet to handle RxBuf Post during Rx completion */ + struct tasklet_struct rx_compl_tasklet; + atomic_t rx_compl_cpu; + + /* Napi struct for handling rx packet sendup. Packets are removed from + * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then + * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled + * to run to rx_napi_cpu. 
+ */ + struct sk_buff_head rx_pend_queue ____cacheline_aligned; + struct sk_buff_head rx_napi_queue ____cacheline_aligned; + struct napi_struct rx_napi_struct ____cacheline_aligned; + atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ + struct net_device *rx_napi_netdev; /* netdev of primary interface */ + + struct work_struct rx_napi_dispatcher_work; + struct work_struct tx_compl_dispatcher_work; + struct work_struct tx_dispatcher_work; + struct work_struct rx_compl_dispatcher_work; + + /* Number of times DPC Tasklet ran */ + uint32 dhd_dpc_cnt; + /* Number of times NAPI processing got scheduled */ + uint32 napi_sched_cnt; + /* Number of times NAPI processing ran on each available core */ + uint32 *napi_percpu_run_cnt; + /* Number of times RX Completions got scheduled */ + uint32 rxc_sched_cnt; + /* Number of times RX Completion ran on each available core */ + uint32 *rxc_percpu_run_cnt; + /* Number of times TX Completions got scheduled */ + uint32 txc_sched_cnt; + /* Number of times TX Completions ran on each available core */ + uint32 *txc_percpu_run_cnt; + /* CPU status */ + /* Number of times each CPU came online */ + uint32 *cpu_online_cnt; + /* Number of times each CPU went offline */ + uint32 *cpu_offline_cnt; + + /* Number of times TX processing run on each core */ + uint32 *txp_percpu_run_cnt; + /* Number of times TX start run on each core */ + uint32 *tx_start_percpu_run_cnt; + + /* Tx load balancing */ + + /* TODO: Need to see if batch processing is really required in case of TX + * processing. In case of RX the Dongle can send a bunch of rx completions, + * hence we took a 3 queue approach + * enque - adds the skbs to rx_pend_queue + * dispatch - uses a lock and adds the list of skbs from pend queue to + * napi queue + * napi processing - copies the pend_queue into a local queue and works + * on it. 
+ * But for TX its going to be 1 skb at a time, so we are just thinking + * of using only one queue and use the lock supported skb queue functions + * to add and process it. If its in-efficient we'll re-visit the queue + * design. + */ + + /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */ + /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */ + /* + * From the Tasklet that actually sends out data + * copy the list tx_pend_queue into tx_active_queue. There by we need + * to spinlock to only perform the copy the rest of the code ie to + * construct the tx_pend_queue and the code to process tx_active_queue + * can be lockless. The concept is borrowed as is from RX processing + */ + /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */ + + /* Control TXP in runtime, enable by default */ + atomic_t lb_txp_active; + + /* + * When the NET_TX tries to send a TX packet put it into tx_pend_queue + * For now, the processing tasklet will also direcly operate on this + * queue + */ + struct sk_buff_head tx_pend_queue ____cacheline_aligned; + + /* cpu on which the DHD Tx is happenning */ + atomic_t tx_cpu; + + /* CPU on which the Network stack is calling the DHD's xmit function */ + atomic_t net_tx_cpu; + + /* Tasklet context from which the DHD's TX processing happens */ + struct tasklet_struct tx_tasklet; + + /* + * Consumer Histogram - NAPI RX Packet processing + * ----------------------------------------------- + * On Each CPU, when the NAPI RX Packet processing call back was invoked + * how many packets were processed is captured in this data structure. + * Now its difficult to capture the "exact" number of packets processed. + * So considering the packet counter to be a 32 bit one, we have a + * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets + * processed is rounded off to the next power of 2 and put in the + * approriate "bin" the value in the bin gets incremented. 
+ * For example, assume that in CPU 1 if NAPI Rx runs 3 times + * and the packet count processed is as follows (assume the bin counters are 0) + * iteration 1 - 10 (the bin counter 2^4 increments to 1) + * iteration 2 - 30 (the bin counter 2^5 increments to 1) + * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) + */ + uint32 *napi_rx_hist[HIST_BIN_SIZE]; + uint32 *txc_hist[HIST_BIN_SIZE]; + uint32 *rxc_hist[HIST_BIN_SIZE]; +#endif /* DHD_LB */ + +#ifdef SHOW_LOGTRACE +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + tsk_ctl_t thr_logtrace_ctl; +#else + struct delayed_work event_log_dispatcher_work; +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ +#endif /* SHOW_LOGTRACE */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + struct kobject dhd_kobj; + struct timer_list timesync_timer; +#if defined(BT_OVER_SDIO) + char btfw_path[PATH_MAX]; +#endif /* defined (BT_OVER_SDIO) */ +#ifdef WL_MONITOR + struct net_device *monitor_dev; /* monitor pseudo device */ + struct sk_buff *monitor_skb; + uint monitor_len; + uint monitor_type; /* monitor pseudo device */ +#endif /* WL_MONITOR */ +#if defined(BT_OVER_SDIO) + struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */ + int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */ +#endif /* BT_OVER_SDIO */ +#ifdef SHOW_LOGTRACE + struct sk_buff_head evt_trace_queue ____cacheline_aligned; +#endif // endif +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + struct workqueue_struct *tx_wq; + struct workqueue_struct *rx_wq; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#ifdef DHD_DEBUG_UART + bool duart_execute; +#endif /* DHD_DEBUG_UART */ + struct mutex logdump_lock; + /* indicates mem_dump was scheduled as work queue or called directly */ + bool scheduled_memdump; + /* indicates sssrdump is called directly instead of scheduling work queue */ + bool no_wq_sssrdump; +#if defined(PCIE_FULL_DONGLE) + /* Spinlock used in Linux 
implementation of dhd_pcie_backplane_access_[un]lock() */ + spinlock_t backplane_access_lock; +#endif /* defined(PCIE_FULL_DONGLE) */ +} dhd_info_t; + +#ifdef WL_MONITOR +#define MONPKT_EXTRA_LEN 48u +#endif /* WL_MONITOR */ + +#define DHDIF_FWDER(dhdif) FALSE + +#if defined(BT_OVER_SDIO) +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = TRUE; +#else +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = FALSE; +#endif // endif +/* Flag to indicate if we should download firmware on driver load */ +uint dhd_download_fw_on_driverload = TRUE; + +/* Definitions to provide path to the firmware and nvram + * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" + */ +char firmware_path[MOD_PARAM_PATHLEN]; +char nvram_path[MOD_PARAM_PATHLEN]; +char clm_path[MOD_PARAM_PATHLEN]; +char config_path[MOD_PARAM_PATHLEN]; +#ifdef DHD_UCODE_DOWNLOAD +char ucode_path[MOD_PARAM_PATHLEN]; +#endif /* DHD_UCODE_DOWNLOAD */ + +module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660); + +/* backup buffer for firmware and nvram path */ +char fw_bak_path[MOD_PARAM_PATHLEN]; +char nv_bak_path[MOD_PARAM_PATHLEN]; + +/* information string to keep firmware, chio, cheip version info visiable from log */ +char info_string[MOD_PARAM_INFOLEN]; +module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); +int op_mode = 0; +int disable_proptx = 0; +module_param(op_mode, int, 0644); +extern int wl_control_wl_start(struct net_device *dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS)) +struct semaphore dhd_registration_sem; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + +#ifdef DHD_LOG_DUMP +int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE; +module_param(logdump_max_filesize, int, 0644); +int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE; +module_param(logdump_max_bufsize, int, 0644); +int logdump_prsrv_tailsize = 
DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE; +int logdump_periodic_flush = FALSE; +module_param(logdump_periodic_flush, int, 0644); +#ifdef EWP_ECNTRS_LOGGING +int logdump_ecntr_enable = TRUE; +#else +int logdump_ecntr_enable = FALSE; +#endif /* EWP_ECNTRS_LOGGING */ +module_param(logdump_ecntr_enable, int, 0644); +#endif /* DHD_LOG_DUMP */ +#ifdef EWP_EDL +int host_edl_support = TRUE; +module_param(host_edl_support, int, 0644); +#endif // endif + +/* deferred handlers */ +static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event); +static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event); +#ifdef WL_NATOE +static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event); +static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event); +#endif /* WL_NATOE */ + +#ifdef DHD_UPDATE_INTF_MAC +static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event); +#endif /* DHD_UPDATE_INTF_MAC */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#ifdef WL_CFG80211 +extern void dhd_netdev_free(struct net_device *ndev); +#endif /* WL_CFG80211 */ +static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev); + +#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER)) +/* update rx_pkt_chainable state of dhd interface */ +static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx); +#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */ + +/* Error bits */ +module_param(dhd_msg_level, int, 0); +#if defined(WL_WIRELESS_EXT) +module_param(iw_msg_level, int, 0); +#endif +#ifdef WL_CFG80211 +module_param(wl_dbg_level, int, 0); +#endif 
+module_param(android_msg_level, int, 0); +module_param(config_msg_level, int, 0); + +#ifdef ARP_OFFLOAD_SUPPORT +/* ARP offload enable */ +uint dhd_arp_enable = TRUE; +module_param(dhd_arp_enable, uint, 0); + +/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ + +#ifdef ENABLE_ARP_SNOOP_MODE +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY; +#else +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; +#endif /* ENABLE_ARP_SNOOP_MODE */ + +module_param(dhd_arp_mode, uint, 0); +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* Disable Prop tx */ +module_param(disable_proptx, int, 0644); +/* load firmware and/or nvram values from the filesystem */ +module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); +module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660); +module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0); +#ifdef DHD_UCODE_DOWNLOAD +module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660); +#endif /* DHD_UCODE_DOWNLOAD */ + +/* wl event forwarding */ +#ifdef WL_EVENT_ENAB +uint wl_event_enable = true; +#else +uint wl_event_enable = false; +#endif /* WL_EVENT_ENAB */ +module_param(wl_event_enable, uint, 0660); + +/* wl event forwarding */ +#ifdef LOGTRACE_PKT_SENDUP +uint logtrace_pkt_sendup = true; +#else +uint logtrace_pkt_sendup = false; +#endif /* LOGTRACE_PKT_SENDUP */ +module_param(logtrace_pkt_sendup, uint, 0660); + +/* Watchdog interval */ +/* extend watchdog expiration to 2 seconds when DPC is running */ +#define WATCHDOG_EXTEND_INTERVAL (2000) + +uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS; +module_param(dhd_watchdog_ms, uint, 0); + +#if defined(DHD_DEBUG) +/* Console poll interval */ +uint dhd_console_ms = 0; +module_param(dhd_console_ms, uint, 0644); +#else +uint dhd_console_ms = 0; +#endif /* DHD_DEBUG */ + +uint dhd_slpauto = TRUE; +module_param(dhd_slpauto, uint, 0); + +#ifdef PKT_FILTER_SUPPORT +/* Global 
Pkt filter enable control */ +uint dhd_pkt_filter_enable = TRUE; +module_param(dhd_pkt_filter_enable, uint, 0); +#endif // endif + +/* Pkt filter init setup */ +uint dhd_pkt_filter_init = 0; +module_param(dhd_pkt_filter_init, uint, 0); + +/* Pkt filter mode control */ +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER +uint dhd_master_mode = FALSE; +#else +uint dhd_master_mode = FALSE; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ +module_param(dhd_master_mode, uint, 0); + +int dhd_watchdog_prio = 0; +module_param(dhd_watchdog_prio, int, 0); + +/* DPC thread priority */ +int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING; +module_param(dhd_dpc_prio, int, 0); + +/* RX frame thread priority */ +int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING; +module_param(dhd_rxf_prio, int, 0); + +#if !defined(BCMDBUS) +extern int dhd_dongle_ramsize; +module_param(dhd_dongle_ramsize, int, 0); +#endif /* !BCMDBUS */ + +#ifdef WL_CFG80211 +int passive_channel_skip = 0; +module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); +#endif /* WL_CFG80211 */ + +#ifdef DHD_MSI_SUPPORT +uint enable_msi = TRUE; +module_param(enable_msi, uint, 0); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_SSSR_DUMP +extern uint support_sssr_dump; +module_param(support_sssr_dump, uint, 0); +#endif /* DHD_SSSR_DUMP */ + +/* Keep track of number of instances */ +static int dhd_found = 0; +static int instance_base = 0; /* Starting instance number */ +module_param(instance_base, int, 0644); + +#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE) +static int dhd_napi_weight = 32; +module_param(dhd_napi_weight, int, 0644); +#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */ + +#ifdef PCIE_FULL_DONGLE +extern int h2d_max_txpost; +module_param(h2d_max_txpost, int, 0644); + +extern uint dma_ring_indices; +module_param(dma_ring_indices, uint, 0644); + +extern bool h2d_phase; +module_param(h2d_phase, bool, 0644); +extern bool force_trap_bad_h2d_phase; +module_param(force_trap_bad_h2d_phase, bool, 0644); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef 
FORCE_TPOWERON +/* + * On Fire's reference platform, coming out of L1.2, + * there is a constant delay of 45us between CLKREQ# and stable REFCLK + * Due to this delay, with tPowerOn < 50 + * there is a chance of the refclk sense to trigger on noise. + * + * 0x29 when written to L1SSControl2 translates to 50us. + */ +#define FORCE_TPOWERON_50US 0x29 +uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */ +module_param(tpoweron_scale, uint, 0644); +#endif /* FORCE_TPOWERON */ + +#ifdef DHD_DHCP_DUMP +struct bootp_fmt { + struct iphdr ip_header; + struct udphdr udp_header; + uint8 op; + uint8 htype; + uint8 hlen; + uint8 hops; + uint32 transaction_id; + uint16 secs; + uint16 flags; + uint32 client_ip; + uint32 assigned_ip; + uint32 server_ip; + uint32 relay_ip; + uint8 hw_address[16]; + uint8 server_name[64]; + uint8 file_name[128]; + uint8 options[312]; +}; + +static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 }; +static const char dhcp_ops[][10] = { + "NA", "REQUEST", "REPLY" +}; +static const char dhcp_types[][10] = { + "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM" +}; +static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx); +#endif /* DHD_DHCP_DUMP */ + +#ifdef FILTER_IE +#define FILTER_IE_PATH "/etc/wifi/filter_ie" +#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */ +#define FILE_BLOCK_READ_SIZE 256 +#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs) +#endif /* FILTER_IE */ + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) + +#ifdef DHD_ICMP_DUMP +#include +static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx); +#endif /* DHD_ICMP_DUMP */ + +/* Functions to manage sysfs interface for dhd */ +static int dhd_sysfs_init(dhd_info_t *dhd); +static void dhd_sysfs_exit(dhd_info_t *dhd); + +#ifdef SHOW_LOGTRACE +static char *logstrs_path = 
"/data/misc/wifi/logstrs.bin"; +char *st_str_file_path = "/data/misc/wifi/rtecdc.bin"; +static char *map_file_path = "/data/misc/wifi/rtecdc.map"; +static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin"; +static char *rom_map_file_path = "/data/misc/wifi/roml.map"; +static char *ram_file_str = "rtecdc"; +static char *rom_file_str = "roml"; + +module_param(logstrs_path, charp, S_IRUGO); +module_param(st_str_file_path, charp, S_IRUGO); +module_param(map_file_path, charp, S_IRUGO); +module_param(rom_st_str_file_path, charp, S_IRUGO); +module_param(rom_map_file_path, charp, S_IRUGO); + +static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp); +static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end); +static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, + char *map_file); +#endif /* SHOW_LOGTRACE */ + +#if defined(DHD_LB) + +static void +dhd_lb_set_default_cpus(dhd_info_t *dhd) +{ + /* Default CPU allocation for the jobs */ + atomic_set(&dhd->rx_napi_cpu, 1); + atomic_set(&dhd->rx_compl_cpu, 2); + atomic_set(&dhd->tx_compl_cpu, 2); + atomic_set(&dhd->tx_cpu, 2); + atomic_set(&dhd->net_tx_cpu, 0); +} + +static void +dhd_cpumasks_deinit(dhd_info_t *dhd) +{ + free_cpumask_var(dhd->cpumask_curr_avail); + free_cpumask_var(dhd->cpumask_primary); + free_cpumask_var(dhd->cpumask_primary_new); + free_cpumask_var(dhd->cpumask_secondary); + free_cpumask_var(dhd->cpumask_secondary_new); +} + +static int +dhd_cpumasks_init(dhd_info_t *dhd) +{ + int id; + uint32 cpus, num_cpus = num_possible_cpus(); + int ret = 0; + + DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__, + DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS)); + + if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || + 
!alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { + DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask); + cpumask_clear(dhd->cpumask_primary); + cpumask_clear(dhd->cpumask_secondary); + + if (num_cpus > 32) { + DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus)); + ASSERT(0); + } + + cpus = DHD_LB_PRIMARY_CPUS; + for (id = 0; id < num_cpus; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_primary); + } + + cpus = DHD_LB_SECONDARY_CPUS; + for (id = 0; id < num_cpus; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_secondary); + } + + return ret; +fail: + dhd_cpumasks_deinit(dhd); + return ret; +} + +/* + * The CPU Candidacy Algorithm + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The available CPUs for selection are divided into two groups + * Primary Set - A CPU mask that carries the First Choice CPUs + * Secondary Set - A CPU mask that carries the Second Choice CPUs. + * + * There are two types of Job, that needs to be assigned to + * the CPUs, from one of the above mentioned CPU group. The Jobs are + * 1) Rx Packet Processing - napi_cpu + * 2) Completion Processiong (Tx, RX) - compl_cpu + * + * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes + * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy + * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu. + * If there are more processors free, it assigns one to compl_cpu. + * It also tries to ensure that both napi_cpu and compl_cpu are not on the same + * CPU, as much as possible. + * + * By design, both Tx and Rx completion jobs are run on the same CPU core, as it + * would allow Tx completion skb's to be released into a local free pool from + * which the rx buffer posts could have been serviced. 
it is important to note
 * that a Tx packet may not have a large enough buffer for rx posting.
 */
/* Re-run the CPU candidacy algorithm: pick CPUs for the NAPI rx job, the
 * tx job and the (tx+rx) completion job from the primary ("big") mask
 * first, falling back to the secondary ("little") mask, and publish the
 * choices into dhd's atomic cpu fields. Called from the hotplug callbacks
 * below whenever a CPU comes online or goes offline.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus; /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
	uint32 tx_cpu = 0; /* cpu selected for tx processing job */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* In case there are no more CPUs, do completions & Tx in same CPU */
		compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = tx_cpu;
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		/* NOTE(review): a job value of 0 is used both as "unassigned" and
		 * as "CPU0"; a job legitimately placed on CPU0 by the primary pass
		 * would be re-picked from the secondary mask here — confirm CPU0 is
		 * never a member of the primary mask.
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (tx_cpu == 0) {
			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for tx processing, choose CPU 0 */
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		/* NOTE(review): hard-coded fallback CPUs 1 and 2 would violate the
		 * ASSERTs below on systems with nr_cpu_ids <= 2 — TODO confirm the
		 * supported platforms always have at least 3 possible CPUs.
		 */
		napi_cpu = 1;
		compl_cpu = 0;
		tx_cpu = 2;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);
	ASSERT(tx_cpu < nr_cpu_ids);

	/* Publish the selections; readers pick these up atomically when
	 * scheduling the corresponding work items.
	 */
	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	atomic_set(&dhd->tx_cpu, tx_cpu);

	return;
}

/*
 * Function to handle CPU Hotplug notifications.
 * One of the tasks it does is to trigger the CPU Candidacy algorithm
 * for load balancing.
 */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))

/* cpuhp "online" callback (>= 4.10 state-machine hotplug API): mark the CPU
 * available and re-run candidacy selection.
 * NOTE(review): assumes g_dhd_pub is non-NULL once this callback is
 * registered — confirm registration ordering against driver attach.
 */
int dhd_cpu_startup_callback(unsigned int cpu)
{
	dhd_info_t *dhd = g_dhd_pub->info;

	DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
	DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
	cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
	dhd_select_cpu_candidacy(dhd);

	return 0;
}

/* cpuhp "offline" callback: remove the CPU from the available mask and
 * re-run candidacy selection so no job stays pinned to a dead CPU.
 */
int dhd_cpu_teardown_callback(unsigned int cpu)
{
	dhd_info_t *dhd = g_dhd_pub->info;

	DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
	DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
	cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
	dhd_select_cpu_candidacy(dhd);

	return 0;
}
#else
/* Legacy (< 4.10) hotplug notifier: single entry point multiplexed on
 * 'action'. Recovers dhd_info from the embedded notifier_block.
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned long int cpu = (unsigned long int)hcpu;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	/* Ignore notifications until load-balancer state is fully attached */
	if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
		DHD_INFO(("%s(): LB data is not initialized yet.\n",
			__FUNCTION__));
		return NOTIFY_BAD;
	}

	switch (action)
	{
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;

		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;
		default:
			break;
	}

	return NOTIFY_OK;
}
#endif /* LINUX_VERSION_CODE < 4.10.0 */

/* Register with whichever CPU-hotplug API the running kernel provides.
 * Returns the cpuhp_setup_state() result on >= 4.10 (negative on failure,
 * logged but otherwise non-fatal: load balancing simply won't rebalance),
 * 0 on older kernels.
 */
static int dhd_register_cpuhp_callback(dhd_info_t *dhd)
{
	int cpuhp_ret = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
dhd_cpu_startup_callback, dhd_cpu_teardown_callback); + + if (cpuhp_ret < 0) { + DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n", + __FUNCTION__, cpuhp_ret)); + } +#else + /* + * If we are able to initialize CPU masks, lets register to the + * CPU Hotplug framework to change the CPU for each job dynamically + * using candidacy algorithm. + */ + dhd->cpu_notifier.notifier_call = dhd_cpu_callback; + register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */ +#endif /* LINUX_VERSION_CODE < 4.10.0 */ + return cpuhp_ret; +} + +static int dhd_unregister_cpuhp_callback(dhd_info_t *dhd) +{ + int ret = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + /* Don't want to call tear down while unregistering */ + cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN); +#else + if (dhd->cpu_notifier.notifier_call != NULL) { + unregister_cpu_notifier(&dhd->cpu_notifier); + } +#endif // endif + return ret; +} + +#if defined(DHD_LB_STATS) +void dhd_lb_stats_init(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int i, j, num_cpus = num_possible_cpus(); + int alloc_size = sizeof(uint32) * num_cpus; + + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", + __FUNCTION__)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); + DHD_LB_STATS_CLR(dhd->napi_sched_cnt); + + dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->napi_percpu_run_cnt) { + DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); + + DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); + + dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->rxc_percpu_run_cnt) { + DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + 
DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); + + DHD_LB_STATS_CLR(dhd->txc_sched_cnt); + + dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txc_percpu_run_cnt) { + DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); + + dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->cpu_online_cnt) { + DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); + + dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->cpu_offline_cnt) { + DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); + + dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txp_percpu_run_cnt) { + DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); + + dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->tx_start_percpu_run_cnt) { + DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); + + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->napi_rx_hist[j]) { + DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]); + } + } +#ifdef DHD_LB_TXC + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txc_hist[j]) { + DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n", + 
__FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->txc_hist[j][i]); + } + } +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->rxc_hist[j]) { + DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]); + } + } +#endif /* DHD_LB_RXC */ + return; +} + +void dhd_lb_stats_deinit(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int j, num_cpus = num_possible_cpus(); + int alloc_size = sizeof(uint32) * num_cpus; + + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", + __FUNCTION__)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + if (dhd->napi_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size); + dhd->napi_percpu_run_cnt = NULL; + } + if (dhd->rxc_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size); + dhd->rxc_percpu_run_cnt = NULL; + } + if (dhd->txc_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size); + dhd->txc_percpu_run_cnt = NULL; + } + if (dhd->cpu_online_cnt) { + MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size); + dhd->cpu_online_cnt = NULL; + } + if (dhd->cpu_offline_cnt) { + MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size); + dhd->cpu_offline_cnt = NULL; + } + + if (dhd->txp_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size); + dhd->txp_percpu_run_cnt = NULL; + } + if (dhd->tx_start_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size); + dhd->tx_start_percpu_run_cnt = NULL; + } + + for (j = 0; j < HIST_BIN_SIZE; j++) { + if (dhd->napi_rx_hist[j]) { + MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size); + dhd->napi_rx_hist[j] = NULL; + } +#ifdef DHD_LB_TXC + if (dhd->txc_hist[j]) 
{ + MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size); + dhd->txc_hist[j] = NULL; + } +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + if (dhd->rxc_hist[j]) { + MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size); + dhd->rxc_hist[j] = NULL; + } +#endif /* DHD_LB_RXC */ + } + + return; +} + +static void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp, + struct bcmstrbuf *strbuf, uint32 **hist) +{ + int i, j; + uint32 *per_cpu_total; + uint32 total = 0; + uint32 num_cpus = num_possible_cpus(); + + per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus); + if (!per_cpu_total) { + DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__)); + return; + } + bzero(per_cpu_total, sizeof(uint32) * num_cpus); + + bcm_bprintf(strbuf, "CPU: \t\t"); + for (i = 0; i < num_cpus; i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\nBin\n"); + + for (i = 0; i < HIST_BIN_SIZE; i++) { + bcm_bprintf(strbuf, "%d:\t\t", 1<osh, per_cpu_total, sizeof(uint32) * num_cpus); + per_cpu_total = NULL; + } + return; +} + +static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p) +{ + int i, num_cpus = num_possible_cpus(); + + bcm_bprintf(strbuf, "CPU: \t"); + for (i = 0; i < num_cpus; i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\n"); + + bcm_bprintf(strbuf, "Val: \t"); + for (i = 0; i < num_cpus; i++) + bcm_bprintf(strbuf, "%u\t", *(p+i)); + bcm_bprintf(strbuf, "\n"); + return; +} + +void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_info_t *dhd; + + if (dhdp == NULL || strbuf == NULL) { + DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n", + __FUNCTION__, dhdp, strbuf)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + bcm_bprintf(strbuf, "\ncpu_online_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt); + + bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, 
dhd->cpu_offline_cnt);

	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);

#ifdef DHD_LB_RXP
	bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
	bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */

#ifdef DHD_LB_TXC
	bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */

#ifdef DHD_LB_TXP
	bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);

	bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
#endif /* DHD_LB_TXP */
}

/* Given a number 'n' returns 'm' that is next larger power of 2 after n.
 * Classic bit-smearing: propagate the top set bit into all lower bits,
 * then add one. For an exact power of two the input is returned unchanged
 * (num-- at entry ensures this).
 */
static inline uint32 next_larger_power2(uint32 num)
{
	num--;
	num |= (num >> 1);
	num |= (num >> 2);
	num |= (num >> 4);
	num |= (num >> 8);
	num |= (num >> 16);

	return (num + 1);
}

/* Bump the per-CPU histogram bucket for a batch of 'count' items:
 * bucket index is log2 of count rounded up, buckets 0..7 cover 1..128,
 * everything larger lands in bin[8].
 * NOTE(review): assumes HIST_BIN_SIZE > 8 so bin[8] is valid — TODO
 * confirm against the HIST_BIN_SIZE definition in the header.
 */
static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
{
	uint32 bin_power;
	uint32 *p;
	bin_power = next_larger_power2(count);

	switch (bin_power) {
		case 1: p = bin[0] + cpu; break;
		case 2: p = bin[1] + cpu; break;
		case 4: p = bin[2] + cpu; break;
		case 8: p = bin[3] + cpu; break;
		case 16: p = bin[4] + cpu; break;
		case 32: p = bin[5] + cpu; break;
		case 64: p = bin[6] + cpu; break;
		case 128: p = bin[7] + cpu; break;
		default : p = bin[8] + cpu; break;
	}

	*p = *p + 1;
	return;
}

/* Record a NAPI rx batch of 'count' packets against the current CPU.
 * get_cpu()/put_cpu() pair only samples the CPU id; the histogram update
 * itself runs preemptible.
 */
extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);

	return;
}

/* Record a tx-completion batch of 'count' items against the current CPU. */
extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);

	return;
}

/* Record an rx-completion (buffer post) batch of 'count' items against the
 * current CPU.
 */
extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);

	return;
}

/* Increment this CPU's tx-completion run counter. */
extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}

/* Increment this CPU's rx-completion run counter. */
extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}
#endif /* DHD_LB_STATS */

#endif /* DHD_LB */

#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
#endif /* USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding.
*/ +#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) +#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) +#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) + +#ifdef PCIE_FULL_DONGLE +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) \ + spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ + spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, + struct list_head *snapshot_list); +static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ +#endif /* PCIE_FULL_DONGLE */ + +/* Control fw roaming */ +#ifdef BCMCCX +uint dhd_roam_disable = 0; +#else +uint dhd_roam_disable = 0; +#endif /* BCMCCX */ + +#ifdef BCMDBGFS +extern void dhd_dbgfs_init(dhd_pub_t *dhdp); +extern void dhd_dbgfs_remove(void); +#endif // endif + +static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadta report */ +module_param(pcie_txs_metadata_enable, int, 0); + +/* Control radio state */ +uint dhd_radio_up = 1; + +/* Network inteface name */ +char iface_name[IFNAMSIZ] = {'\0'}; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + +/* The following are specific to the SDIO dongle */ + +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + +/* DS Exit response timeout */ +int ds_exit_timeout_msec = DS_EXIT_TIMEOUT; + +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); + +/* Use polling */ +uint dhd_poll = FALSE; 
+module_param(dhd_poll, uint, 0); + +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); + +/* SDIO Drive Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); + +#ifdef BCMSDIO +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0); +module_param(dhd_rxbound, uint, 0); + +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, uint, 0); + +#endif /* BCMSDIO */ + +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); + +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif /* SDTEST */ + +#if defined(BCMSUP_4WAY_HANDSHAKE) +/* Use in dongle supplicant for 4-way handshake */ +#if defined(WLFBT) || defined(WL_ENABLE_IDSUP) +/* Enable idsup by default (if supported in fw) */ +uint dhd_use_idsup = 1; +#else +uint dhd_use_idsup = 0; +#endif /* WLFBT || WL_ENABLE_IDSUP */ +module_param(dhd_use_idsup, uint, 0); +#endif /* BCMSUP_4WAY_HANDSHAKE */ + +#ifndef BCMDBUS +/* Allow delayed firmware download for debug purpose */ +int allow_delay_fwdl = FALSE; +module_param(allow_delay_fwdl, int, 0); +#endif /* !BCMDBUS */ + +#ifdef ECOUNTER_PERIODIC_DISABLE +uint enable_ecounter = FALSE; +#else +uint enable_ecounter = TRUE; +#endif // endif +module_param(enable_ecounter, uint, 0); + +extern char dhd_version[]; +extern char fw_version[]; +extern char clm_version[]; + +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +static void dhd_net_if_lock_local(dhd_info_t *dhd); +static void dhd_net_if_unlock_local(dhd_info_t *dhd); +static void dhd_suspend_lock(dhd_pub_t *dhdp); +static void dhd_suspend_unlock(dhd_pub_t *dhdp); + +/* Monitor interface */ +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + +#ifdef DHD_PM_CONTROL_FROM_FILE +bool g_pm_control; +void 
sec_control_pm(dhd_pub_t *dhd, uint *); +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifndef BCMDBUS +static void dhd_dpc(ulong data); +#endif /* !BCMDBUS */ +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); +void dhd_os_wd_timer_extend(void *bus, bool extend); + +#ifdef TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ + +static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, + wl_event_msg_t *event_ptr, void **data_ptr); + +#if defined(CONFIG_PM_SLEEP) +static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +{ + int ret = NOTIFY_DONE; + bool suspend = FALSE; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + BCM_REFERENCE(dhdinfo); + BCM_REFERENCE(suspend); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + suspend = TRUE; + break; + + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + suspend = FALSE; + break; + } + +#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) + if (suspend) { + DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); + dhd_wlfc_suspend(&dhdinfo->pub); + DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); + } else { + dhd_wlfc_resume(&dhdinfo->pub); + } +#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 39)) + dhd_mmc_suspend = suspend; + 
smp_mb(); +#endif // endif + + return ret; +} + +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_pm_notifier_registered = FALSE; + +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); +#endif /* CONFIG_PM_SLEEP */ + +/* Request scheduling of the bus rx frame */ +static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb); +static void dhd_os_rxflock(dhd_pub_t *pub); +static void dhd_os_rxfunlock(dhd_pub_t *pub); + +#if defined(DHD_H2D_LOG_TIME_SYNC) +static void +dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event); +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +/** priv_link is the link between netdev and the dhdif and dhd_info structs. */ +typedef struct dhd_dev_priv { + dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ + dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ + int ifidx; /* interface index */ + void * lkup; +} dhd_dev_priv_t; + +#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) +#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) +#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) +#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) +#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup) + +#if defined(DHD_OF_SUPPORT) +extern int dhd_wlan_init(void); +#endif /* defined(DHD_OF_SUPPORT) */ +/** Clear the dhd net_device's private structure. 
*/ +static inline void +dhd_dev_priv_clear(struct net_device * dev) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = (dhd_info_t *)NULL; + dev_priv->ifp = (dhd_if_t *)NULL; + dev_priv->ifidx = DHD_BAD_IF; + dev_priv->lkup = (void *)NULL; +} + +/** Setup the dhd net_device's private structure. */ +static inline void +dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp, + int ifidx) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = dhd; + dev_priv->ifp = ifp; + dev_priv->ifidx = ifidx; +} + +/* Return interface pointer */ +static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx) +{ + ASSERT(ifidx < DHD_MAX_IFS); + + if (ifidx >= DHD_MAX_IFS) + return NULL; + + return dhdp->info->iflist[ifidx]; +} + +#ifdef PCIE_FULL_DONGLE + +/** Dummy objects are defined with state representing bad|down. + * Performance gains from reducing branch conditionals, instruction parallelism, + * dual issue, reducing load shadows, avail of larger pipelines. + * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer + * is accessed via the dhd_sta_t. 
+ */ + +/* Dummy dhd_info object */ +dhd_info_t dhd_info_null = { + .pub = { + .info = &dhd_info_null, +#ifdef DHDTCPACK_SUPPRESS + .tcpack_sup_mode = TCPACK_SUP_REPLACE, +#endif /* DHDTCPACK_SUPPRESS */ + .up = FALSE, + .busstate = DHD_BUS_DOWN + } +}; +#define DHD_INFO_NULL (&dhd_info_null) +#define DHD_PUB_NULL (&dhd_info_null.pub) + +/* Dummy netdevice object */ +struct net_device dhd_net_dev_null = { + .reg_state = NETREG_UNREGISTERED +}; +#define DHD_NET_DEV_NULL (&dhd_net_dev_null) + +/* Dummy dhd_if object */ +dhd_if_t dhd_if_null = { +#ifdef WMF + .wmf = { .wmf_enable = TRUE }, +#endif // endif + .info = DHD_INFO_NULL, + .net = DHD_NET_DEV_NULL, + .idx = DHD_BAD_IF +}; +#define DHD_IF_NULL (&dhd_if_null) + +#define DHD_STA_NULL ((dhd_sta_t *)NULL) + +/** Interface STA list management. */ + +/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */ +static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta); +static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp); + +/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */ +static void dhd_if_del_sta_list(dhd_if_t * ifp); +static void dhd_if_flush_sta(dhd_if_t * ifp); + +/* Construct/Destruct a sta pool. */ +static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); +static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); + +/** Reset a dhd_sta object and free into the dhd pool. */ +static void +dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) +{ + int prio; + + ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + /* + * Flush and free all packets in all flowring's queues belonging to sta. + * Packets in flow ring will be flushed later. 
+ */ + for (prio = 0; prio < (int)NUMPRIO; prio++) { + uint16 flowid = sta->flowid[prio]; + + if (flowid != FLOWID_INVALID) { + unsigned long flags; + flow_ring_node_t * flow_ring_node; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + if (flow_ring_node) { + flow_queue_t *queue = &flow_ring_node->queue; + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; + + if (!DHD_FLOW_QUEUE_EMPTY(queue)) { + void * pkt; + while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != + NULL) { + PKTFREE(dhdp->osh, pkt, TRUE); + } + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + } + } + + sta->flowid[prio] = FLOWID_INVALID; + } + + id16_map_free(dhdp->staid_allocator, sta->idx); + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ + sta->ifidx = DHD_BAD_IF; + bzero(sta->ea.octet, ETHER_ADDR_LEN); + INIT_LIST_HEAD(&sta->list); + sta->idx = ID16_INVALID; /* implying free */ +} + +/** Allocate a dhd_sta object from the dhd pool. */ +static dhd_sta_t * +dhd_sta_alloc(dhd_pub_t * dhdp) +{ + uint16 idx; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + idx = id16_map_alloc(dhdp->staid_allocator); + if (idx == ID16_INVALID) { + DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); + sta = &sta_pool[idx]; + + ASSERT((sta->idx == ID16_INVALID) && + (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); + + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + + sta->idx = idx; /* implying allocated */ + + return sta; +} + +/** Delete all STAs in an interface's STA list. 
*/ +static void +dhd_if_del_sta_list(dhd_if_t *ifp) +{ + dhd_sta_t *sta, *next; + unsigned long flags; + + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return; +} + +/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */ +static void +dhd_if_flush_sta(dhd_if_t * ifp) +{ +} + +/** Construct a pool of dhd_sta_t objects to be used by interfaces. */ +static int +dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void * staid_allocator; + + ASSERT(dhdp != (dhd_pub_t *)NULL); + ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ + staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); + if (staid_allocator == NULL) { + DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Pre allocate a pool of dhd_sta objects (one extra). */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */ + sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); + if (sta_pool == NULL) { + DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__)); + id16_map_fini(dhdp->osh, staid_allocator); + return BCME_ERROR; + } + + dhdp->sta_pool = sta_pool; + dhdp->staid_allocator = staid_allocator; + + /* Initialize all sta(s) for the pre-allocated free pool. 
*/ + bzero((uchar *)sta_pool, sta_pool_memsz); + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } + + return BCME_OK; +} + +/** Destruct the pool of dhd_sta_t objects. + * Caller must ensure that no STA objects are currently associated with an if. + */ +static void +dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) +{ + dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + + if (sta_pool) { + int idx; + int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + for (idx = 1; idx <= max_sta; idx++) { + ASSERT(sta_pool[idx].ifp == DHD_IF_NULL); + ASSERT(sta_pool[idx].idx == ID16_INVALID); + } + MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); + dhdp->sta_pool = NULL; + } + + id16_map_fini(dhdp->osh, dhdp->staid_allocator); + dhdp->staid_allocator = NULL; +} + +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void +dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void *staid_allocator; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + staid_allocator = dhdp->staid_allocator; + + if (!sta_pool) { + DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__)); + return; + } + + if (!staid_allocator) { + DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__)); + return; + } + + /* clear free pool */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + bzero((uchar *)sta_pool, sta_pool_memsz); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. 
*/ + id16_map_clear(staid_allocator, max_sta, 1); + + /* Initialize all sta(s) for the pre-allocated free pool. */ + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } +} + +/** Find STA with MAC address ea in an interface's STA list. */ +dhd_sta_t * +dhd_find_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry(sta, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_INFO(("%s: Found STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG((char *)ea))); + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + return sta; + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return DHD_STA_NULL; +} + +/** Add STA into the interface's STA list. 
*/ +dhd_sta_t * +dhd_add_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea)); + return DHD_STA_NULL; + } + + sta = dhd_sta_alloc((dhd_pub_t *)pub); + if (sta == DHD_STA_NULL) { + DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); + + /* link the sta and the dhd interface */ + sta->ifp = ifp; + sta->ifidx = ifidx; + INIT_LIST_HEAD(&sta->list); + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_add_tail(&sta->list, &ifp->sta_list); + + DHD_ERROR(("%s: Adding STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG((char *)ea))); + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return sta; +} + +/** Delete all STAs from the interface's STA list. */ +void +dhd_del_all_sta(void *pub, int ifidx) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, + ifp->phnd_arp_table, FALSE, + sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return; +} + +/** Delete STA from the interface's STA list. 
*/ +void +dhd_del_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: Deleting STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG(sta->ea.octet))); + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, + ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + return; +} + +/** Add STA if it doesn't exist. Not reentrant. 
*/ +dhd_sta_t* +dhd_findadd_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + + sta = dhd_find_sta(pub, ifidx, ea); + + if (!sta) { + /* Add entry */ + sta = dhd_add_sta(pub, ifidx, ea); + } + + return sta; +} + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * +dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) +{ + unsigned long flags; + dhd_sta_t *sta, *snapshot; + + INIT_LIST_HEAD(snapshot_list); + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry(sta, &ifp->sta_list, list) { + /* allocate one and add to snapshot */ + snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); + if (snapshot == NULL) { + DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); + continue; + } + + memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); + + INIT_LIST_HEAD(&snapshot->list); + list_add_tail(&snapshot->list, snapshot_list); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return snapshot_list; +} + +static void +dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) +{ + dhd_sta_t *sta, *next; + + list_for_each_entry_safe(sta, next, snapshot_list, list) { + list_del(&sta->list); + MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); + } +} +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else +static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } +static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} +static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } +static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} +static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} +dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } +dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; } +void dhd_del_sta(void *pub, int ifidx, void *ea) {} +#endif /* PCIE_FULL_DONGLE */ + +#if defined(DHD_LB) + +#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || 
defined(DHD_LB_TXP) || \ + defined(DHD_LB_RXP) +/** + * dhd_tasklet_schedule - Function that runs in IPI context of the destination + * CPU and schedules a tasklet. + * @tasklet: opaque pointer to the tasklet + */ +INLINE void +dhd_tasklet_schedule(void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); +} +/** + * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU + * @tasklet: tasklet to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * smp_call_function_single with no wait and the tasklet_schedule function + * will be invoked to schedule the specified tasklet on the requested CPU. + */ +INLINE void +dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) +{ + const int wait = 0; + smp_call_function_single(on_cpu, + dhd_tasklet_schedule, (void *)tasklet, wait); +} + +/** + * dhd_work_schedule_on - Executes the passed work in a given CPU + * @work: work to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * schedule_work_on and the work function + * will be invoked to schedule the specified work on the requested CPU. + */ + +INLINE void +dhd_work_schedule_on(struct work_struct *work, int on_cpu) +{ + schedule_work_on(on_cpu, work); +} +#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */ + +#if defined(DHD_LB_TXC) +/** + * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet + * on another cpu. 
The tx_compl_tasklet will take care of DMA unmapping and + * freeing the packets placed in the tx_compl workq + */ +void +dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->txc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + on_cpu = atomic_read(&dhd->tx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + } else { + schedule_work(&dhd->tx_compl_dispatcher_work); + } +} + +static void dhd_tx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, tx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->tx_compl_cpu); + if (!cpu_online(cpu)) + dhd_tasklet_schedule(&dhd->tx_compl_tasklet); + else + dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); + put_online_cpus(); +} +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) +/** + * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet + * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers + * in the H2D RxBuffer Post common ring, by using the recycled pktids that were + * placed in the rx_compl workq. 
+ * + * @dhdp: pointer to dhd_pub object + */ +void +dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu, on_cpu; + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_INCR(dhd->rxc_sched_cnt); + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + on_cpu = atomic_read(&dhd->rx_compl_cpu); + + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + } else { + schedule_work(&dhd->rx_compl_dispatcher_work); + } +} + +static void dhd_rx_compl_dispatcher_fn(struct work_struct * work) +{ + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_compl_dispatcher_work); + int cpu; + + get_online_cpus(); + cpu = atomic_read(&dhd->rx_compl_cpu); + if (!cpu_online(cpu)) + dhd_tasklet_schedule(&dhd->rx_compl_tasklet); + else { + dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu); + } + put_online_cpus(); +} +#endif /* DHD_LB_RXC */ + +#if defined(DHD_LB_TXP) +static void dhd_tx_dispatcher_work(struct work_struct * work) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + struct dhd_info *dhd = + container_of(work, struct dhd_info, tx_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + dhd_tasklet_schedule(&dhd->tx_tasklet); +} + +static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp) +{ + int cpu; + int net_tx_cpu; + dhd_info_t *dhd = dhdp->info; + + preempt_disable(); + cpu = atomic_read(&dhd->tx_cpu); + net_tx_cpu = atomic_read(&dhd->net_tx_cpu); + + /* + * Now if the NET_TX has pushed the packet in the same + * CPU that is chosen for Tx processing, seperate it out + * i.e run the TX processing tasklet in compl_cpu + */ + if (net_tx_cpu == cpu) + 
cpu = atomic_read(&dhd->tx_compl_cpu); + + if (!cpu_online(cpu)) { + /* + * Ooohh... but the Chosen CPU is not online, + * Do the job in the current CPU itself. + */ + dhd_tasklet_schedule(&dhd->tx_tasklet); + } else { + /* + * Schedule tx_dispatcher_work to on the cpu which + * in turn will schedule tx_tasklet. + */ + dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu); + } + preempt_enable(); +} + +/** + * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet + * on another cpu. The tx_tasklet will take care of actually putting + * the skbs into appropriate flow ring and ringing H2D interrupt + * + * @dhdp: pointer to dhd_pub object + */ +static void +dhd_lb_tx_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + + curr_cpu = get_cpu(); + put_cpu(); + + /* Record the CPU in which the TX request from Network stack came */ + atomic_set(&dhd->net_tx_cpu, curr_cpu); + + /* Schedule the work to dispatch ... */ + dhd_tx_dispatcher_fn(dhdp); +} +#endif /* DHD_LB_TXP */ + +#if defined(DHD_LB_RXP) +/** + * dhd_napi_poll - Load balance napi poll function to process received + * packets and send up the network stack using netif_receive_skb() + * + * @napi: napi object in which context this poll function is invoked + * @budget: number of packets to be processed. + * + * Fetch the dhd_info given the rx_napi_struct. Move all packets from the + * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock). + * Dequeue each packet from head of rx_process_queue, fetch the ifid from the + * packet tag and sendup. 
+ */ +static int +dhd_napi_poll(struct napi_struct *napi, int budget) +{ + int ifid; + const int pkt_count = 1; + const int chan = 0; + struct sk_buff * skb; + unsigned long flags; + struct dhd_info *dhd; + int processed = 0; + struct sk_buff_head rx_process_queue; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd = container_of(napi, struct dhd_info, rx_napi_struct); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + DHD_INFO(("%s napi_queue<%d> budget<%d>\n", + __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget)); + __skb_queue_head_init(&rx_process_queue); + + /* extract the entire rx_napi_queue into local rx_process_queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); + + while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) { + OSL_PREFETCH(skb->data); + + ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); + + DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n", + __FUNCTION__, skb, ifid)); + + dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan); + processed++; + } + + DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed); + + DHD_INFO(("%s processed %d\n", __FUNCTION__, processed)); + napi_complete(napi); + + return budget - 1; +} + +/** + * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi + * poll list. This function may be invoked via the smp_call_function_single + * from a remote CPU. 
+ * + * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ) + * after the napi_struct is added to the softnet data's poll_list + * + * @info: pointer to a dhd_info struct + */ +static void +dhd_napi_schedule(void *info) +{ + dhd_info_t *dhd = (dhd_info_t *)info; + + DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n", + __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu))); + + /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */ + if (napi_schedule_prep(&dhd->rx_napi_struct)) { + __napi_schedule(&dhd->rx_napi_struct); + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt); + } + + /* + * If the rx_napi_struct was already running, then we let it complete + * processing all its packets. The rx_napi_struct may only run on one + * core at a time, to avoid out-of-order handling. + */ +} + +/** + * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ + * action after placing the dhd's rx_process napi object in the the remote CPU's + * softnet data's poll_list. + * + * @dhd: dhd_info which has the rx_process napi object + * @on_cpu: desired remote CPU id + */ +static INLINE int +dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) +{ + int wait = 0; /* asynchronous IPI */ + DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n", + __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu)); + + if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) { + DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n", + __FUNCTION__, on_cpu)); + } + + DHD_LB_STATS_INCR(dhd->napi_sched_cnt); + + return 0; +} + +/* + * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on + * Why should we do this? + * The candidacy algorithm is run from the call back function + * registered to CPU hotplug notifier. This call back happens from Worker + * context. The dhd_napi_schedule_on is also from worker context. + * Note that both of this can run on two different CPUs at the same time. 
+ * So we can possibly have a window where a given CPUn is being brought + * down from CPUm while we try to run a function on CPUn. + * To prevent this its better have the whole code to execute an SMP + * function under get_online_cpus. + * This function call ensures that hotplug mechanism does not kick-in + * until we are done dealing with online CPUs + * If the hotplug worker is already running, no worries because the + * candidacy algo would then reflect the same in dhd->rx_napi_cpu. + * + * The below mentioned code structure is proposed in + * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt + * for the question + * Q: I need to ensure that a particular cpu is not removed when there is some + * work specific to this cpu is in progress + * + * According to the documentation calling get_online_cpus is NOT required, if + * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can + * run from Work Queue context we have to call these functions + */ +static void dhd_rx_napi_dispatcher_fn(struct work_struct * work) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + struct dhd_info *dhd = + container_of(work, struct dhd_info, rx_napi_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + dhd_napi_schedule(dhd); +} + +/** + * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct + * to run on another CPU. The rx_napi_struct's poll function will retrieve all + * the packets enqueued into the rx_napi_queue and sendup. + * The producer's rx packet queue is appended to the rx_napi_queue before + * dispatching the rx_napi_struct. 
+ */ +void +dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) +{ + unsigned long flags; + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + int on_cpu; +#ifdef DHD_LB_IRQSET + cpumask_t cpus; +#endif /* DHD_LB_IRQSET */ + + if (dhd->rx_napi_netdev == NULL) { + DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__)); + return; + } + + DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__, + skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue))); + + /* append the producer's queue of packets to the napi's rx process queue */ + spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); + skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue); + spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags); + + /* + * If the destination CPU is NOT online or is same as current CPU + * no need to schedule the work + */ + curr_cpu = get_cpu(); + put_cpu(); + + preempt_disable(); + on_cpu = atomic_read(&dhd->rx_napi_cpu); +#ifdef DHD_LB_IRQSET + if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) || + (!cpu_online(on_cpu))) +#else + if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) +#endif /* DHD_LB_IRQSET */ + { + DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__, + curr_cpu, *cpumask_bits(dhd->cpumask_primary))); + dhd_napi_schedule(dhd); + } else { + DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n", + __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary))); + dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu); + } + preempt_enable(); +} + +/** + * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue + */ +void +dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + dhd_info_t *dhd = dhdp->info; + + DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__, + pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue))); + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx); + __skb_queue_tail(&dhd->rx_pend_queue, pkt); +} +#endif /* DHD_LB_RXP */ + 
+#ifdef DHD_LB_IRQSET +void +dhd_irq_set_affinity(dhd_pub_t *dhdp) +{ + unsigned int irq = (unsigned int)-1; + int err = BCME_OK; + + if (!dhdp) { + DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__)); + return; + } + + if (!dhdp->bus) { + DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_get_pcieirq(dhdp->bus, &irq); + err = irq_set_affinity(irq, dhdp->info->cpumask_primary); + if (err) + DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n", + __FUNCTION__, *cpumask_bits(dhdp->info->cpumask_primary))); +} +#endif /* DHD_LB_IRQSET */ +#endif /* DHD_LB */ + +/** Returns dhd iflist index corresponding the the bssidx provided by apps */ +int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_if_t *ifp; + dhd_info_t *dhd = dhdp->info; + int i; + + ASSERT(bssidx < DHD_MAX_IFS); + ASSERT(dhdp); + + for (i = 0; i < DHD_MAX_IFS; i++) { + ifp = dhd->iflist[i]; + if (ifp && (ifp->bssidx == bssidx)) { + DHD_TRACE(("Index manipulated for %s from %d to %d\n", + ifp->name, bssidx, i)); + break; + } + } + return i; +} + +static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb) +{ + uint32 store_idx; + uint32 sent_idx; + + if (!skb) { + DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n")); + return BCME_ERROR; + } + + dhd_os_rxflock(dhdp); + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + if (dhdp->skbbuf[store_idx] != NULL) { + /* Make sure the previous packets are processed */ + dhd_os_rxfunlock(dhdp); + DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", + skb, store_idx, sent_idx)); + /* removed msleep here, should use wait_event_timeout if we + * want to give rx frame thread a chance to run + */ +#if defined(WAIT_DEQUEUE) + OSL_SLEEP(1); +#endif // endif + return BCME_ERROR; + } + DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. 
idx %d -> %d\n", + skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1))); + dhdp->skbbuf[store_idx] = skb; + dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); + dhd_os_rxfunlock(dhdp); + + return BCME_OK; +} + +static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) +{ + uint32 store_idx; + uint32 sent_idx; + void *skb; + + dhd_os_rxflock(dhdp); + + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + skb = dhdp->skbbuf[sent_idx]; + + if (skb == NULL) { + dhd_os_rxfunlock(dhdp); + DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n", + store_idx, sent_idx)); + return NULL; + } + + dhdp->skbbuf[sent_idx] = NULL; + dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1); + + DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n", + skb, sent_idx)); + + dhd_os_rxfunlock(dhdp); + + return skb; +} + +int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) +{ + if (prepost) { /* pre process */ + dhd_read_cis(dhdp); + dhd_check_module_cid(dhdp); + dhd_check_module_mac(dhdp); + dhd_set_macaddr_from_file(dhdp); + } else { /* post process */ + dhd_write_macaddr(&dhdp->mac); + dhd_clear_cis(dhdp); + } + + return 0; +} + +// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed +#if defined(PKT_FILTER_SUPPORT) +#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) +static bool +_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param) +{ + bool _apply = FALSE; + /* In case of IBSS mode, apply arp pkt filter */ + if (op_mode_param & DHD_FLAG_IBSS_MODE) { + _apply = TRUE; + goto exit; + } + /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */ + if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) { + _apply = TRUE; + goto exit; + } + +exit: + return _apply; +} +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ + +void +dhd_set_packet_filter(dhd_pub_t *dhd) +{ + int i; + + DHD_TRACE(("%s: enter\n", __FUNCTION__)); + if (dhd_pkt_filter_enable) { + for (i = 0; i < dhd->pktfilter_count; 
i++) { + dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); + } + } +} + +void +dhd_enable_packet_filter(int value, dhd_pub_t *dhd) +{ + int i; + + DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value)); + if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) { + DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__)); + return; + } + /* 1 - Enable packet filter, only allow unicast packet to send up */ + /* 0 - Disable packet filter */ + if (dhd_pkt_filter_enable && (!value || + (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))) + { + for (i = 0; i < dhd->pktfilter_count; i++) { +// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed +#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) + if (value && (i == DHD_ARP_FILTER_NUM) && + !_turn_on_arp_filter(dhd, dhd->op_mode)) { + DHD_TRACE(("Do not turn on ARP white list pkt filter:" + "val %d, cnt %d, op_mode 0x%x\n", + value, i, dhd->op_mode)); + continue; + } +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + value, dhd_master_mode); + } + } +} + +int +dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num) +{ + char *filterp = NULL; + int filter_id = 0; + + switch (num) { + case DHD_BROADCAST_FILTER_NUM: + filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + filter_id = 101; + break; + case DHD_MULTICAST4_FILTER_NUM: + filter_id = 102; + if (FW_SUPPORTED((dhdp), pf6)) { + if (dhdp->pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + if (!add_remove) { + filterp = DISCARD_IPV4_MCAST; + add_remove = 1; + break; + } + } + filterp = "102 0 0 0 0xFFFFFF 0x01005E"; + break; + case DHD_MULTICAST6_FILTER_NUM: + filter_id = 103; + if (FW_SUPPORTED((dhdp), pf6)) { + if (dhdp->pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + if (!add_remove) { + filterp = DISCARD_IPV6_MCAST; + add_remove = 1; 
+ break; + } + } + filterp = "103 0 0 0 0xFFFF 0x3333"; + break; + case DHD_MDNS_FILTER_NUM: + filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; + filter_id = 104; + break; + case DHD_ARP_FILTER_NUM: + filterp = "105 0 0 12 0xFFFF 0x0806"; + filter_id = 105; + break; + case DHD_BROADCAST_ARP_FILTER_NUM: + filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806" + " 0xFFFFFFFFFFFF0000000000000806"; + filter_id = 106; + break; + default: + return -EINVAL; + } + + /* Add filter */ + if (add_remove) { + dhdp->pktfilter[num] = filterp; + dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]); + } else { /* Delete filter */ + if (dhdp->pktfilter[num]) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + } + + return 0; +} +#endif /* PKT_FILTER_SUPPORT */ + +static int dhd_set_suspend(int value, dhd_pub_t *dhd) +{ + int power_mode = PM_MAX; + /* wl_pkt_filter_enable_t enable_parm; */ + int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */ + int ret = 0; +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + int bcn_timeout = 0; +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + int roam_time_thresh = 0; /* (ms) */ +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + uint roamvar = dhd->conf->roam_off_suspend; +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + int bcn_li_bcn; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint nd_ra_filter = 0; +#ifdef ENABLE_IPMCAST_FILTER + int ipmcast_l2filter; +#endif /* ENABLE_IPMCAST_FILTER */ +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ +#endif /* DHD_USE_EARLYSUSPEND */ +#ifdef PASS_ALL_MCAST_PKTS + struct dhd_info *dhdinfo; + uint32 allmulti; + uint i; +#endif /* PASS_ALL_MCAST_PKTS */ +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH 
*/ +#endif /* DYNAMIC_SWOOB_DURATION */ + +#if defined(BCMPCIE) + int lpas = 0; + int dtim_period = 0; + int bcn_interval = 0; + int bcn_to_dly = 0; +#if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND) + bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING; +#else + int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING; +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */ +#endif /* OEM_ANDROID && BCMPCIE */ + + if (!dhd) + return -ENODEV; + +#ifdef PASS_ALL_MCAST_PKTS + dhdinfo = dhd->info; +#endif /* PASS_ALL_MCAST_PKTS */ + + DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n", + __FUNCTION__, value, dhd->in_suspend)); + + dhd_suspend_lock(dhd); + +#ifdef CUSTOM_SET_CPUCORE + DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value)); + /* set specific cpucore */ + dhd_set_cpucore(dhd, TRUE); +#endif /* CUSTOM_SET_CPUCORE */ + + if (dhd->conf->pm >= 0) + power_mode = dhd->conf->pm; + else + power_mode = PM_FAST; + + if (dhd->up) { + if (value && dhd->in_suspend) { +#ifdef PKT_FILTER_SUPPORT + dhd->early_suspended = 1; +#endif // endif + /* Kernel suspended */ + DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__)); + + if (dhd->conf->pm_in_suspend >= 0) + power_mode = dhd->conf->pm_in_suspend; + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter, + * only allow unicast packet to send up + */ + dhd_enable_packet_filter(1, dhd); +#ifdef APF + dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd)); +#endif /* APF */ +#endif /* PKT_FILTER_SUPPORT */ + +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 0; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + ret = dhd_iovar(dhd, i, "allmulti", (char *)&allmulti, + sizeof(allmulti), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s allmulti failed %d\n", __FUNCTION__, ret)); + } + } +#endif /* PASS_ALL_MCAST_PKTS */ + + /* If DTIM skip is set up as default, force 
it to wake + * each third DTIM for better power savings. Note that + * one side effect is a chance to miss BC/MC packet. + */ +#ifdef WLTDLS + /* Do not set bcn_li_ditm on WFD mode */ + if (dhd->tdls_mode) { + bcn_li_dtim = 0; + } else +#endif /* WLTDLS */ +#if defined(BCMPCIE) + bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period, + &bcn_interval); + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_li_dtim failed %d\n", __FUNCTION__, ret)); + } + if ((bcn_li_dtim * dtim_period * bcn_interval) >= + MIN_DTIM_FOR_ROAM_THRES_EXTEND) { + /* + * Increase max roaming threshold from 2 secs to 8 secs + * the real roam threshold is MIN(max_roam_threshold, + * bcn_timeout/2) + */ + lpas = 1; + ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__, ret)); + } + bcn_to_dly = 1; + /* + * if bcn_to_dly is 1, the real roam threshold is + * MIN(max_roam_threshold, bcn_timeout -1); + * notify link down event after roaming procedure complete + * if we hit bcn_timeout while we are in roaming progress. 
+ */ + ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly, + sizeof(bcn_to_dly), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_to_dly failed %d\n", __FUNCTION__, ret)); + } + /* Increase beacon timeout to 6 secs or use bigger one */ + bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND); + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__, ret)); + } + } +#else + bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd); + if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0) + DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); +#endif /* OEM_ANDROID && BCMPCIE */ + +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND; + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND; + ret = dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh, + sizeof(roam_time_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s roam_time_thresh failed %d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + /* Disable firmware roaming during suspend */ + ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, + sizeof(roamvar), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret)); + } +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 0; + ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, + sizeof(bcn_li_bcn), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret)); + } +#endif /* 
ENABLE_BCN_LI_BCN_WAKEUP */ +#if defined(WL_CFG80211) && defined(WL_BCNRECV) + ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd)); + if (ret != BCME_OK) { + DHD_ERROR(("failed to stop beacon recv event on" + " suspend state (%d)\n", ret)); + } +#endif /* WL_CFG80211 && WL_BCNRECV */ +#ifdef NDO_CONFIG_SUPPORT + if (dhd->ndo_enable) { + if (!dhd->ndo_host_ip_overflow) { + /* enable ND offload on suspend */ + ret = dhd_ndo_enable(dhd, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: failed to enable NDO\n", + __FUNCTION__)); + } + } else { + DHD_INFO(("%s: NDO disabled on suspend due to" + "HW capacity\n", __FUNCTION__)); + } + } +#endif /* NDO_CONFIG_SUPPORT */ +#ifndef APF + if (FW_SUPPORTED(dhd, ndoe)) +#else + if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) +#endif /* APF */ + { + /* enable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 1; + ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable", + (char *)&nd_ra_filter, sizeof(nd_ra_filter), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } + dhd_os_suppress_logging(dhd, TRUE); +#ifdef ENABLE_IPMCAST_FILTER + ipmcast_l2filter = 1; + ret = dhd_iovar(dhd, 0, "ipmcast_l2filter", + (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret)); + } +#endif /* ENABLE_IPMCAST_FILTER */ +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = CUSTOM_INTR_WIDTH; + ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width, + sizeof(intr_width), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#ifdef CUSTOM_EVENT_PM_WAKE + pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4; + ret = dhd_iovar(dhd, 0, "const_awake_thresh", + (char *)&pm_awake_thresh, + sizeof(pm_awake_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", + __FUNCTION__, ret)); + } +#endif /* 
CUSTOM_EVENT_PM_WAKE */ +#endif /* DHD_USE_EARLYSUSPEND */ + dhd_conf_set_ap_in_suspend(dhd, value); + } else { + dhd_conf_set_ap_in_suspend(dhd, value); +#ifdef PKT_FILTER_SUPPORT + dhd->early_suspended = 0; +#endif // endif + /* Kernel resumed */ + DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__)); +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = 0; + ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width, + sizeof(intr_width), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#ifndef SUPPORT_PM2_ONLY + power_mode = PM_FAST; + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ +#if defined(WL_CFG80211) && defined(WL_BCNRECV) + ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd)); + if (ret != BCME_OK) { + DHD_ERROR(("failed to resume beacon recv state (%d)\n", + ret)); + } +#endif /* WL_CF80211 && WL_BCNRECV */ +#ifdef PKT_FILTER_SUPPORT + /* disable pkt filter */ + dhd_enable_packet_filter(0, dhd); +#ifdef APF + dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd)); +#endif /* APF */ +#endif /* PKT_FILTER_SUPPORT */ +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 1; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + ret = dhd_iovar(dhd, i, "allmulti", (char *)&allmulti, + sizeof(allmulti), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: allmulti failed:%d\n", __FUNCTION__, ret)); + } + } +#endif /* PASS_ALL_MCAST_PKTS */ +#if defined(BCMPCIE) + /* restore pre-suspend setting */ + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_li_ditm failed:%d\n", __FUNCTION__, ret)); + } + ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, + 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret)); + } + ret = dhd_iovar(dhd, 
0, "bcn_to_dly", (char *)&bcn_to_dly, + sizeof(bcn_to_dly), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret)); + } + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_timeout failed:%d\n", __FUNCTION__, ret)); + } +#else + /* restore pre-suspend setting for dtim_skip */ + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret)); + } +#endif /* OEM_ANDROID && BCMPCIE */ +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT; + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_timeout failed:%d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = 2000; + ret = dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh, + sizeof(roam_time_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:roam_time_thresh failed:%d\n", __FUNCTION__, ret)); + } + +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + roamvar = dhd_roam_disable; + ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, + sizeof(roamvar), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret)); + } +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 1; + ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, + sizeof(bcn_li_bcn), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: bcn_li_bcn failed:%d\n", __FUNCTION__, ret)); + } +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ +#ifdef NDO_CONFIG_SUPPORT + if (dhd->ndo_enable) { + /* Disable ND offload on resume */ + ret = dhd_ndo_enable(dhd, FALSE); + if (ret < 0) { + DHD_ERROR(("%s: failed 
to disable NDO\n", + __FUNCTION__)); + } + } +#endif /* NDO_CONFIG_SUPPORT */ +#ifndef APF + if (FW_SUPPORTED(dhd, ndoe)) +#else + if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) +#endif /* APF */ + { + /* disable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 0; + ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable", + (char *)&nd_ra_filter, sizeof(nd_ra_filter), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } + } + dhd_os_suppress_logging(dhd, FALSE); +#ifdef ENABLE_IPMCAST_FILTER + ipmcast_l2filter = 0; + ret = dhd_iovar(dhd, 0, "ipmcast_l2filter", + (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret)); + } +#endif /* ENABLE_IPMCAST_FILTER */ +#ifdef CUSTOM_EVENT_PM_WAKE + ret = dhd_iovar(dhd, 0, "const_awake_thresh", + (char *)&pm_awake_thresh, + sizeof(pm_awake_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", + __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ +#endif /* DHD_USE_EARLYSUSPEND */ +#ifdef DHD_LB_IRQSET + dhd_irq_set_affinity(dhd); +#endif /* DHD_LB_IRQSET */ + + /* terence 2017029: Reject in early suspend */ + if (!dhd->conf->xmit_in_suspend) { + dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); + } + } + } + dhd_suspend_unlock(dhd); + + return 0; +} + +static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force) +{ + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_OS_WAKE_LOCK(dhdp); + DHD_PERIM_LOCK(dhdp); + + /* Set flag when early suspend was called */ + dhdp->in_suspend = val; + if ((force || !dhdp->suspend_disable_flag) && + (dhd_support_sta_mode(dhdp) || dhd_conf_get_ap_mode_in_suspend(dhdp))) + { + ret = dhd_set_suspend(val, dhdp); + } + + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WAKE_UNLOCK(dhdp); + return ret; +} + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +static void 
dhd_early_suspend(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 1, 0); +} + +static void dhd_late_resume(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 0, 0); +} +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +/* + * Generalized timeout mechanism. Uses spin sleep with exponential back-off until + * the sleep time reaches one jiffy, then switches over to task delay. Usage: + * + * dhd_timeout_start(&tmo, usec); + * while (!dhd_timeout_expired(&tmo)) + * if (poll_something()) + * break; + * if (dhd_timeout_expired(&tmo)) + * fatal(); + */ + +void +dhd_timeout_start(dhd_timeout_t *tmo, uint usec) +{ + tmo->limit = usec; + tmo->increment = 0; + tmo->elapsed = 0; + tmo->tick = jiffies_to_usecs(1); +} + +int +dhd_timeout_expired(dhd_timeout_t *tmo) +{ + /* Does nothing the first call */ + if (tmo->increment == 0) { + tmo->increment = 1; + return 0; + } + + if (tmo->elapsed >= tmo->limit) + return 1; + + /* Add the delay that's about to take place */ + tmo->elapsed += tmo->increment; + + if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) { + OSL_DELAY(tmo->increment); + tmo->increment *= 2; + if (tmo->increment > tmo->tick) + tmo->increment = tmo->tick; + } else { + wait_queue_head_t delay_wait; + DECLARE_WAITQUEUE(wait, current); + init_waitqueue_head(&delay_wait); + add_wait_queue(&delay_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + (void)schedule_timeout(1); + remove_wait_queue(&delay_wait, &wait); + set_current_state(TASK_RUNNING); + } + + return 0; +} + +int +dhd_net2idx(dhd_info_t *dhd, struct net_device *net) +{ + int i = 0; + + if (!dhd) { + DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__)); + return DHD_BAD_IF; + } + + while (i < 
DHD_MAX_IFS) { + if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net)) + return i; + i++; + } + + return DHD_BAD_IF; +} + +struct net_device * dhd_idx2net(void *pub, int ifidx) +{ + struct dhd_pub *dhd_pub = (struct dhd_pub *)pub; + struct dhd_info *dhd_info; + + if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS) + return NULL; + dhd_info = dhd_pub->info; + if (dhd_info && dhd_info->iflist[ifidx]) + return dhd_info->iflist[ifidx]->net; + return NULL; +} + +int +dhd_ifname2idx(dhd_info_t *dhd, char *name) +{ + int i = DHD_MAX_IFS; + + ASSERT(dhd); + + if (name == NULL || *name == '\0') + return 0; + + while (--i > 0) + if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ)) + break; + + DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); + + return i; /* default - the primary interface */ +} + +char * +dhd_ifname(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + ASSERT(dhd); + + if (ifidx < 0 || ifidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx]->net) + return dhd->iflist[ifidx]->net->name; + + return ""; +} + +uint8 * +dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx) +{ + int i; + dhd_info_t *dhd = (dhd_info_t *)dhdp; + + ASSERT(dhd); + for (i = 0; i < DHD_MAX_IFS; i++) + if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx) + return dhd->iflist[i]->mac_addr; + + return NULL; +} + +static void +_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) +{ + struct net_device *dev; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *mclist; +#endif // endif + uint32 allmulti, cnt; + + wl_ioctl_t ioc; + char *buf, *bufp; + uint buflen; + int ret; + +#ifdef MCAST_LIST_ACCUMULATION + int i; + uint32 cnt_iface[DHD_MAX_IFS]; + cnt = 0; + allmulti = 
0; + + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + dev = dhd->iflist[i]->net; + if (!dev) + continue; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + cnt_iface[i] = netdev_mc_count(dev); + cnt += cnt_iface[i]; +#else + cnt += dev->mc_count; +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ + + /* Determine initial value of allmulti flag */ + allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; + } + } +#else /* !MCAST_LIST_ACCUMULATION */ + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx)); + return; + } + dev = dhd->iflist[ifidx]->net; + if (!dev) + return; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + cnt = netdev_mc_count(dev); +#else + cnt = dev->mc_count; +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ + + /* Determine initial value of allmulti flag */ + allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; +#endif /* MCAST_LIST_ACCUMULATION */ + +#ifdef PASS_ALL_MCAST_PKTS +#ifdef PKT_FILTER_SUPPORT + if (!dhd->pub.early_suspended) +#endif /* PKT_FILTER_SUPPORT */ + allmulti = TRUE; +#endif /* PASS_ALL_MCAST_PKTS */ + + /* Send down the multicast list first. 
*/ + + buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); + if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + return; + } + + strncpy(bufp, "mcast_list", buflen - 1); + bufp[buflen - 1] = '\0'; + bufp += strlen("mcast_list") + 1; + + cnt = htol32(cnt); + memcpy(bufp, &cnt, sizeof(cnt)); + bufp += sizeof(cnt); + +#ifdef MCAST_LIST_ACCUMULATION + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i)); + dev = dhd->iflist[i]->net; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + netdev_for_each_mc_addr(ha, dev) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + if (!cnt_iface[i]) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + DHD_TRACE(("_dhd_set_multicast_list: cnt " + "%d " MACDBG "\n", + cnt_iface[i], MAC2STRDBG(ha->addr))); + cnt_iface[i]--; + } +#else /* LINUX < 2.6.35 */ + for (mclist = dev->mc_list; (mclist && (cnt_iface[i] > 0)); + cnt_iface[i]--, mclist = mclist->next) { + memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + } +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ + } + } +#else /* !MCAST_LIST_ACCUMULATION */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // 
endif + netdev_for_each_mc_addr(ha, dev) { + if (!cnt) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + cnt--; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif +#else /* LINUX < 2.6.35 */ + for (mclist = dev->mc_list; (mclist && (cnt > 0)); + cnt--, mclist = mclist->next) { + memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + } +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#endif /* MCAST_LIST_ACCUMULATION */ + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set mcast_list failed, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + allmulti = cnt ? TRUE : allmulti; + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Now send the allmulti setting. This is based on the setting in the + * net_device flags, but might be modified above to be turned on if we + * were trying to set some addresses and dongle rejected it... + */ + + allmulti = htol32(allmulti); + ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti, + sizeof(allmulti), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: set allmulti %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } + + /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ + +#ifdef MCAST_LIST_ACCUMULATION + allmulti = 0; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + dev = dhd->iflist[i]->net; + allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE; + } + } +#else + allmulti = (dev->flags & IFF_PROMISC) ? 
TRUE : FALSE; +#endif /* MCAST_LIST_ACCUMULATION */ + + allmulti = htol32(allmulti); + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_PROMISC; + ioc.buf = &allmulti; + ioc.len = sizeof(allmulti); + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set promisc %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } +} + +int +_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr) +{ + int ret; + + ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr, + ETHER_ADDR_LEN, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx))); + } else { + memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN); + if (ifidx == 0) + memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN); + } + + return ret; +} + +#ifdef DHD_PSTA +/* Get psta/psr configuration configuration */ +int dhd_get_psta_mode(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return (int)dhd->psta_mode; +} +/* Set psta/psr configuration configuration */ +int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val) +{ + dhd_info_t *dhd = dhdp->info; + dhd->psta_mode = val; + return 0; +} +#endif /* DHD_PSTA */ + +#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER)) +static void +dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if ( +#ifdef DHD_L2_FILTER + (ifp->block_ping) || +#endif // endif +#ifdef DHD_WET + (dhd->wet_mode) || +#endif // endif +#ifdef DHD_MCAST_REGEN + (ifp->mcast_regen_bss_enable) || +#endif // endif + FALSE) { + ifp->rx_pkt_chainable = FALSE; + } +} +#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */ + +#ifdef DHD_WET +/* Get wet configuration configuration */ +int dhd_get_wet_mode(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return (int)dhd->wet_mode; +} + +/* Set wet 
configuration configuration */ +int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val) +{ + dhd_info_t *dhd = dhdp->info; + dhd->wet_mode = val; + dhd_update_rx_pkt_chainable_state(dhdp, 0); + return 0; +} +#endif /* DHD_WET */ + +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +int32 dhd_role_to_nl80211_iftype(int32 role) +{ + switch (role) { + case WLC_E_IF_ROLE_STA: + return NL80211_IFTYPE_STATION; + case WLC_E_IF_ROLE_AP: + return NL80211_IFTYPE_AP; + case WLC_E_IF_ROLE_WDS: + return NL80211_IFTYPE_WDS; + case WLC_E_IF_ROLE_P2P_GO: + return NL80211_IFTYPE_P2P_GO; + case WLC_E_IF_ROLE_P2P_CLIENT: + return NL80211_IFTYPE_P2P_CLIENT; + case WLC_E_IF_ROLE_IBSS: + case WLC_E_IF_ROLE_NAN: + return NL80211_IFTYPE_ADHOC; + default: + return NL80211_IFTYPE_UNSPECIFIED; + } +} +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + +static void +dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_event_t *if_event = event_info; + int ifidx, bssidx; + int ret; +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + struct wl_if_event_info info; +#else + struct net_device *ndev; +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + + BCM_REFERENCE(ret); + if (event != DHD_WQ_WORK_IF_ADD) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + bssidx = if_event->event.bssidx; + DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx)); + +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (if_event->event.ifidx > 0) { + u8 *mac_addr; + bzero(&info, sizeof(info)); + 
info.ifidx = ifidx; + info.bssidx = bssidx; + info.role = if_event->event.role; + strncpy(info.name, if_event->name, IFNAMSIZ); + if (is_valid_ether_addr(if_event->mac)) { + mac_addr = if_event->mac; + } else { + mac_addr = NULL; + } + + if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net, + &info, mac_addr, NULL, true) == NULL) { + /* Do the post interface create ops */ + DHD_ERROR(("Post ifcreate ops failed. Returning \n")); + goto done; + } + } +#else + /* This path is for non-android case */ + /* The interface name in host and in event msg are same */ + /* if name in event msg is used to create dongle if list on host */ + ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name, + if_event->mac, bssidx, TRUE, if_event->name); + if (!ndev) { + DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__)); + goto done; + } + + DHD_PERIM_UNLOCK(&dhd->pub); + ret = dhd_register_if(&dhd->pub, ifidx, TRUE); + DHD_PERIM_LOCK(&dhd->pub); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + goto done; + } +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + +#ifndef PCIE_FULL_DONGLE + /* Turn on AP isolation in the firmware for interfaces operating in AP mode */ + if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) { + uint32 var_int = 1; + ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int), + NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + } + } +#endif /* PCIE_FULL_DONGLE */ + +done: + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_ifdel_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx; + dhd_if_event_t *if_event = 
event_info; + + if (event != DHD_WQ_WORK_IF_DEL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + DHD_TRACE(("Removing interface with idx %d\n", ifidx)); + + DHD_PERIM_UNLOCK(&dhd->pub); + if (!dhd->pub.info->iflist[ifidx]) { + /* No matching netdev found */ + DHD_ERROR(("Netdev not found! Do nothing.\n")); + goto done; + } +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (if_event->event.ifidx > 0) { + /* Do the post interface del ops */ + if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net, + true, if_event->event.ifidx) != 0) { + DHD_TRACE(("Post ifdel ops failed. Returning \n")); + goto done; + } + } +#else + /* For non-cfg80211 drivers */ + dhd_remove_if(&dhd->pub, ifidx, TRUE); +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + +done: + DHD_PERIM_LOCK(&dhd->pub); + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +#ifdef DHD_UPDATE_INTF_MAC +static void +dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx; + dhd_if_event_t *if_event = event_info; + + if (event != DHD_WQ_WORK_IF_UPDATE) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, 
ifidx)); + + dhd_op_if_update(&dhd->pub, ifidx); + + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx) +{ + dhd_info_t * dhdinfo = NULL; + dhd_if_t * ifp = NULL; + int ret = 0; + char buf[128]; + + if ((NULL==dhdpub)||(NULL==dhdpub->info)) { + DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__)); + return -1; + } else { + dhdinfo = (dhd_info_t *)dhdpub->info; + ifp = dhdinfo->iflist[ifidx]; + if (NULL==ifp) { + DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__)); + return -2; + } + } + + DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx)); + // Get MAC address + strcpy(buf, "cur_etheraddr"); + ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx); + if (0>ret) { + DHD_ERROR(("Failed to upudate the MAC address for itf=%s, ret=%d\n", ifp->name, ret)); + // avoid collision + dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1; + // force locally administrate address + ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr); + } else { + DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n", + ifp->name, ifp->idx, + (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2], + (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5])); + memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN); + if (dhdinfo->iflist[ifp->idx]->net) { + memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN); + } + } + + return ret; +} +#endif /* DHD_UPDATE_INTF_MAC */ + +static void +dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_t *ifp = event_info; + + if (event != DHD_WQ_WORK_SET_MAC) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + 
DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + // terence 20160907: fix for not able to set mac when wlan0 is down + if (ifp == NULL || !ifp->set_macaddress) { + goto done; + } + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__)); + ifp->set_macaddress = FALSE; + if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0) + DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__)); + else + DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__)); + +done: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx = (int)((long int)event_info); + dhd_if_t *ifp = NULL; + + if (event != DHD_WQ_WORK_SET_MCAST_LIST) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ifp = dhd->iflist[ifidx]; + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + ifidx = ifp->idx; + +#ifdef MCAST_LIST_ACCUMULATION + ifidx = 0; +#endif /* MCAST_LIST_ACCUMULATION */ + + _dhd_set_multicast_list(dhd, ifidx); + DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx)); + +done: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static int +dhd_set_mac_address(struct net_device *dev, void *addr) +{ + int ret = 0; + + dhd_info_t *dhd = DHD_DEV_INFO(dev); + struct sockaddr *sa = (struct sockaddr *)addr; + 
int ifidx; + dhd_if_t *dhdif; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return -1; + + dhdif = dhd->iflist[ifidx]; + + dhd_net_if_lock_local(dhd); + memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN); + dhdif->set_macaddress = TRUE; + dhd_net_if_unlock_local(dhd); + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC, + dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW); + return ret; +} + +static void +dhd_set_multicast_list(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return; + + dhd->iflist[ifidx]->set_multicast = TRUE; + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx), + DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW); + + // terence 20160907: fix for not able to set mac when wlan0 is down + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], + DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW); +} + +#ifdef DHD_UCODE_DOWNLOAD +/* Get ucode path */ +char * +dhd_get_ucode_path(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return dhd->uc_path; +} +#endif /* DHD_UCODE_DOWNLOAD */ + +#ifdef PROP_TXSTATUS +int +dhd_os_wlfc_block(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + ASSERT(di != NULL); + /* terence 20161229: don't do spin lock if proptx not enabled */ + if (disable_proptx) + return 1; +#ifdef BCMDBUS + spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags); +#else + spin_lock_bh(&di->wlfc_spinlock); +#endif /* BCMDBUS */ + return 1; +} + +int +dhd_os_wlfc_unblock(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + + ASSERT(di != NULL); + /* terence 20161229: don't do spin lock if proptx not enabled */ + if (disable_proptx) + return 1; +#ifdef BCMDBUS + spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags); +#else + 
spin_unlock_bh(&di->wlfc_spinlock); +#endif /* BCMDBUS */ + return 1; +} + +#endif /* PROP_TXSTATUS */ + +#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP) +typedef struct { + uint16 type; + const char *str; +} PKTTYPE_INFO; + +static const PKTTYPE_INFO packet_type_info[] = +{ + { ETHER_TYPE_IP, "IP" }, + { ETHER_TYPE_ARP, "ARP" }, + { ETHER_TYPE_BRCM, "BRCM" }, + { ETHER_TYPE_802_1X, "802.1X" }, + { ETHER_TYPE_WAI, "WAPI" }, + { 0, ""} +}; + +static const char *_get_packet_type_str(uint16 type) +{ + int i; + int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1; + + for (i = 0; i < n; i++) { + if (packet_type_info[i].type == type) + return packet_type_info[i].str; + } + + return packet_type_info[n].str; +} + +void +dhd_trx_dump(struct net_device *ndev, uint8 *dump_data, uint datalen, bool tx) +{ + uint16 protocol; + char *ifname; + + protocol = (dump_data[12] << 8) | dump_data[13]; + ifname = ndev ? ndev->name : "N/A"; + + if (protocol != ETHER_TYPE_BRCM) { + DHD_ERROR(("%s DUMP[%s] - %s\n", tx?"Tx":"Rx", ifname, + _get_packet_type_str(protocol))); +#if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP) + prhex("Data", dump_data, datalen); +#endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */ + } +} +#endif /* DHD_TX_DUMP || DHD_RX_DUMP */ + +/* This routine do not support Packet chain feature, Currently tested for + * proxy arp feature + */ +int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p) +{ + struct sk_buff *skb; + void *skbhead = NULL; + void *skbprev = NULL; + dhd_if_t *ifp; + ASSERT(!PKTISCHAINED(p)); + skb = PKTTONATIVE(dhdp->osh, p); + + ifp = dhdp->info->iflist[ifidx]; + skb->dev = ifp->net; + + skb->protocol = eth_type_trans(skb, skb->dev); + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx(skb); + } else { + if (dhdp->info->rxthread_enabled) { + if (!skbhead) { + skbhead = skb; + } else { + PKTSETNEXT(dhdp->osh, skbprev, skb); + } + skbprev = skb; + } else { + /* If the receive is 
not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + } + + if (dhdp->info->rxthread_enabled && skbhead) + dhd_sched_rxf(dhdp, skbhead); + + return BCME_OK; +} + +int BCMFASTPATH +__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = BCME_OK; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh = NULL; +#if defined(DHD_L2_FILTER) + dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx); +#endif // endif + + /* Reject if down */ + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + /* free the packet here since the caller won't */ + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + +#ifdef PCIE_FULL_DONGLE + if (dhdp->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, TRUE); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */ + } +#endif /* PCIE_FULL_DONGLE */ + + /* Reject if pktlen > MAX_MTU_SZ */ + if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) { + /* free the packet here since the caller won't */ + dhdp->tx_big_packets++; + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + +#ifdef DHD_L2_FILTER + /* if dhcp_unicast is enabled, we need to convert the */ + /* broadcast DHCP ACK/REPLY packets to Unicast. 
*/ + if (ifp->dhcp_unicast) { + uint8* mac_addr; + uint8* ehptr = NULL; + int ret; + ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr); + if (ret == BCME_OK) { + /* if given mac address having valid entry in sta list + * copy the given mac address, and return with BCME_OK + */ + if (dhd_find_sta(dhdp, ifidx, mac_addr)) { + ehptr = PKTDATA(dhdp->osh, pktbuf); + bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + } + } + } + + if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } + + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } +#endif /* DHD_L2_FILTER */ + /* Update multicast statistic */ + if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + eh = (struct ether_header *)pktdata; + + if (ETHER_ISMULTI(eh->ether_dhost)) + dhdp->tx_multicast++; + if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) { +#ifdef DHD_LOSSLESS_ROAMING + uint8 prio = (uint8)PKTPRIO(pktbuf); + + /* back up 802.1x's priority */ + dhdp->prio_8021x = prio; +#endif /* DHD_LOSSLESS_ROAMING */ + DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED); + atomic_inc(&dhd->pend_8021x_cnt); +#if defined(WL_CFG80211) && defined(WL_WPS_SYNC) + wl_handle_wps_states(dhd_idx2net(dhdp, ifidx), + pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE); +#endif /* WL_CFG80211 && WL_WPS_SYNC */ + dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), pktdata, TRUE); + } + + if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { +#ifdef DHD_DHCP_DUMP + dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE); +#endif /* DHD_DHCP_DUMP 
*/ +#ifdef DHD_ICMP_DUMP + dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE); +#endif /* DHD_ICMP_DUMP */ + } + } else { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + + { + /* Look into the packet and update the packet priority */ +#ifndef PKTPRIO_OVERRIDE + if (PKTPRIO(pktbuf) == 0) +#endif /* !PKTPRIO_OVERRIDE */ + { +#if defined(QOS_MAP_SET) + pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE); +#else + pktsetprio(pktbuf, FALSE); +#endif /* QOS_MAP_SET */ + } +#ifndef PKTPRIO_OVERRIDE + else { + /* Some protocols like OZMO use priority values from 256..263. + * these are magic values to indicate a specific 802.1d priority. + * make sure that priority field is in range of 0..7 + */ + PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7); + } +#endif /* !PKTPRIO_OVERRIDE */ + } + +#ifdef PCIE_FULL_DONGLE + /* + * Lkup the per interface hash table, for a matching flowring. If one is not + * available, allocate a unique flowid and add a flowring entry. + * The found or newly created flowid is placed into the pktbuf's tag. 
+ */ + ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf); + if (ret != BCME_OK) { + PKTCFREE(dhd->pub.osh, pktbuf, TRUE); + return ret; + } +#endif // endif + +#if defined(DHD_TX_DUMP) + dhd_trx_dump(dhd_idx2net(dhdp, ifidx), PKTDATA(dhdp->osh, pktbuf), + PKTLEN(dhdp->osh, pktbuf), TRUE); +#endif + /* terence 20150901: Micky add to ajust the 802.1X priority */ + /* Set the 802.1X packet with the highest priority 7 */ + if (dhdp->conf->pktprio8021x >= 0) + pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x); + +#ifdef PROP_TXSTATUS + if (dhd_wlfc_is_supported(dhdp)) { + /* store the interface ID */ + DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx); + + /* store destination MAC in the tag as well */ + DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost); + + /* decide which FIFO this packet belongs to */ + if (ETHER_ISMULTI(eh->ether_dhost)) + /* one additional queue index (highest AC + 1) is used for bc/mc queue */ + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT); + else + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf))); + } else +#endif /* PROP_TXSTATUS */ + { + /* If the protocol uses a data header, apply it */ + dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + } + + /* Use bus module to send data frame */ +#ifdef PROP_TXSTATUS + { + if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata, + dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) { + /* non-proptxstatus way */ +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ + } + } +#else +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ +#endif /* PROP_TXSTATUS */ +#ifdef BCMDBUS + if (ret) + PKTCFREE(dhdp->osh, pktbuf, TRUE); +#endif /* BCMDBUS */ + + return ret; +} + +int BCMFASTPATH +dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = 0; + unsigned long flags; + dhd_if_t *ifp; 
+ + DHD_GENERAL_LOCK(dhdp, flags); + ifp = dhd_get_ifp(dhdp, ifidx); + if (!ifp || ifp->del_in_progress) { + DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n", + __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0)); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhdp->busstate)); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT); + DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp); + DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT); + dhd_os_tx_completion_wake(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + DHD_GENERAL_UNLOCK(dhdp, flags); + + ret = __dhd_sendpkt(dhdp, ifidx, pktbuf); + + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp); + DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT); + dhd_os_tx_completion_wake(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + return ret; +} + +#if defined(DHD_LB_TXP) + +int BCMFASTPATH +dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, + int ifidx, void *skb) +{ + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt); + + /* If the feature is disabled run-time do TX from here */ + if (atomic_read(&dhd->lb_txp_active) == 0) { + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + return __dhd_sendpkt(&dhd->pub, ifidx, skb); + } + + /* Store the address of net device and interface index in the Packet tag */ + DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), 
net); + DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx); + + /* Enqueue the skb into tx_pend_queue */ + skb_queue_tail(&dhd->tx_pend_queue, skb); + + DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net)); + + /* Dispatch the Tx job to be processed by the tx_tasklet */ + dhd_lb_tx_dispatch(&dhd->pub); + + return NETDEV_TX_OK; +} +#endif /* DHD_LB_TXP */ + +int BCMFASTPATH +dhd_start_xmit(struct sk_buff *skb, struct net_device *net) +{ + int ret; + uint datalen; + void *pktbuf; + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp = NULL; + int ifidx; + unsigned long flags; + uint8 htsfdlystat_sz = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd_query_bus_erros(&dhd->pub)) { + return -ENODEV; + } + + /* terence 2017029: Reject in early suspend */ + if (!dhd->pub.conf->xmit_in_suspend && dhd->pub.early_suspended) { + dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } + + DHD_GENERAL_LOCK(&dhd->pub, flags); + DHD_BUS_BUSY_SET_IN_TX(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + DHD_GENERAL_LOCK(&dhd->pub, flags); +#ifdef BCMPCIE + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); +#ifdef PCIE_FULL_DONGLE + /* Stop tx queues if suspend is in progress */ + if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) { + dhd_bus_stop_queue(dhd->pub.bus); + } +#endif /* PCIE_FULL_DONGLE */ + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif // endif + } +#else + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) 
state!!\n", + __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state)); + } +#endif + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + + /* Reject if down */ + if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) { + DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", + __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); + netif_stop_queue(net); + /* Send Event when bus down detected during data session */ + if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) { + DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__)); + dhd->pub.hang_reason = HANG_REASON_BUS_DOWN; + net_os_send_hang_message(net); + } + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif // endif + } + + ifp = DHD_DEV_IFP(net); + ifidx = DHD_DEV_IFIDX(net); + if (!ifp || (ifidx == DHD_BAD_IF) || + ifp->del_in_progress) { + DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n", + __FUNCTION__, ifidx, ifp, (ifp ? 
ifp->del_in_progress : 0))); + netif_stop_queue(net); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif // endif + } + + DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + ASSERT(ifidx == dhd_net2idx(dhd, net)); + ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx]))); + + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + + /* re-align socket buffer if "skb->data" is odd address */ + if (((unsigned long)(skb->data)) & 0x1) { + unsigned char *data = skb->data; + uint32 length = skb->len; + PKTPUSH(dhd->pub.osh, skb, 1); + memmove(skb->data, data, length); + PKTSETLEN(dhd->pub.osh, skb, length); + } + + datalen = PKTLEN(dhd->pub.osh, skb); + + /* Make sure there's enough room for any header */ + if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) { + struct sk_buff *skb2; + + DHD_INFO(("%s: insufficient headroom\n", + dhd_ifname(&dhd->pub, ifidx))); + dhd->pub.tx_realloc++; + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz); + + dev_kfree_skb(skb); + if ((skb = skb2) == NULL) { + DHD_ERROR(("%s: skb_realloc_headroom failed\n", + dhd_ifname(&dhd->pub, ifidx))); + ret = -ENOMEM; + goto done; + } + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + } + + /* Convert to packet */ + if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { + DHD_ERROR(("%s: PKTFRMNATIVE failed\n", + dhd_ifname(&dhd->pub, ifidx))); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto done; + } + +#ifdef DHD_WET + /* wet related packet proto manipulation should be done 
in DHD + since dongle doesn't have complete payload + */ + if (WET_ENABLED(&dhd->pub) && + (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) { + DHD_INFO(("%s:%s: wet send proc failed\n", + __FUNCTION__, dhd_ifname(&dhd->pub, ifidx))); + PKTFREE(dhd->pub.osh, pktbuf, FALSE); + ret = -EFAULT; + goto done; + } +#endif /* DHD_WET */ + +#ifdef DHD_PSTA + /* PSR related packet proto manipulation should be done in DHD + * since dongle doesn't have complete payload + */ + if (PSR_ENABLED(&dhd->pub) && + (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) { + + DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__, + dhd_ifname(&dhd->pub, ifidx))); + } +#endif /* DHD_PSTA */ + +#ifdef DHDTCPACK_SUPPRESS + if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) { + /* If this packet has been hold or got freed, just return */ + if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) { + ret = 0; + goto done; + } + } else { + /* If this packet has replaced another packet and got freed, just return */ + if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) { + ret = 0; + goto done; + } + } +#endif /* DHDTCPACK_SUPPRESS */ + + /* + * If Load Balance is enabled queue the packet + * else send directly from here. 
+ */ +#if defined(DHD_LB_TXP) + ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf); +#else + ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf); +#endif // endif + +done: + if (ret) { + ifp->stats.tx_dropped++; + dhd->pub.tx_dropped++; + } else { +#ifdef PROP_TXSTATUS + /* tx_packets counter can counted only when wlfc is disabled */ + if (!dhd_wlfc_is_supported(&dhd->pub)) +#endif // endif + { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } + } + + DHD_GENERAL_LOCK(&dhd->pub, flags); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); + DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT); + dhd_os_tx_completion_wake(&dhd->pub); + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + /* Return ok: we always eat the packet */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return 0; +#else + return NETDEV_TX_OK; +#endif // endif +} + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +void dhd_rx_wq_wakeup(struct work_struct *ptr) +{ + struct dhd_rx_tx_work *work; + struct dhd_pub * pub; + + work = container_of(ptr, struct dhd_rx_tx_work, work); + + pub = work->pub; + + DHD_RPM(("%s: ENTER. \n", __FUNCTION__)); + + if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) { + return; + } + + DHD_OS_WAKE_LOCK(pub); + if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) { + + // do nothing but wakeup the bus. 
+	pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
+	pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
+	}
+	DHD_OS_WAKE_UNLOCK(pub);
+	kfree(work);
+}
+
+/*
+ * Workqueue adapter used while the bus is runtime-suspended: wake the
+ * bus via runtime PM, transmit the queued skb with dhd_start_xmit(),
+ * then drop the PM reference and restart the netif queue. The work
+ * item is freed on every path.
+ */
+void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
+{
+	struct dhd_rx_tx_work *work;
+	/* initialize: 'ret' is reported below and was read uninitialized
+	 * when pm_runtime_get_sync() failed
+	 */
+	int ret = 0;
+	dhd_info_t *dhd;
+	struct dhd_bus * bus;
+	struct net_device *net;
+
+	work = container_of(ptr, struct dhd_rx_tx_work, work);
+
+	dhd = DHD_DEV_INFO(work->net);
+	/* save for logging: 'work' is freed before the error print below */
+	net = work->net;
+
+	bus = dhd->pub.bus;
+
+	if (atomic_read(&dhd->pub.block_bus)) {
+		kfree_skb(work->skb);
+		kfree(work);
+		dhd_netif_start_queue(bus);
+		return;
+	}
+
+	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
+		ret = dhd_start_xmit(work->skb, work->net);
+		pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+		pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
+	}
+	kfree(work);
+	dhd_netif_start_queue(bus);
+
+	if (ret)
+		netdev_err(net,
+			"error: dhd_start_xmit():%d\n", ret);
+}
+
+/*
+ * Transmit entry point that tolerates a runtime-suspended bus: if the
+ * bus is suspended, stop the queue and hand the skb to a workqueue
+ * (dhd_start_xmit_wq_adapter) which wakes the bus first; if the bus is
+ * up, transmit directly; otherwise fail with -ENODEV.
+ */
+int BCMFASTPATH
+dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
+{
+	struct dhd_rx_tx_work *start_xmit_work;
+	int ret;
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+	if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
+		DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
+
+		dhd_netif_stop_queue(dhd->pub.bus);
+
+		start_xmit_work = (struct dhd_rx_tx_work*)
+			kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
+
+		if (!start_xmit_work) {
+			netdev_err(net,
+				"error: failed to alloc start_xmit_work\n");
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
+		start_xmit_work->skb = skb;
+		start_xmit_work->net = net;
+		queue_work(dhd->tx_wq, &start_xmit_work->work);
+		ret = NET_XMIT_SUCCESS;
+
+	} else if (dhd->pub.busstate == DHD_BUS_DATA) {
+		ret = dhd_start_xmit(skb, net);
+	} else {
+		/* when bus is down */
+		ret = -ENODEV;
+	}
+
+exit:
+	return ret;
+}
+
+void
+dhd_bus_wakeup_work(dhd_pub_t *dhdp)
+{
+	struct dhd_rx_tx_work *rx_work;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
+
if (!rx_work) { + DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__)); + return; + } + + INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup); + rx_work->pub = dhdp; + queue_work(dhd->rx_wq, &rx_work->work); + +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +void +dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) +{ + struct net_device *net; + dhd_info_t *dhd = dhdp->info; + int i; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(dhd); + +#ifdef DHD_LOSSLESS_ROAMING + /* block flowcontrol during roaming */ + if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) { + return; + } +#endif // endif + + if (ifidx == ALL_INTERFACES) { + /* Flow control on all active interfaces */ + dhdp->txoff = state; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + net = dhd->iflist[i]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } + } else { + if (dhd->iflist[ifidx]) { + net = dhd->iflist[ifidx]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } +} + +#ifdef DHD_MCAST_REGEN +/* + * Description: This function is called to do the reverse translation + * + * Input eh - pointer to the ethernet header + */ +int32 +dhd_mcast_reverse_translation(struct ether_header *eh) +{ + uint8 *iph; + uint32 dest_ip; + + iph = (uint8 *)eh + ETHER_HDR_LEN; + dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); + + /* Only IP packets are handled */ + if (eh->ether_type != hton16(ETHER_TYPE_IP)) + return BCME_ERROR; + + /* Non-IPv4 multicast packets are not handled */ + if (IP_VER(iph) != IP_VER_4) + return BCME_ERROR; + + /* + * The packet has a multicast IP and unicast MAC. 
That means + * we have to do the reverse translation + */ + if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) { + ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip); + return BCME_OK; + } + + return BCME_ERROR; +} +#endif /* MCAST_REGEN */ + +#ifdef SHOW_LOGTRACE +static void +dhd_netif_rx_ni(struct sk_buff * skb) +{ + /* Do not call netif_recieve_skb as this workqueue scheduler is + * not from NAPI Also as we are not in INTR context, do not call + * netif_rx, instead call netif_rx_ni (for kerenl >= 2.6) which + * does netif_rx, disables irq, raise NET_IF_RX softirq and + * enables interrupts back + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ +} + +static int +dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = BCME_OK; + uint datalen; + bcm_event_msg_u_t evu; + void *data = NULL; + void *pktdata = NULL; + bcm_event_t *pvt_data; + uint pktlen; + + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + /* In dhd_rx_frame, header is stripped using skb_pull + * of size ETH_HLEN, so adjust pktlen accordingly + */ + pktlen = skb->len + ETH_HLEN; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + pktdata = (void *)skb_mac_header(skb); +#else + pktdata = (void *)skb->mac.raw; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ + + ret = wl_host_event_get_data(pktdata, pktlen, &evu); + + if (ret != BCME_OK) { + DHD_ERROR(("%s: wl_host_event_get_data err = %d\n", + __FUNCTION__, ret)); + goto exit; + } + + datalen = ntoh32(evu.event.datalen); + + pvt_data = (bcm_event_t *)pktdata; + data = &pvt_data[1]; + + dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen); + +exit: + return ret; +} + +/* + * dhd_event_logtrace_process_items processes + * each skb from 
evt_trace_queue. + * Returns TRUE if more packets to be processed + * else returns FALSE + */ + +static int +dhd_event_logtrace_process_items(dhd_info_t *dhd) +{ + dhd_pub_t *dhdp; + struct sk_buff *skb; + uint32 qlen; + uint32 process_len; + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return 0; + } + + dhdp = &dhd->pub; + + if (!dhdp) { + DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__)); + return 0; + } + + qlen = skb_queue_len(&dhd->evt_trace_queue); + process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND); + + /* Run while loop till bound is reached or skb queue is empty */ + while (process_len--) { + int ifid = 0; + skb = skb_dequeue(&dhd->evt_trace_queue); + if (skb == NULL) { + DHD_ERROR(("%s: skb is NULL, which is not valid case\n", + __FUNCTION__)); + break; + } + BCM_REFERENCE(ifid); +#ifdef PCIE_FULL_DONGLE + /* Check if pkt is from INFO ring or WLC_E_TRACE */ + ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); + if (ifid == DHD_DUMMY_INFO_IF) { + /* Process logtrace from info rings */ + dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data); + } else +#endif /* PCIE_FULL_DONGLE */ + { + /* Processing WLC_E_TRACE case OR non PCIE PCIE_FULL_DONGLE case */ + dhd_event_logtrace_pkt_process(dhdp, skb); + } + + /* Send packet up if logtrace_pkt_sendup is TRUE */ + if (dhdp->logtrace_pkt_sendup) { +#ifdef DHD_USE_STATIC_CTRLBUF + /* If bufs are allocated via static buf pool + * and logtrace_pkt_sendup enabled, make a copy, + * free the local one and send the copy up. + */ + void *npkt = PKTDUP(dhdp->osh, skb); + /* Clone event and send it up */ + PKTFREE_STATIC(dhdp->osh, skb, FALSE); + if (npkt) { + skb = npkt; + } else { + DHD_ERROR(("skb clone failed. 
dropping logtrace pkt.\n")); + /* Packet is already freed, go to next packet */ + continue; + } +#endif /* DHD_USE_STATIC_CTRLBUF */ +#ifdef PCIE_FULL_DONGLE + /* For infobuf packets as if is DHD_DUMMY_INFO_IF, + * to send skb to network layer, assign skb->dev with + * Primary interface n/w device + */ + if (ifid == DHD_DUMMY_INFO_IF) { + skb = PKTTONATIVE(dhdp->osh, skb); + skb->dev = dhd->iflist[0]->net; + } +#endif /* PCIE_FULL_DONGLE */ + /* Send pkt UP */ + dhd_netif_rx_ni(skb); + } else { + /* Don't send up. Free up the packet. */ +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } + } + + /* Reschedule if more packets to be processed */ + return (qlen >= DHD_EVENT_LOGTRACE_BOUND); +} + +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE +static int +dhd_logtrace_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub; + int ret; + + while (1) { + dhdp->logtrace_thr_ts.entry_time = OSL_SYSUPTIME_US(); + if (!binary_sema_down(tsk)) { + dhdp->logtrace_thr_ts.sem_down_time = OSL_SYSUPTIME_US(); + SMP_RD_BARRIER_DEPENDS(); + if (dhd->pub.dongle_reset == FALSE) { + do { +#ifdef EWP_EDL + /* check if EDL is being used */ + if (dhd->pub.dongle_edl_support) { + ret = dhd_prot_process_edl_complete(&dhd->pub, + &dhd->event_data); + } else { + ret = dhd_event_logtrace_process_items(dhd); + } +#else + ret = dhd_event_logtrace_process_items(dhd); +#endif /* EWP_EDL */ + /* if ret > 0, bound has reached so to be fair to other + * processes need to yield the scheduler. + * The comment above yield()'s definition says: + * If you want to use yield() to wait for something, + * use wait_event(). + * If you want to use yield() to be 'nice' for others, + * use cond_resched(). + * If you still want to use yield(), do not! 
+ */ + if (ret > 0) { + cond_resched(); + OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS); + } else if (ret < 0) { + DHD_ERROR(("%s: ERROR should not reach here\n", + __FUNCTION__)); + } + } while (ret > 0); + } + /* Check terminated after processing the items */ + if (tsk->terminated) { + DHD_ERROR(("%s: task terminated\n", __FUNCTION__)); + break; + } + if (tsk->flush_ind) { + DHD_ERROR(("%s: flushed\n", __FUNCTION__)); + dhdp->logtrace_thr_ts.flush_time = OSL_SYSUPTIME_US(); + tsk->flush_ind = 0; + complete(&tsk->flushed); + } + } else { + DHD_ERROR(("%s: unexpted break\n", __FUNCTION__)); + dhdp->logtrace_thr_ts.unexpected_break_time = OSL_SYSUPTIME_US(); + break; + } + } + complete_and_exit(&tsk->completed, 0); + dhdp->logtrace_thr_ts.complete_time = OSL_SYSUPTIME_US(); +} +#else +static void +dhd_event_logtrace_process(struct work_struct * work) +{ + int ret = 0; +/* Ignore compiler warnings due to -Werror=cast-qual */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + struct delayed_work *dw = to_delayed_work(work); + struct dhd_info *dhd = + container_of(dw, struct dhd_info, event_log_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif +#ifdef EWP_EDL + if (dhd->pub.dongle_edl_support) { + ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data); + } else { + ret = dhd_event_logtrace_process_items(dhd); + } +#else + ret = dhd_event_logtrace_process_items(dhd); +#endif /* EWP_EDL */ + + if (ret > 0) { + schedule_delayed_work(&(dhd)->event_log_dispatcher_work, + msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS)); + } + + return; +} +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + +void +dhd_schedule_logtrace(void *dhd_info) +{ + dhd_info_t *dhd = (dhd_info_t *)dhd_info; + +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + if (dhd->thr_logtrace_ctl.thr_pid >= 0) { + 
binary_sema_up(&dhd->thr_logtrace_ctl); + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + schedule_delayed_work(&dhd->event_log_dispatcher_work, 0); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + return; +} + +void +dhd_cancel_logtrace_process_sync(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + if (dhd->thr_logtrace_ctl.thr_pid >= 0) { + PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl); + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + cancel_delayed_work_sync(&dhd->event_log_dispatcher_work); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +void +dhd_flush_logtrace_process(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + if (dhd->thr_logtrace_ctl.thr_pid >= 0) { + PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl); + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + flush_delayed_work(&dhd->event_log_dispatcher_work); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +int +dhd_init_logtrace_process(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID; + PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread"); + if (dhd->thr_logtrace_ctl.thr_pid < 0) { + DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__)); + return BCME_ERROR; + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succedded\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } +#else + INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process); +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + return BCME_OK; +} + +int +dhd_reinit_logtrace_process(dhd_info_t *dhd) +{ +#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE + /* Re-init only if PROC_STOP from dhd_stop was called + * which can be checked via thr_pid + */ + if (dhd->thr_logtrace_ctl.thr_pid < 
0) { + PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, + 0, "dhd_logtrace_thread"); + if (dhd->thr_logtrace_ctl.thr_pid < 0) { + DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__)); + return BCME_ERROR; + } else { + DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succedded\n", __FUNCTION__, + dhd->thr_logtrace_ctl.thr_pid)); + } + } +#else + /* No need to re-init for WQ as calcel_delayed_work_sync will + * will not delete the WQ + */ +#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */ + return BCME_OK; +} + +void +dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + +#ifdef PCIE_FULL_DONGLE + /* Add ifidx in the PKTTAG */ + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx); +#endif /* PCIE_FULL_DONGLE */ + skb_queue_tail(&dhd->evt_trace_queue, pktbuf); + + dhd_schedule_logtrace(dhd); +} + +void +dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } +} + +void +dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg) +{ + struct sk_buff *skb = NULL; + uint32 pktsize = 0; + void *pkt = NULL; + info_buf_payload_hdr_t *infobuf = NULL; + dhd_info_t *dhd = dhdp->info; + uint8 *pktdata = NULL; + + if (!msg) + return; + + /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|| */ + infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32)); + pktsize = ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) + + sizeof(uint32); + pkt = PKTGET(dhdp->osh, pktsize, FALSE); + if (!pkt) { + DHD_ERROR(("%s: skb alloc failed ! 
not sending event log up.\n", __FUNCTION__)); + } else { + PKTSETLEN(dhdp->osh, pkt, pktsize); + pktdata = PKTDATA(dhdp->osh, pkt); + memcpy(pktdata, msg, pktsize); + /* For infobuf packets assign skb->dev with + * Primary interface n/w device + */ + skb = PKTTONATIVE(dhdp->osh, pkt); + skb->dev = dhd->iflist[0]->net; + /* Send pkt UP */ + dhd_netif_rx_ni(skb); + } +} +#endif /* SHOW_LOGTRACE */ + +/** Called when a frame is received by the dongle on interface 'ifidx' */ +void +dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + uchar *eth; + uint len; + void *data, *pnext = NULL; + int i; + dhd_if_t *ifp; + wl_event_msg_t event; + int tout_rx = 0; + int tout_ctrl = 0; + void *skbhead = NULL; + void *skbprev = NULL; + uint16 protocol; + unsigned char *dump_data; +#ifdef DHD_MCAST_REGEN + uint8 interface_role; + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; +#endif // endif +#ifdef DHD_WAKE_STATUS + int pkt_wake = 0; + wake_counts_t *wcp = NULL; +#endif /* DHD_WAKE_STATUS */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { + struct ether_header *eh; + + pnext = PKTNEXT(dhdp->osh, pktbuf); + PKTSETNEXT(dhdp->osh, pktbuf, NULL); + + /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a + * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data + * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame). + */ + if (ifidx == DHD_DUMMY_INFO_IF) { + /* Event msg printing is called from dhd_rx_frame which is in Tasklet + * context in case of PCIe FD, in case of other bus this will be from + * DPC context. If we get bunch of events from Dongle then printing all + * of them from Tasklet/DPC context that too in data path is costly. + * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as + * events with type WLC_E_TRACE. 
+ * We'll print this console logs from the WorkQueue context by enqueing SKB + * here and Dequeuing will be done in WorkQueue and will be freed only if + * logtrace_pkt_sendup is TRUE + */ +#ifdef SHOW_LOGTRACE + dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf); +#else /* !SHOW_LOGTRACE */ + /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF, + * free the PKT here itself + */ +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ +#endif /* SHOW_LOGTRACE */ + continue; + } +#ifdef DHD_WAKE_STATUS +#ifdef BCMDBUS + wcp = NULL; +#else + pkt_wake = dhd_bus_get_bus_wake(dhdp); + wcp = dhd_bus_get_wakecount(dhdp); +#endif /* BCMDBUS */ + if (wcp == NULL) { + /* If wakeinfo count buffer is null do not update wake count values */ + pkt_wake = 0; + } +#endif /* DHD_WAKE_STATUS */ + + eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf); + + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) { + DHD_ERROR(("%s: ifp is NULL. drop packet\n", + __FUNCTION__)); + if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + } + continue; + } + + /* Dropping only data packets before registering net device to avoid kernel panic */ +#ifndef PROP_TXSTATUS_VSDB + if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) && + (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) +#else + if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) && + (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) +#endif /* PROP_TXSTATUS_VSDB */ + { + DHD_ERROR(("%s: net device is NOT registered yet. 
drop packet\n", + __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + +#ifdef PROP_TXSTATUS + if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) { + /* WLFC may send header only packet when + there is an urgent message but no packet to + piggy-back on + */ + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } +#endif // endif +#ifdef DHD_L2_FILTER + /* If block_ping is enabled drop the ping packet */ + if (ifp->block_ping) { + if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } + if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + continue; + } + } + if (ifp->block_tdls) { + if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_MCAST_REGEN + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + ASSERT(if_flow_lkup); + + interface_role = if_flow_lkup[ifidx].role; + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) && + !DHD_IF_ROLE_AP(dhdp, ifidx) && + ETHER_ISUCAST(eh->ether_dhost)) { + if (dhd_mcast_reverse_translation(eh) == BCME_OK) { +#ifdef DHD_PSTA + /* Change bsscfg to primary bsscfg for unicast-multicast packets */ + if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) || + (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) { + if (ifidx != 0) { + /* Let the primary in PSTA interface handle this + * frame after unicast to Multicast conversion + */ + 
ifp = dhd_get_ifp(dhdp, 0); + ASSERT(ifp); + } + } + } +#endif /* PSTA */ + } +#endif /* MCAST_REGEN */ + +#ifdef DHDTCPACK_SUPPRESS + dhd_tcpdata_info_get(dhdp, pktbuf); +#endif // endif + skb = PKTTONATIVE(dhdp->osh, pktbuf); + + ASSERT(ifp); + skb->dev = ifp->net; +#ifdef DHD_WET + /* wet related packet proto manipulation should be done in DHD + * since dongle doesn't have complete payload + */ + if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info, + pktbuf) < 0)) { + DHD_INFO(("%s:%s: wet recv proc failed\n", + __FUNCTION__, dhd_ifname(dhdp, ifidx))); + } +#endif /* DHD_WET */ + +#ifdef DHD_PSTA + if (PSR_ENABLED(dhdp) && + (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) { + DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__, + dhd_ifname(dhdp, ifidx))); + } +#endif /* DHD_PSTA */ + +#ifdef PCIE_FULL_DONGLE + if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) && + (!ifp->ap_isolate)) { + eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf); + if (ETHER_ISUCAST(eh->ether_dhost)) { + if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) { + dhd_sendpkt(dhdp, ifidx, pktbuf); + continue; + } + } else { + void *npktbuf = NULL; + if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) && + (npktbuf = PKTDUP(dhdp->osh, pktbuf)) != NULL) { + dhd_sendpkt(dhdp, ifidx, npktbuf); + } + } + } +#endif /* PCIE_FULL_DONGLE */ + + /* Get the protocol, maintain skb around eth_type_trans() + * The main reason for this hack is for the limitation of + * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len' + * to perform skb_pull inside vs ETH_HLEN. Since to avoid + * coping of the packet coming from the network stack to add + * BDC, Hardware header etc, during network interface registration + * we set the 'net->hard_header_len' to ETH_HLEN + extra space required + * for BDC, Hardware header etc. 
and not just the ETH_HLEN + */ + eth = skb->data; + len = skb->len; + + dump_data = skb->data; + + protocol = (skb->data[12] << 8) | skb->data[13]; + if (protocol == ETHER_TYPE_802_1X) { + DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED); +#if defined(WL_CFG80211) && defined(WL_WPS_SYNC) + wl_handle_wps_states(ifp->net, dump_data, len, FALSE); +#endif /* WL_CFG80211 && WL_WPS_SYNC */ + dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), dump_data, FALSE); + } + + if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) { +#ifdef DHD_DHCP_DUMP + dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE); +#endif /* DHD_DHCP_DUMP */ +#ifdef DHD_ICMP_DUMP + dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE); +#endif /* DHD_ICMP_DUMP */ + } +#ifdef DHD_RX_DUMP + dhd_trx_dump(dhd_idx2net(dhdp, ifidx), dump_data, skb->len, FALSE); +#endif /* DHD_RX_DUMP */ + + skb->protocol = eth_type_trans(skb, skb->dev); + + if (skb->pkt_type == PACKET_MULTICAST) { + dhd->pub.rx_multicast++; + ifp->stats.multicast++; + } + + skb->data = eth; + skb->len = len; + + DHD_DBG_PKT_MON_RX(dhdp, skb); + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + /* Process special event packets and then discard them */ + memset(&event, 0, sizeof(event)); + + if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) { + bcm_event_msg_u_t evu; + int ret_event; + int event_type; + + ret_event = wl_host_event_get_data( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + skb_mac_header(skb), +#else + skb->mac.raw, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ + len, &evu); + + if (ret_event != BCME_OK) { + DHD_ERROR(("%s: wl_host_event_get_data err = %d\n", + __FUNCTION__, ret_event)); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif // endif + continue; + } + + memcpy(&event, &evu.event, sizeof(wl_event_msg_t)); + event_type = ntoh32_ua((void *)&event.event_type); +#ifdef 
SHOW_LOGTRACE + /* Event msg printing is called from dhd_rx_frame which is in Tasklet + * context in case of PCIe FD, in case of other bus this will be from + * DPC context. If we get bunch of events from Dongle then printing all + * of them from Tasklet/DPC context that too in data path is costly. + * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as + * events with type WLC_E_TRACE. + * We'll print this console logs from the WorkQueue context by enqueing SKB + * here and Dequeuing will be done in WorkQueue and will be freed only if + * logtrace_pkt_sendup is true + */ + if (event_type == WLC_E_TRACE) { + DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__)); + dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf); + continue; + } +#endif /* SHOW_LOGTRACE */ + + ret_event = dhd_wl_host_event(dhd, ifidx, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + skb_mac_header(skb), +#else + skb->mac.raw, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ + len, &event, &data); + + wl_event_to_host_order(&event); + if (!tout_ctrl) + tout_ctrl = DHD_PACKET_TIMEOUT_MS; + +#if defined(PNO_SUPPORT) + if (event_type == WLC_E_PFN_NET_FOUND) { + /* enforce custom wake lock to garantee that Kernel not suspended */ + tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS; + } +#endif /* PNO_SUPPORT */ + if (numpkt != 1) { + DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n", + __FUNCTION__)); + } + +#ifdef DHD_WAKE_STATUS + if (unlikely(pkt_wake)) { +#ifdef DHD_WAKE_EVENT_STATUS + if (event.event_type < WLC_E_LAST) { + wcp->rc_event[event.event_type]++; + wcp->rcwake++; + pkt_wake = 0; + } +#endif /* DHD_WAKE_EVENT_STATUS */ + } +#endif /* DHD_WAKE_STATUS */ + + /* For delete virtual interface event, wl_host_event returns positive + * i/f index, do not proceed. just free the pkt. + */ + if ((event_type == WLC_E_IF) && (ret_event > 0)) { + DHD_ERROR(("%s: interface is deleted. 
Free event packet\n", + __FUNCTION__)); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif // endif + continue; + } + + /* + * For the event packets, there is a possibility + * of ifidx getting modifed.Thus update the ifp + * once again. + */ + ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); + ifp = dhd->iflist[ifidx]; +#ifndef PROP_TXSTATUS_VSDB + if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED))) +#else + if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) && + dhd->pub.up)) +#endif /* PROP_TXSTATUS_VSDB */ + { + DHD_ERROR(("%s: net device is NOT registered. drop event packet\n", + __FUNCTION__)); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif // endif + continue; + } + + if (dhdp->wl_event_enabled) { +#ifdef DHD_USE_STATIC_CTRLBUF + /* If event bufs are allocated via static buf pool + * and wl events are enabled, make a copy, free the + * local one and send the copy up. + */ + void *npkt = PKTDUP(dhdp->osh, skb); + /* Clone event and send it up */ + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); + if (npkt) { + skb = npkt; + } else { + DHD_ERROR(("skb clone failed. 
dropping event.\n")); + continue; + } +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + /* If event enabled not explictly set, drop events */ +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + continue; + } + } else { + tout_rx = DHD_PACKET_TIMEOUT_MS; + +#ifdef PROP_TXSTATUS + dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb)); +#endif /* PROP_TXSTATUS */ + +#ifdef DHD_WAKE_STATUS + if (unlikely(pkt_wake)) { + wcp->rxwake++; +#ifdef DHD_WAKE_RX_STATUS +#define ETHER_ICMP6_HEADER 20 +#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2) +#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN) +#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN) + + if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */ + wcp->rx_arp++; + if (dump_data[0] == 0xFF) { /* Broadcast */ + wcp->rx_bcast++; + } else if (dump_data[0] & 0x01) { /* Multicast */ + wcp->rx_mcast++; + if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) { + wcp->rx_multi_ipv6++; + if ((skb->len > ETHER_ICMP6_HEADER) && + (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) { + wcp->rx_icmpv6++; + if (skb->len > ETHER_ICMPV6_TYPE) { + switch (dump_data[ETHER_ICMPV6_TYPE]) { + case NDISC_ROUTER_ADVERTISEMENT: + wcp->rx_icmpv6_ra++; + break; + case NDISC_NEIGHBOUR_ADVERTISEMENT: + wcp->rx_icmpv6_na++; + break; + case NDISC_NEIGHBOUR_SOLICITATION: + wcp->rx_icmpv6_ns++; + break; + } + } + } + } else if (dump_data[2] == 0x5E) { + wcp->rx_multi_ipv4++; + } else { + wcp->rx_multi_other++; + } + } else { /* Unicast */ + wcp->rx_ucast++; + } +#undef ETHER_ICMP6_HEADER +#undef ETHER_IPV6_SADDR +#undef ETHER_IPV6_DAADR +#undef ETHER_ICMPV6_TYPE +#endif /* DHD_WAKE_RX_STATUS */ + pkt_wake = 0; + } +#endif /* DHD_WAKE_STATUS */ + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) + ifp->net->last_rx = jiffies; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */ + + if (ntoh16(skb->protocol) != 
ETHER_TYPE_BRCM) { + dhdp->dstats.rx_bytes += skb->len; + dhdp->rx_packets++; /* Local count */ + ifp->stats.rx_bytes += skb->len; + ifp->stats.rx_packets++; + } + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#if defined(DHD_LB_RXP) + netif_receive_skb(skb); +#else /* !defined(DHD_LB_RXP) */ + netif_rx(skb); +#endif /* !defined(DHD_LB_RXP) */ + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + } else { + if (dhd->rxthread_enabled) { + if (!skbhead) + skbhead = skb; + else + PKTSETNEXT(dhdp->osh, skbprev, skb); + skbprev = skb; + } else { + + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + +#if defined(ARGOS_NOTIFY_CB) + argos_register_notifier_deinit(); +#endif // endif +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB_RXP) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_receive_skb(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else /* !defined(DHD_LB_RXP) */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx_ni(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else + ulong flags; + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ +#endif /* !defined(DHD_LB_RXP) */ + } + } + } + + if (dhd->rxthread_enabled && skbhead) + 
dhd_sched_rxf(dhdp, skbhead); + + DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl); +} + +void +dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx) +{ + /* Linux version has nothing to do */ + return; +} + +void +dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh; + uint16 type; + + dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL); + + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); + type = ntoh16(eh->ether_type); + + if (type == ETHER_TYPE_802_1X) { + atomic_dec(&dhd->pend_8021x_cnt); + } + +#ifdef PROP_TXSTATUS + if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) { + dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))]; + uint datalen = PKTLEN(dhd->pub.osh, txp); + if (ifp != NULL) { + if (success) { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } else { + ifp->stats.tx_dropped++; + } + } + } +#endif // endif +} + +static struct net_device_stats * +dhd_get_stats(struct net_device *net) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + goto error; + } + + ifp = dhd_get_ifp_by_ndev(&dhd->pub, net); + if (!ifp) { + /* return empty stats */ + DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__)); + goto error; + } + + if (dhd->pub.up) { + /* Use the protocol to get dongle stats */ + dhd_prot_dstats(&dhd->pub); + } + return &ifp->stats; + +error: + memset(&net->stats, 0, sizeof(net->stats)); + return &net->stats; +} + +#ifndef BCMDBUS +static int +dhd_watchdog_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_watchdog_prio > 0) { + struct sched_param param; + param.sched_priority 
= (dhd_watchdog_prio < MAX_RT_PRIO)? + dhd_watchdog_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + + while (1) { + if (down_interruptible (&tsk->sema) == 0) { + unsigned long flags; + unsigned long jiffies_at_start = jiffies; + unsigned long time_lapse; +#ifdef BCMPCIE + DHD_OS_WD_WAKE_LOCK(&dhd->pub); +#endif /* BCMPCIE */ + + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { +#ifdef BCMPCIE + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); +#endif /* BCMPCIE */ + break; + } + + if (dhd->pub.dongle_reset == FALSE) { + DHD_TIMER(("%s:\n", __FUNCTION__)); + dhd_bus_watchdog(&dhd->pub); + + DHD_GENERAL_LOCK(&dhd->pub, flags); + /* Count the tick for reference */ + dhd->pub.tickcnt++; +#ifdef DHD_L2_FILTER + dhd_l2_filter_watchdog(&dhd->pub); +#endif /* DHD_L2_FILTER */ + time_lapse = jiffies - jiffies_at_start; + + /* Reschedule the watchdog */ + if (dhd->wd_timer_valid) { + mod_timer(&dhd->timer, + jiffies + + msecs_to_jiffies(dhd_watchdog_ms) - + min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse)); + } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + } +#ifdef BCMPCIE + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); +#endif /* BCMPCIE */ + } else { + break; + } + } + + complete_and_exit(&tsk->completed, 0); +} + +static void dhd_watchdog( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + struct timer_list *t +#else + ulong data +#endif +) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + dhd_info_t *dhd = from_timer(dhd, t, timer); +#else + dhd_info_t *dhd = (dhd_info_t *)data; +#endif + unsigned long flags; + + if (dhd->pub.dongle_reset) { + return; + } + + if (dhd->thr_wdt_ctl.thr_pid >= 0) { + up(&dhd->thr_wdt_ctl.sema); + return; + } + +#ifdef BCMPCIE + DHD_OS_WD_WAKE_LOCK(&dhd->pub); +#endif /* BCMPCIE */ + /* Call the bus module watchdog */ + dhd_bus_watchdog(&dhd->pub); + + DHD_GENERAL_LOCK(&dhd->pub, flags); + /* Count the tick for reference */ + dhd->pub.tickcnt++; + +#ifdef DHD_L2_FILTER + dhd_l2_filter_watchdog(&dhd->pub); +#endif /* DHD_L2_FILTER */ + 
/* Reschedule the watchdog */ + if (dhd->wd_timer_valid) + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#ifdef BCMPCIE + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); +#endif /* BCMPCIE */ +} + +#ifdef ENABLE_ADAPTIVE_SCHED +static void +dhd_sched_policy(int prio) +{ + struct sched_param param; + if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) { + param.sched_priority = 0; + setScheduler(current, SCHED_NORMAL, ¶m); + } else { + if (get_scheduler_policy(current) != SCHED_FIFO) { + param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + } +} +#endif /* ENABLE_ADAPTIVE_SCHED */ +#ifdef DEBUG_CPU_FREQ +static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) +{ + dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans); + struct cpufreq_freqs *freq = data; + if (dhd) { + if (!dhd->new_freq) + goto exit; + if (val == CPUFREQ_POSTCHANGE) { + DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n", + freq->new, freq->cpu)); + *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new; + } + } +exit: + return 0; +} +#endif /* DEBUG_CPU_FREQ */ + +static int +dhd_dpc_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_dpc_prio > 0) + { + struct sched_param param; + param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + +#ifdef CUSTOM_DPC_CPUCORE + set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE)); +#endif // endif +#ifdef CUSTOM_SET_CPUCORE + dhd->pub.current_dpc = current; +#endif /* CUSTOM_SET_CPUCORE */ + /* Run until signal received */ + while (1) { + if (dhd->pub.conf->dpc_cpucore >= 0) { + printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore); + 
set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore)); + dhd->pub.conf->dpc_cpucore = -1; + } + if (!binary_sema_down(tsk)) { +#ifdef ENABLE_ADAPTIVE_SCHED + dhd_sched_policy(dhd_dpc_prio); +#endif /* ENABLE_ADAPTIVE_SCHED */ + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + + /* Call bus dpc unless it indicated down (then clean stop) */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { +#ifdef DEBUG_DPC_THREAD_WATCHDOG + int resched_cnt = 0; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + dhd_os_wd_timer_extend(&dhd->pub, TRUE); + while (dhd_bus_dpc(dhd->pub.bus)) { + /* process all data */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + resched_cnt++; + if (resched_cnt > MAX_RESCHED_CNT) { + DHD_INFO(("%s Calling msleep to" + "let other processes run. \n", + __FUNCTION__)); + dhd->pub.dhd_bug_on = true; + resched_cnt = 0; + OSL_SLEEP(1); + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + } + dhd_os_wd_timer_extend(&dhd->pub, FALSE); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + } else { + if (dhd->pub.up) + dhd_bus_stop(dhd->pub.bus, TRUE); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + } + } else { + break; + } + } + complete_and_exit(&tsk->completed, 0); +} + +static int +dhd_rxf_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; +#if defined(WAIT_DEQUEUE) +#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */ + ulong watchdogTime = OSL_SYSUPTIME(); /* msec */ +#endif // endif + dhd_pub_t *pub = &dhd->pub; + + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_rxf_prio > 0) + { + struct sched_param param; + param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1); + setScheduler(current, SCHED_FIFO, ¶m); + } + +#ifdef CUSTOM_SET_CPUCORE + dhd->pub.current_rxf = current; +#endif /* CUSTOM_SET_CPUCORE */ + /* Run until signal received */ + while (1) { + if (dhd->pub.conf->rxf_cpucore >= 0) { + printf("%s: set rxf_cpucore %d\n", __FUNCTION__, 
dhd->pub.conf->rxf_cpucore); + set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore)); + dhd->pub.conf->rxf_cpucore = -1; + } + if (down_interruptible(&tsk->sema) == 0) { + void *skb; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) + ulong flags; +#endif // endif +#ifdef ENABLE_ADAPTIVE_SCHED + dhd_sched_policy(dhd_rxf_prio); +#endif /* ENABLE_ADAPTIVE_SCHED */ + + SMP_RD_BARRIER_DEPENDS(); + + if (tsk->terminated) { + break; + } + skb = dhd_rxf_dequeue(pub); + + if (skb == NULL) { + continue; + } + while (skb) { + void *skbnext = PKTNEXT(pub->osh, skb); + PKTSETNEXT(pub->osh, skb, NULL); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); + +#endif // endif + skb = skbnext; + } +#if defined(WAIT_DEQUEUE) + if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) { + OSL_SLEEP(1); + watchdogTime = OSL_SYSUPTIME(); + } +#endif // endif + + DHD_OS_WAKE_UNLOCK(pub); + } else { + break; + } + } + complete_and_exit(&tsk->completed, 0); +} + +#ifdef BCMPCIE +void dhd_dpc_enable(dhd_pub_t *dhdp) +{ +#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP) + dhd_info_t *dhd; + + if (!dhdp || !dhdp->info) + return; + dhd = dhdp->info; +#endif /* DHD_LB_RXP || DHD_LB_TXP */ + +#ifdef DHD_LB_RXP + __skb_queue_head_init(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LB_TXP + skb_queue_head_init(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ +} +#endif /* BCMPCIE */ + +#ifdef BCMPCIE +void +dhd_dpc_kill(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + if (!dhdp) { + return; + } + + dhd = dhdp->info; + + if (!dhd) { + return; + } + + if (dhd->thr_dpc_ctl.thr_pid < 0) { + tasklet_kill(&dhd->tasklet); + DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__)); + } + +#ifdef DHD_LB +#ifdef DHD_LB_RXP + cancel_work_sync(&dhd->rx_napi_dispatcher_work); + 
__skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#ifdef DHD_LB_TXP + cancel_work_sync(&dhd->tx_dispatcher_work); + skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + + /* Kill the Load Balancing Tasklets */ +#if defined(DHD_LB_TXC) + tasklet_kill(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ +#if defined(DHD_LB_TXP) + tasklet_kill(&dhd->tx_tasklet); +#endif /* DHD_LB_TXP */ +#endif /* DHD_LB */ +} + +void +dhd_dpc_tasklet_kill(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + if (!dhdp) { + return; + } + + dhd = dhdp->info; + + if (!dhd) { + return; + } + + if (dhd->thr_dpc_ctl.thr_pid < 0) { + tasklet_kill(&dhd->tasklet); + } +} +#endif /* BCMPCIE */ + +static void +dhd_dpc(ulong data) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)data; + + /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c] + * down below , wake lock is set, + * the tasklet is initialized in dhd_attach() + */ + /* Call bus dpc unless it indicated down (then clean stop) */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { +#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE) + DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt); +#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */ + if (dhd_bus_dpc(dhd->pub.bus)) { + tasklet_schedule(&dhd->tasklet); + } + } else { + dhd_bus_stop(dhd->pub.bus, TRUE); + } +} + +void +dhd_sched_dpc(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + DHD_OS_WAKE_LOCK(dhdp); + /* If the semaphore does not get up, + * wake unlock should be done here + */ + if (!binary_sema_up(&dhd->thr_dpc_ctl)) { + DHD_OS_WAKE_UNLOCK(dhdp); + } + return; + } else { + tasklet_schedule(&dhd->tasklet); + } +} +#endif /* BCMDBUS */ + +static void +dhd_sched_rxf(dhd_pub_t *dhdp, void *skb) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + DHD_OS_WAKE_LOCK(dhdp); + + DHD_TRACE(("dhd_sched_rxf: Enter\n")); + do { + if 
(dhd_rxf_enqueue(dhdp, skb) == BCME_OK) + break; + } while (1); + if (dhd->thr_rxf_ctl.thr_pid >= 0) { + up(&dhd->thr_rxf_ctl.sema); + } + return; +} + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef TOE +/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ +static int +dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) +{ + char buf[32]; + int ret; + + ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + + if (ret < 0) { + if (ret == -EIO) { + DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub, + ifidx))); + return -EOPNOTSUPP; + } + + DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + memcpy(toe_ol, buf, sizeof(uint32)); + return 0; +} + +/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ +static int +dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) +{ + int toe, ret; + + /* Set toe_ol as requested */ + ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: could not set toe_ol: ret=%d\n", + dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + /* Enable toe globally only if any components are enabled. 
*/ + toe = (toe_ol != 0); + ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + return 0; +} +#endif /* TOE */ + +#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE) +void dhd_set_scb_probe(dhd_pub_t *dhd) +{ + wl_scb_probe_t scb_probe; + char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)]; + int ret; + + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + return; + } + + ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__)); + } + + memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t)); + + scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE; + + ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__)); + return; + } +} +#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) +static void +dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + + snprintf(info->driver, sizeof(info->driver), "wl"); + snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version); +} + +struct ethtool_ops dhd_ethtool_ops = { + .get_drvinfo = dhd_ethtool_get_drvinfo +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) +static int +dhd_ethtool(dhd_info_t *dhd, void *uaddr) +{ + struct ethtool_drvinfo info; + char drvname[sizeof(info.driver)]; + uint32 cmd; +#ifdef TOE + struct ethtool_value edata; + uint32 toe_cmpnt, csum_dir; + int ret; +#endif // endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* all ethtool calls start with a cmd word */ + if (copy_from_user(&cmd, uaddr, sizeof (uint32))) + return -EFAULT; + + switch 
(cmd) { + case ETHTOOL_GDRVINFO: + /* Copy out any request driver name */ + if (copy_from_user(&info, uaddr, sizeof(info))) + return -EFAULT; + strncpy(drvname, info.driver, sizeof(info.driver)); + drvname[sizeof(info.driver)-1] = '\0'; + + /* clear struct for return */ + memset(&info, 0, sizeof(info)); + info.cmd = cmd; + + /* if dhd requested, identify ourselves */ + if (strcmp(drvname, "?dhd") == 0) { + snprintf(info.driver, sizeof(info.driver), "dhd"); + strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1); + info.version[sizeof(info.version) - 1] = '\0'; + } + + /* otherwise, require dongle to be up */ + else if (!dhd->pub.up) { + DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__)); + return -ENODEV; + } + + /* finally, report dongle driver type */ + else if (dhd->pub.iswl) + snprintf(info.driver, sizeof(info.driver), "wl"); + else + snprintf(info.driver, sizeof(info.driver), "xx"); + + snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version); + if (copy_to_user(uaddr, &info, sizeof(info))) + return -EFAULT; + DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__, + (int)sizeof(drvname), drvname, info.driver)); + break; + +#ifdef TOE + /* Get toe offload components from dongle */ + case ETHTOOL_GRXCSUM: + case ETHTOOL_GTXCSUM: + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + edata.cmd = cmd; + edata.data = (toe_cmpnt & csum_dir) ? 1 : 0; + + if (copy_to_user(uaddr, &edata, sizeof(edata))) + return -EFAULT; + break; + + /* Set toe offload components in dongle */ + case ETHTOOL_SRXCSUM: + case ETHTOOL_STXCSUM: + if (copy_from_user(&edata, uaddr, sizeof(edata))) + return -EFAULT; + + /* Read the current settings, update and write back */ + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_STXCSUM) ? 
TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + if (edata.data != 0) + toe_cmpnt |= csum_dir; + else + toe_cmpnt &= ~csum_dir; + + if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0) + return ret; + + /* If setting TX checksum mode, tell Linux the new mode */ + if (cmd == ETHTOOL_STXCSUM) { + if (edata.data) + dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM; + } + + break; +#endif /* TOE */ + + default: + return -EOPNOTSUPP; + } + + return 0; +} +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + +static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) +{ + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return FALSE; + } + + if (!dhdp->up) + return FALSE; + +#if !defined(BCMPCIE) && !defined(BCMDBUS) + if (dhdp->info->thr_dpc_ctl.thr_pid < 0) { + DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__)); + return FALSE; + } +#endif /* !BCMPCIE && !BCMDBUS */ + + if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) || + ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) { +#ifdef BCMPCIE + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n", + __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, + dhdp->d3ackcnt_timeout, error, dhdp->busstate)); +#else + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate)); +#endif /* BCMPCIE */ + if (dhdp->hang_reason == 0) { + if (dhdp->dongle_trap_occured) { + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; +#ifdef BCMPCIE + } else if (dhdp->d3ackcnt_timeout) { + dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT; +#endif /* BCMPCIE */ + } else { + dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT; + } + } + net_os_send_hang_message(net); + return TRUE; + } + return FALSE; +} + +#ifdef WL_MONITOR +bool +dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx) +{ + return (dhd->info->monitor_type != 
0); +} + +void +dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + { + uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >> + BCMPCIE_PKT_FLAGS_MONITOR_SHIFT; + switch (amsdu_flag) { + case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU: + default: + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) + == NULL) + return; + } + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dhd->monitor_skb = NULL; + return; + } + dhd->monitor_skb->protocol = + eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); + dhd->monitor_len = 0; + break; + + case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT: + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) + == NULL) + return; + dhd->monitor_len = 0; + } + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dev_kfree_skb(dhd->monitor_skb); + return; + } + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb), + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len = PKTLEN(dhdp->osh, pkt); + PKTFREE(dhdp->osh, pkt, FALSE); + return; + + case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT: + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len, + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len += PKTLEN(dhdp->osh, pkt); + PKTFREE(dhdp->osh, pkt, FALSE); + return; + + case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT: + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len, + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len += PKTLEN(dhdp->osh, pkt); + PKTFREE(dhdp->osh, pkt, FALSE); + skb_put(dhd->monitor_skb, dhd->monitor_len); + dhd->monitor_skb->protocol = + eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); + dhd->monitor_len = 0; + break; + } + } + + if (in_interrupt()) { + 
bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx(dhd->monitor_skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx_ni(dhd->monitor_skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else + ulong flags; + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx(dhd->monitor_skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + + dhd->monitor_skb = NULL; +} + +typedef struct dhd_mon_dev_priv { + struct net_device_stats stats; +} dhd_mon_dev_priv_t; + +#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t)) +#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats) + +static int +dhd_monitor_start(struct sk_buff *skb, struct net_device *dev) +{ + PKTFREE(NULL, skb, FALSE); + return 0; +} + +#if defined(BT_OVER_SDIO) + +void +dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp) +{ + dhdp->info->bus_user_count++; +} + +void +dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp) +{ + dhdp->info->bus_user_count--; +} + +/* Return values: + * Success: Returns 0 + * Failure: Returns -1 or errno code + */ +int +dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = 0; + + 
mutex_lock(&dhd->bus_user_lock); + ++dhd->bus_user_count; + if (dhd->bus_user_count < 0) { + DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (dhd->bus_user_count == 1) { + + dhd->pub.hang_was_sent = 0; + + /* First user, turn on WL_REG, start the bus */ + DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__)); + + if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) { + /* Enable F1 */ + ret = dhd_bus_resume(dhdp, 0); + if (ret) { + DHD_ERROR(("%s(): Failed to enable F1, err=%d\n", + __FUNCTION__, ret)); + goto exit; + } + } + + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path); + /* download the firmware, Enable F2 */ + /* TODO: Should be done only in case of FW switch */ + ret = dhd_bus_devreset(dhdp, FALSE); + dhd_bus_resume(dhdp, 1); + if (!ret) { + if (dhd_sync_with_dongle(&dhd->pub) < 0) { + DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__)); + ret = -EFAULT; + } + } else { + DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret)); + } + } else { + DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n", + __FUNCTION__, dhd->bus_user_count)); + } +exit: + mutex_unlock(&dhd->bus_user_lock); + return ret; +} +EXPORT_SYMBOL(dhd_bus_get); + +/* Return values: + * Success: Returns 0 + * Failure: Returns -1 or errno code + */ +int +dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = 0; + BCM_REFERENCE(owner); + + mutex_lock(&dhd->bus_user_lock); + --dhd->bus_user_count; + if (dhd->bus_user_count < 0) { + DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__)); + dhd->bus_user_count = 0; + ret = -1; + goto exit; + } + + if (dhd->bus_user_count == 0) { + /* Last user, stop the bus and 
turn Off WL_REG */ + DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n", + __FUNCTION__)); +#ifdef PROP_TXSTATUS + if (dhd->pub.wlfc_enabled) { + dhd_wlfc_deinit(&dhd->pub); + } +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + if (dhd->pub.pno_state) { + dhd_pno_deinit(&dhd->pub); + } +#endif /* PNO_SUPPORT */ +#ifdef RTT_SUPPORT + if (dhd->pub.rtt_state) { + dhd_rtt_deinit(&dhd->pub); + } +#endif /* RTT_SUPPORT */ + ret = dhd_bus_devreset(dhdp, TRUE); + if (!ret) { + dhd_bus_suspend(dhdp); + wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY); + } + } else { + DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n", + __FUNCTION__, dhd->bus_user_count)); + } +exit: + mutex_unlock(&dhd->bus_user_lock); + return ret; +} +EXPORT_SYMBOL(dhd_bus_put); + +int +dhd_net_bus_get(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_get(&dhd->pub, WLAN_MODULE); +} + +int +dhd_net_bus_put(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_put(&dhd->pub, WLAN_MODULE); +} + +/* + * Function to enable the Bus Clock + * Returns BCME_OK on success and BCME_xxx on failure + * + * This function is not callable from non-sleepable context + */ +int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + int ret; + + dhd_os_sdlock(dhdp); + /* + * The second argument is TRUE, that means, we expect + * the function to "wait" until the clocks are really + * available + */ + ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE); + dhd_os_sdunlock(dhdp); + + return ret; +} +EXPORT_SYMBOL(dhd_bus_clk_enable); + +/* + * Function to disable the Bus Clock + * Returns BCME_OK on success and BCME_xxx on failure + * + * This function is not callable from non-sleepable context + */ +int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + int ret; + + dhd_os_sdlock(dhdp); 
+ /* + * The second argument is TRUE, that means, we expect + * the function to "wait" until the clocks are really + * disabled + */ + ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE); + dhd_os_sdunlock(dhdp); + + return ret; +} +EXPORT_SYMBOL(dhd_bus_clk_disable); + +/* + * Function to reset bt_use_count counter to zero. + * + * This function is not callable from non-sleepable context + */ +void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + /* take the lock and reset bt use count */ + dhd_os_sdlock(dhdp); + dhdsdio_reset_bt_use_count(dhdp->bus); + dhd_os_sdunlock(dhdp); +} +EXPORT_SYMBOL(dhd_bus_reset_bt_use_count); + +void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + dhdp->hang_was_sent = 0; + + dhd_os_send_hang_message(&dhd->pub); +#else + DHD_ERROR(("%s: unsupported\n", __FUNCTION__)); +#endif // endif +} +EXPORT_SYMBOL(dhd_bus_retry_hang_recovery); + +#endif /* BT_OVER_SDIO */ + +static int +dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return 0; +} + +static struct net_device_stats* +dhd_monitor_get_stats(struct net_device *dev) +{ + return &DHD_MON_DEV_STATS(dev); +} + +static const struct net_device_ops netdev_monitor_ops = +{ + .ndo_start_xmit = dhd_monitor_start, + .ndo_get_stats = dhd_monitor_get_stats, + .ndo_do_ioctl = dhd_monitor_ioctl +}; + +static void +dhd_add_monitor_if(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + struct net_device *dev; + char *devname; + uint32 scan_suppress = FALSE; + int ret = BCME_OK; + + if (event != DHD_WQ_WORK_IF_ADD) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE); + if (!dev) { + 
DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__)); + return; + } + + devname = "radiotap"; + + snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit); + +#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */ +#define ARPHRD_IEEE80211_PRISM 802 +#endif // endif + +#ifndef ARPHRD_IEEE80211_RADIOTAP +#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ +#endif /* ARPHRD_IEEE80211_RADIOTAP */ + + dev->type = ARPHRD_IEEE80211_RADIOTAP; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + dev->hard_start_xmit = dhd_monitor_start; + dev->do_ioctl = dhd_monitor_ioctl; + dev->get_stats = dhd_monitor_get_stats; +#else + dev->netdev_ops = &netdev_monitor_ops; +#endif // endif + + if (register_netdev(dev)) { + DHD_ERROR(("%s, register_netdev failed for %s\n", + __FUNCTION__, dev->name)); + free_netdev(dev); + } + + if (FW_SUPPORTED((&dhd->pub), monitor)) { + scan_suppress = TRUE; + /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */ + ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress, + sizeof(scan_suppress), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret)); + } + } + + dhd->monitor_dev = dev; +} + +static void +dhd_del_monitor_if(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + + if (event != DHD_WQ_WORK_IF_DEL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (dhd->monitor_dev) { + unregister_netdev(dhd->monitor_dev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) + MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE); + MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device)); +#else + free_netdev(dhd->monitor_dev); +#endif /* 2.6.24 */ + + dhd->monitor_dev = NULL; + } +} + +static void +dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val) +{ + dhd_info_t *info = 
dhd->info; + + DHD_TRACE(("%s: val %d\n", __FUNCTION__, val)); + if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) { + DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__)); + return; + } + + /* Delete monitor */ + if (!val) { + info->monitor_type = val; + dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL, + dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW); + return; + } + + /* Add monitor */ + info->monitor_type = val; + dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD, + dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW); +} +#endif /* WL_MONITOR */ + +#if defined(DHD_H2D_LOG_TIME_SYNC) +/* + * Helper function: + * Used for RTE console message time syncing with Host printk + */ +void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp) +{ + dhd_info_t *info = dhdp->info; + + /* Ideally the "state" should be always TRUE */ + dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, + DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH, + dhd_deferred_work_rte_log_time_sync, + DHD_WQ_WORK_PRIORITY_LOW); +} + +void +dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd_info = handle; + dhd_pub_t *dhd; + + if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd_info) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd = &dhd_info->pub; + + /* + * Function to send IOVAR for console timesyncing + * between Host and Dongle. + * If the IOVAR fails, + * 1. dhd_rte_time_sync_ms is set to 0 and + * 2. HOST Dongle console time sync will *not* happen. 
+ */ + dhd_h2d_log_time_sync(dhd); +} +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf) +{ + int bcmerror = BCME_OK; + int buflen = 0; + struct net_device *net; + + net = dhd_idx2net(pub, ifidx); + if (!net) { + bcmerror = BCME_BADARG; + /* + * The netdev pointer is bad means the DHD can't communicate + * to higher layers, so just return from here + */ + return bcmerror; + } + + /* check for local dhd ioctl and handle it */ + if (ioc->driver == DHD_IOCTL_MAGIC) { + /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */ + if (data_buf) + buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN); + bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen); + if (bcmerror) + pub->bcmerror = bcmerror; + goto done; + } + + /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */ + if (data_buf) + buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN); + +#ifndef BCMDBUS + /* send to dongle (must be up, and wl). */ + if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) { + if ((!pub->dongle_trap_occured) && allow_delay_fwdl) { + int ret; + if (atomic_read(&exit_in_progress)) { + DHD_ERROR(("%s module exit in progress\n", __func__)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + ret = dhd_bus_start(pub); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } else { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } + + if (!pub->iswl) { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } +#endif /* !BCMDBUS */ + + /* + * Flush the TX queue if required for proper message serialization: + * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to + * prevent M4 encryption and + * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to + * prevent disassoc frame being sent before WPS-DONE frame. 
+ */ + if (ioc->cmd == WLC_SET_KEY || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("wsec_key", data_buf, 9) == 0) || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("bsscfg:wsec_key", data_buf, 15) == 0) || + ioc->cmd == WLC_DISASSOC) + dhd_wait_pend8021x(net); + + if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) && + data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) { + bcmerror = BCME_UNSUPPORTED; + goto done; + } + bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); + +#ifdef WL_MONITOR + /* Intercept monitor ioctl here, add/del monitor if */ + if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) { + int val = 0; + if (data_buf != NULL && buflen != 0) { + if (buflen >= 4) { + val = *(int*)data_buf; + } else if (buflen >= 2) { + val = *(short*)data_buf; + } else { + val = *(char*)data_buf; + } + } + dhd_set_monitor(pub, ifidx, val); + } +#endif /* WL_MONITOR */ + +done: + dhd_check_hang(net, pub, bcmerror); + + return bcmerror; +} + +/** + * Called by the OS (optionally via a wrapper function). + * @param net Linux per dongle instance + * @param ifr Linux request structure + * @param cmd e.g. 
SIOCETHTOOL + */ +static int +dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_ioctl_t ioc; + int bcmerror = 0; + int ifidx; + int ret; + void *local_buf = NULL; /**< buffer in kernel space */ + void __user *ioc_buf_user = NULL; /**< buffer in user space */ + u16 buflen = 0; + + if (atomic_read(&exit_in_progress)) { + DHD_ERROR(("%s module exit in progress\n", __func__)); + bcmerror = BCME_DONGLE_DOWN; + return OSL_ERROR(bcmerror); + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + /* Interface up check for built-in type */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) { + DHD_ERROR(("%s: Interface is down \n", __FUNCTION__)); + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return OSL_ERROR(BCME_NOTUP); + } + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); + +#if defined(WL_STATIC_IF) + /* skip for static ndev when it is down */ + if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) { + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -1; + } +#endif /* WL_STATIC_IF */ + + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -1; + } + +#if defined(WL_WIRELESS_EXT) + /* linux wireless extensions */ + if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { + /* may recurse, do NOT lock */ + ret = wl_iw_ioctl(net, ifr, cmd); + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) + if (cmd == SIOCETHTOOL) { + ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + + if (cmd == SIOCDEVPRIVATE+1) { + ret = wl_android_priv_cmd(net, 
ifr); + dhd_check_hang(net, &dhd->pub, ret); + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } + + if (cmd != SIOCDEVPRIVATE) { + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -EOPNOTSUPP; + } + + memset(&ioc, 0, sizeof(ioc)); + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif /* LINUX_VER >= 4.6 */ + { + compat_wl_ioctl_t compat_ioc; + if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) { + bcmerror = BCME_BADADDR; + goto done; + } + ioc.cmd = compat_ioc.cmd; + if (ioc.cmd & WLC_SPEC_FLAG) { + memset(&ioc, 0, sizeof(ioc)); + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + bcmerror = BCME_BADADDR; + goto done; + } + ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */ + + /* To differentiate between wl and dhd read 4 more bytes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = BCME_BADADDR; + goto done; + } + + } else { /* ioc.cmd & WLC_SPEC_FLAG */ + ioc.buf = compat_ptr(compat_ioc.buf); + ioc.len = compat_ioc.len; + ioc.set = compat_ioc.set; + ioc.used = compat_ioc.used; + ioc.needed = compat_ioc.needed; + /* To differentiate between wl and dhd read 4 more bytes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = BCME_BADADDR; + goto done; + } + } /* ioc.cmd & WLC_SPEC_FLAG */ + } else +#endif /* CONFIG_COMPAT */ + { + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + bcmerror = BCME_BADADDR; + goto done; + } +#ifdef CONFIG_COMPAT + ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task*/ +#endif + + /* To differentiate between wl and dhd read 4 more bytes */ + if 
((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = BCME_BADADDR; + goto done; + } + } + + if (!capable(CAP_NET_ADMIN)) { + bcmerror = BCME_EPERM; + goto done; + } + + /* Take backup of ioc.buf and restore later */ + ioc_buf_user = ioc.buf; + + if (ioc.len > 0) { + buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); + if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) { + bcmerror = BCME_NOMEM; + goto done; + } + + DHD_PERIM_UNLOCK(&dhd->pub); + if (copy_from_user(local_buf, ioc.buf, buflen)) { + DHD_PERIM_LOCK(&dhd->pub); + bcmerror = BCME_BADADDR; + goto done; + } + DHD_PERIM_LOCK(&dhd->pub); + + *((char *)local_buf + buflen) = '\0'; + + /* For some platforms accessing userspace memory + * of ioc.buf is causing kernel panic, so to avoid that + * make ioc.buf pointing to kernel space memory local_buf + */ + ioc.buf = local_buf; + } + + /* Skip all the non DHD iovars (wl iovars) after f/w hang */ + if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) { + DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + + bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); + + /* Restore back userspace pointer to ioc.buf */ + ioc.buf = ioc_buf_user; + + if (!bcmerror && buflen && local_buf && ioc.buf) { + DHD_PERIM_UNLOCK(&dhd->pub); + if (copy_to_user(ioc.buf, local_buf, buflen)) + bcmerror = -EFAULT; + DHD_PERIM_LOCK(&dhd->pub); + } + +done: + if (local_buf) + MFREE(dhd->pub.osh, local_buf, buflen+1); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return OSL_ERROR(bcmerror); +} + +#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP) +/* Flags to indicate if we distinguish power off policy when + * user set the menu "Keep Wi-Fi on during sleep" to "Never" + */ +int trigger_deep_sleep = 0; +#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */ + +#ifdef 
FIX_CPU_MIN_CLOCK +static int dhd_init_cpufreq_fix(dhd_info_t *dhd) +{ + if (dhd) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhd->cpufreq_fix); +#endif // endif + dhd->cpufreq_fix_status = FALSE; + } + return 0; +} + +static void dhd_fix_cpu_freq(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhd->cpufreq_fix); +#endif // endif + if (dhd && !dhd->cpufreq_fix_status) { + pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_add_requests called\n")); + + dhd->cpufreq_fix_status = TRUE; + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif // endif +} + +static void dhd_rollback_cpu_freq(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhd ->cpufreq_fix); +#endif // endif + if (dhd && dhd->cpufreq_fix_status != TRUE) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif // endif + return; + } + + pm_qos_remove_request(&dhd->dhd_cpu_qos); +#ifdef FIX_BUS_MIN_CLOCK + pm_qos_remove_request(&dhd->dhd_bus_qos); +#endif /* FIX_BUS_MIN_CLOCK */ + DHD_ERROR(("pm_qos_add_requests called\n")); + + dhd->cpufreq_fix_status = FALSE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhd->cpufreq_fix); +#endif // endif +} +#endif /* FIX_CPU_MIN_CLOCK */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int +dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd) +{ + int error; + dhd_info_t *dhd = DHD_DEV_INFO(net); + + if (atomic_read(&dhd->pub.block_bus)) + return -EHOSTDOWN; + + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0) + return BCME_ERROR; + + error = dhd_ioctl_entry(net, ifr, cmd); + + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); + 
pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); + + return error; +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +static int +dhd_stop(struct net_device *net) +{ + int ifidx = 0; + bool skip_reset = false; +#if defined(WL_CFG80211) + unsigned long flags = 0; +#ifdef WL_STATIC_IF + struct bcm_cfg80211 *cfg = wl_get_cfg(net); +#endif /* WL_STATIC_IF */ +#endif /* WL_CFG80211 */ + dhd_info_t *dhd = DHD_DEV_INFO(net); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + printf("%s: Enter %p\n", __FUNCTION__, net); + dhd->pub.rxcnt_timeout = 0; + dhd->pub.txcnt_timeout = 0; + +#ifdef BCMPCIE + dhd->pub.d3ackcnt_timeout = 0; +#endif /* BCMPCIE */ + + mutex_lock(&dhd->pub.ndev_op_sync); + + if (dhd->pub.up == 0) { + goto exit; + } + + dhd_if_flush_sta(DHD_DEV_IFP(net)); + +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) + dhd_rollback_cpu_freq(dhd); +#endif /* FIX_CPU_MIN_CLOCK */ + + ifidx = dhd_net2idx(dhd, net); + BCM_REFERENCE(ifidx); + + /* Set state and stop OS transmissions */ + netif_stop_queue(net); +#if defined(WL_STATIC_IF) && defined(WL_CFG80211) + /* If static if is operational, don't reset the chip */ + if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) { + DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n")); + skip_reset = true; + goto exit; + } +#endif /* WL_STATIC_IF && WL_CFG80211 */ +#ifdef WL_CFG80211 + + /* Disable Runtime PM before interface down */ + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + + spin_lock_irqsave(&dhd->pub.up_lock, flags); + dhd->pub.up = 0; + spin_unlock_irqrestore(&dhd->pub.up_lock, flags); +#else + dhd->pub.up = 0; +#endif /* WL_CFG80211 */ + +#ifdef WL_CFG80211 + if (ifidx == 0) { + dhd_if_t *ifp; + wl_cfg80211_down(net); + + ifp = dhd->iflist[0]; + /* + * For CFG80211: Clean up all the left over virtual interfaces + * when the primary Interface is brought down. 
[ifconfig wlan0 down] + */ + if (!dhd_download_fw_on_driverload) { + if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && + (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { + int i; +#ifdef WL_CFG80211_P2P_DEV_IF + wl_cfg80211_del_p2p_wdev(net); +#endif /* WL_CFG80211_P2P_DEV_IF */ + + dhd_net_if_lock_local(dhd); + for (i = 1; i < DHD_MAX_IFS; i++) + dhd_remove_if(&dhd->pub, i, FALSE); + + if (ifp && ifp->net) { + dhd_if_del_sta_list(ifp); + } +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = FALSE; + unregister_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = FALSE; + unregister_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + dhd_net_if_unlock_local(dhd); + } +#if 0 + // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process + cancel_work_sync(dhd->dhd_deferred_wq); +#endif + +#ifdef SHOW_LOGTRACE + /* Wait till event logs work/kthread finishes */ + dhd_cancel_logtrace_process_sync(dhd); +#endif /* SHOW_LOGTRACE */ + +#if defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + } + +#if defined(ARGOS_NOTIFY_CB) + argos_register_notifier_deinit(); +#endif // endif +#ifdef DHDTCPACK_SUPPRESS + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB_RXP) + if (ifp && ifp->net == dhd->rx_napi_netdev) { + DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + skb_queue_purge(&dhd->rx_napi_queue); + napi_disable(&dhd->rx_napi_struct); + netif_napi_del(&dhd->rx_napi_struct); + dhd->rx_napi_netdev = NULL; + } +#endif /* DHD_LB_RXP */ + } +#endif /* 
WL_CFG80211 */ + + DHD_SSSR_DUMP_DEINIT(&dhd->pub); + +#ifdef PROP_TXSTATUS + dhd_wlfc_cleanup(&dhd->pub, NULL, 0); +#endif // endif +#ifdef SHOW_LOGTRACE + if (!dhd_download_fw_on_driverload) { + /* Release the skbs from queue for WLC_E_TRACE event */ + dhd_event_logtrace_flush_queue(&dhd->pub); + if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) { + if (dhd->event_data.fmts) { + MFREE(dhd->pub.osh, dhd->event_data.fmts, + dhd->event_data.fmts_size); + dhd->event_data.fmts = NULL; + } + if (dhd->event_data.raw_fmts) { + MFREE(dhd->pub.osh, dhd->event_data.raw_fmts, + dhd->event_data.raw_fmts_size); + dhd->event_data.raw_fmts = NULL; + } + if (dhd->event_data.raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.raw_sstr, + dhd->event_data.raw_sstr_size); + dhd->event_data.raw_sstr = NULL; + } + if (dhd->event_data.rom_raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr, + dhd->event_data.rom_raw_sstr_size); + dhd->event_data.rom_raw_sstr = NULL; + } + dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT; + } + } +#endif /* SHOW_LOGTRACE */ +#ifdef APF + dhd_dev_apf_delete_filter(net); +#endif /* APF */ + + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + OLD_MOD_DEC_USE_COUNT; +exit: + if (skip_reset == false) { + if (ifidx == 0 && !dhd_download_fw_on_driverload) { +#if defined(BT_OVER_SDIO) + dhd_bus_put(&dhd->pub, WLAN_MODULE); + wl_android_set_wifi_on_flag(FALSE); +#else + wl_android_wifi_off(net, TRUE); +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_dettach_netdev(net, ifidx); +#endif +#endif /* BT_OVER_SDIO */ + } +#ifdef SUPPORT_DEEP_SLEEP + else { + /* CSP#505233: Flags to indicate if we distingish + * power off policy when user set the memu + * "Keep Wi-Fi on during sleep" to "Never" + */ + if (trigger_deep_sleep) { + dhd_deepsleep(net, 1); + trigger_deep_sleep = 0; + } + } +#endif /* SUPPORT_DEEP_SLEEP */ + dhd->pub.hang_was_sent = 0; + + /* Clear country spec for for built-in type driver */ + if (!dhd_download_fw_on_driverload) { + 
dhd->pub.dhd_cspec.country_abbrev[0] = 0x00; + dhd->pub.dhd_cspec.rev = 0; + dhd->pub.dhd_cspec.ccode[0] = 0x00; + } + +#ifdef BCMDBGFS + dhd_dbgfs_remove(); +#endif // endif + } + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + /* Destroy wakelock */ + if (!dhd_download_fw_on_driverload && + (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) && + (skip_reset == false)) { + DHD_OS_WAKE_LOCK_DESTROY(dhd); + dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + printf("%s: Exit\n", __FUNCTION__); + + mutex_unlock(&dhd->pub.ndev_op_sync); + return 0; +} + +#if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \ + defined(USE_INITIAL_SHORT_DWELL_TIME)) +extern bool g_first_broadcast_scan; +#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */ + +#ifdef WL11U +static int dhd_interworking_enable(dhd_pub_t *dhd) +{ + uint32 enable = true; + int ret = BCME_OK; + + ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret)); + } + + return ret; +} +#endif /* WL11u */ + +static int +dhd_open(struct net_device *net) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); +#ifdef TOE + uint32 toe_ol; +#endif // endif + int ifidx; + int32 ret = 0; +#if defined(OOB_INTR_ONLY) + uint32 bus_type = -1; + uint32 bus_num = -1; + uint32 slot_num = -1; + wifi_adapter_info_t *adapter = NULL; +#endif +#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT) + int bytes_written = 0; + struct dhd_conf *conf; +#endif + + mutex_lock(&dhd->pub.ndev_op_sync); + + if (dhd->pub.up == 1) { + /* already up */ + DHD_ERROR(("Primary net_device is already up \n")); + mutex_unlock(&dhd->pub.ndev_op_sync); + return BCME_OK; + } + + if (!dhd_download_fw_on_driverload) { + if (!dhd_driver_init_done) { + DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__)); + mutex_unlock(&dhd->pub.ndev_op_sync); + return -1; + } + 
} + + printf("%s: Enter %p\n", __FUNCTION__, net); + DHD_MUTEX_LOCK(); + /* Init wakelock */ + if (!dhd_download_fw_on_driverload) { + if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + +#ifdef SHOW_LOGTRACE + skb_queue_head_init(&dhd->evt_trace_queue); + + if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) { + ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data); + if (ret == BCME_OK) { + dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data, + st_str_file_path, map_file_path); + dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data, + rom_st_str_file_path, rom_map_file_path); + dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT; + } + } +#endif /* SHOW_LOGTRACE */ + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + dhd->pub.dongle_trap_occured = 0; + dhd->pub.hang_was_sent = 0; + dhd->pub.hang_reason = 0; + dhd->pub.iovar_timeout_occured = 0; +#ifdef PCIE_FULL_DONGLE + dhd->pub.d3ack_timeout_occured = 0; + dhd->pub.livelock_occured = 0; +#endif /* PCIE_FULL_DONGLE */ +#ifdef DHD_MAP_LOGGING + dhd->pub.smmu_fault_occurred = 0; +#endif /* DHD_MAP_LOGGING */ + +#ifdef DHD_LOSSLESS_ROAMING + dhd->pub.dequeue_prec_map = ALLPRIO; +#endif // endif + +#if 0 + /* + * Force start if ifconfig_up gets called before START command + * We keep WEXT's wl_control_wl_start to provide backward compatibility + * This should be removed in the future + */ + ret = wl_control_wl_start(net); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + +#endif // endif + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + if (ifidx < 0) { + DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (ifidx 
== 0) { + atomic_set(&dhd->pend_8021x_cnt, 0); + if (!dhd_download_fw_on_driverload) { + DHD_ERROR(("\n%s\n", dhd_version)); +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx); +#endif +#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME) + g_first_broadcast_scan = TRUE; +#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */ +#ifdef SHOW_LOGTRACE + /* dhd_cancel_logtrace_process_sync is called in dhd_stop + * for built-in models. Need to start logtrace kthread before + * calling wifi on, because once wifi is on, EDL will be in action + * any moment, and if kthread is not active, FW event logs will + * not be available + */ + if (dhd_reinit_logtrace_process(dhd) != BCME_OK) { + goto exit; + } +#endif /* SHOW_LOGTRACE */ +#if defined(BT_OVER_SDIO) + ret = dhd_bus_get(&dhd->pub, WLAN_MODULE); + wl_android_set_wifi_on_flag(TRUE); +#else + ret = wl_android_wifi_on(net); +#endif /* BT_OVER_SDIO */ + if (ret != 0) { + DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n", + __FUNCTION__, ret)); + ret = -1; + goto exit; + } +#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT) + conf = dhd_get_conf(net); + if (conf) { + wl_android_ext_priv_cmd(net, conf->isam_init, 0, &bytes_written); + wl_android_ext_priv_cmd(net, conf->isam_config, 0, &bytes_written); + wl_android_ext_priv_cmd(net, conf->isam_enable, 0, &bytes_written); + } +#endif + } +#ifdef SUPPORT_DEEP_SLEEP + else { + /* Flags to indicate if we distingish + * power off policy when user set the memu + * "Keep Wi-Fi on during sleep" to "Never" + */ + if (trigger_deep_sleep) { +#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME) + g_first_broadcast_scan = TRUE; +#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */ + dhd_deepsleep(net, 0); + trigger_deep_sleep = 0; + } + } +#endif /* SUPPORT_DEEP_SLEEP */ +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) { + dhd_init_cpufreq_fix(dhd); + 
dhd_fix_cpu_freq(dhd); + } +#endif /* FIX_CPU_MIN_CLOCK */ +#if defined(OOB_INTR_ONLY) + if (dhd->pub.conf->dpc_cpucore >= 0) { + dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num); + adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); + if (adapter) { + printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore); + irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore)); + } + } +#endif + + if (dhd->pub.busstate != DHD_BUS_DATA) { +#ifdef BCMDBUS + dhd_set_path(&dhd->pub); + DHD_MUTEX_UNLOCK(); + wait_event_interruptible_timeout(dhd->adapter->status_event, + wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY), + msecs_to_jiffies(DHD_FW_READY_TIMEOUT)); + DHD_MUTEX_LOCK(); + if ((ret = dbus_up(dhd->pub.bus)) != 0) { + DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret)); + goto exit; + } else { + dhd->pub.busstate = DHD_BUS_DATA; + } + if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + goto exit; + } +#else + /* try to bring up bus */ + DHD_PERIM_UNLOCK(&dhd->pub); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) { + ret = dhd_bus_start(&dhd->pub); + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); + } +#else + ret = dhd_bus_start(&dhd->pub); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + DHD_PERIM_LOCK(&dhd->pub); + if (ret) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } +#endif /* !BCMDBUS */ + + } +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_attach_name(net, ifidx); +#endif + +#ifdef BT_OVER_SDIO + if (dhd->pub.is_bt_recovery_required) { + DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__)); + bcmsdh_btsdio_process_dhd_hang_notification(TRUE); + } + dhd->pub.is_bt_recovery_required = FALSE; +#endif // endif + + /* dhd_sync_with_dongle 
has been called in dhd_bus_start or wl_android_wifi_on */ + memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + +#ifdef TOE + /* Get current TOE mode from dongle */ + if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) { + dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; + } else { + dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; + } +#endif /* TOE */ + +#if defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + if (dhd->rx_napi_netdev == NULL) { + dhd->rx_napi_netdev = dhd->iflist[ifidx]->net; + memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct)); + netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct, + dhd_napi_poll, dhd_napi_weight); + DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + napi_enable(&dhd->rx_napi_struct); + DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__)); + skb_queue_head_init(&dhd->rx_napi_queue); + } /* rx_napi_netdev == NULL */ +#endif /* DHD_LB_RXP */ +#ifdef DHD_LB_IRQSET + dhd_irq_set_affinity(&dhd->pub); +#endif /* DHD_LB_IRQSET */ + +#if defined(DHD_LB_TXP) + /* Use the variant that uses locks */ + skb_queue_head_init(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + +#if defined(WL_CFG80211) + if (unlikely(wl_cfg80211_up(net))) { + DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__)); + ret = -1; + goto exit; + } + if (!dhd_download_fw_on_driverload) { +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + } + +#if defined(ARGOS_NOTIFY_CB) + 
argos_register_notifier_init(net); +#endif // endif +#if defined(NUM_SCB_MAX_PROBE) + dhd_set_scb_probe(&dhd->pub); +#endif /* NUM_SCB_MAX_PROBE */ +#endif /* WL_CFG80211 */ + } + + /* Allow transmit calls */ + netif_start_queue(net); + dhd->pub.up = 1; + + if (wl_event_enable) { + /* For wl utility to receive events */ + dhd->pub.wl_event_enabled = true; + } else { + dhd->pub.wl_event_enabled = false; + } + + if (logtrace_pkt_sendup) { + /* For any deamon to recieve logtrace */ + dhd->pub.logtrace_pkt_sendup = true; + } else { + dhd->pub.logtrace_pkt_sendup = false; + } + + OLD_MOD_INC_USE_COUNT; + +#ifdef BCMDBGFS + dhd_dbgfs_init(&dhd->pub); +#endif // endif + +exit: + mutex_unlock(&dhd->pub.ndev_op_sync); + if (ret) { + dhd_stop(net); + } + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + DHD_MUTEX_UNLOCK(); + + printf("%s: Exit ret=%d\n", __FUNCTION__, ret); + return ret; +} + +#if defined(WL_STATIC_IF) && defined(WL_CFG80211) +/* + * For static I/Fs, the firmware interface init + * is done from the IFF_UP context. + */ +static int +dhd_static_if_open(struct net_device *net) +{ + s32 ret = 0; + struct bcm_cfg80211 *cfg; + struct net_device *primary_netdev = NULL; + + cfg = wl_get_cfg(net); + primary_netdev = bcmcfg_to_prmry_ndev(cfg); + + if (!IS_CFG80211_STATIC_IF(cfg, net)) { + DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name)); + ret = BCME_OK; + goto done; + } + + DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name)); + /* Ensure fw is initialized. If it is already initialized, + * dhd_open will return success. 
+ */ + ret = dhd_open(primary_netdev); + if (unlikely(ret)) { + DHD_ERROR(("Failed to open primary dev ret %d\n", ret)); + goto done; + } + + ret = wl_cfg80211_static_if_open(net); + if (!ret) { + /* Allow transmit calls */ + netif_start_queue(net); + } +done: + return ret; +} + +static int +dhd_static_if_stop(struct net_device *net) +{ + struct bcm_cfg80211 *cfg; + struct net_device *primary_netdev = NULL; + int ret = BCME_OK; + + DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name)); + + /* Ensure queue is disabled */ + netif_tx_disable(net); + + cfg = wl_get_cfg(net); + if (!IS_CFG80211_STATIC_IF(cfg, net)) { + DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name)); + return BCME_OK; + } + + ret = wl_cfg80211_static_if_close(net); + + /* If STA iface is not in operational, invoke dhd_close from this + * context. + */ + primary_netdev = bcmcfg_to_prmry_ndev(cfg); + if (!(primary_netdev->flags & IFF_UP)) { + ret = dhd_stop(primary_netdev); + } + + return ret; +} +#endif /* WL_STATIC_IF && WL_CF80211 */ + +int dhd_do_driver_init(struct net_device *net) +{ + dhd_info_t *dhd = NULL; + + if (!net) { + DHD_ERROR(("Primary Interface not initialized \n")); + return -EINVAL; + } + + DHD_MUTEX_IS_LOCK_RETURN(); + + /* && defined(OEM_ANDROID) && defined(BCMSDIO) */ + dhd = DHD_DEV_INFO(net); + + /* If driver is already initialized, do nothing + */ + if (dhd->pub.busstate == DHD_BUS_DATA) { + DHD_TRACE(("Driver already Inititalized. Nothing to do")); + return 0; + } + + if (dhd_open(net) < 0) { + DHD_ERROR(("Driver Init Failed \n")); + return -1; + } + + return 0; +} + +int +dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ + +#ifdef WL_CFG80211 + if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK) + return BCME_OK; +#endif // endif + + /* handle IF event caused by wl commands, SoftAP, WEXT and + * anything else. 
This has to be done asynchronously otherwise + * DPC will be blocked (and iovars will timeout as DPC has no chance + * to read the response back) + */ + if (ifevent->ifidx > 0) { + dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strncpy(if_event->name, name, IFNAMSIZ); + if_event->name[IFNAMSIZ - 1] = '\0'; + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, + DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW); + } + + return BCME_OK; +} + +int +dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ + dhd_if_event_t *if_event; + +#ifdef WL_CFG80211 + if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) + return BCME_OK; +#endif /* WL_CFG80211 */ + + /* handle IF event caused by wl commands, SoftAP, WEXT and + * anything else + */ + if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strncpy(if_event->name, name, IFNAMSIZ); + if_event->name[IFNAMSIZ - 1] = '\0'; + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL, + dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW); + + return BCME_OK; +} + +int +dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ +#ifdef DHD_UPDATE_INTF_MAC + dhd_if_event_t *if_event; +#endif /* DHD_UPDATE_INTF_MAC */ + +#ifdef WL_CFG80211 + 
wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx); +#endif /* WL_CFG80211 */ + +#ifdef DHD_UPDATE_INTF_MAC + /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and + * anything else + */ + if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t)); + if (if_event == NULL) { + DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes", + MALLOCED(dhdinfo->pub.osh))); + return BCME_NOMEM; + } + memcpy(&if_event->event, ifevent, sizeof(if_event->event)); + // construct a change event + if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name); + if_event->event.opcode = WLC_E_IF_CHANGE; + memcpy(if_event->mac, mac, ETHER_ADDR_LEN); + strncpy(if_event->name, name, IFNAMSIZ); + if_event->name[IFNAMSIZ - 1] = '\0'; + dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE, + dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW); +#endif /* DHD_UPDATE_INTF_MAC */ + + return BCME_OK; +} + +#ifdef WL_NATOE +/* Handler to update natoe info and bind with new subscriptions if there is change in config */ +static void +dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + wl_event_data_natoe_t *natoe = event_info; + dhd_nfct_info_t *nfct = dhd ? dhd->pub.nfct : NULL; /* don't deref handle before the !dhd check below */ + + if (event != DHD_WQ_WORK_NATOE_EVENT) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port && + (natoe->start_port < natoe->end_port)) { + /* Rebind subscriptions to start receiving notifications from groups */ + if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) { + dhd_ct_close(nfct); + } + dhd_ct_send_dump_req(nfct); + } else if (!natoe->natoe_active) { + /* Rebind subscriptions to stop receiving notifications from groups */ + if (dhd_ct_nl_bind(nfct, 
CT_NULL_SUBSCRIPTION) < 0) { + dhd_ct_close(nfct); + } + } +} + +/* As NATOE enable/disable event is received, we have to bind with new NL subscriptions. + * Scheduling workq to switch from tasklet context as bind call may sleep in handler + */ +int +dhd_natoe_ct_event(dhd_pub_t *dhd, char *data) +{ + wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data; + + if (dhd->nfct) { + wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info; + uint8 prev_enable = natoe->natoe_active; + + spin_lock_bh(&dhd->nfct_lock); + memcpy(natoe, event_data, sizeof(*event_data)); + spin_unlock_bh(&dhd->nfct_lock); + + if (prev_enable != event_data->natoe_active) { + dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, + (void *)natoe, DHD_WQ_WORK_NATOE_EVENT, + dhd_natoe_ct_event_hanlder, DHD_WQ_WORK_PRIORITY_LOW); + } + return BCME_OK; + } + DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__)); + return BCME_ERROR; +} + +/* Handler to send natoe ioctl to dongle */ +static void +dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event) +{ + dhd_info_t *dhd = handle; + dhd_ct_ioc_t *ct_ioc = event_info; + + if (event != DHD_WQ_WORK_NATOE_IOCTL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) { + DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__)); + } +} + +/* When Netlink message contains port collision info, the info must be sent to dongle FW + * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl + */ +void +dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc) +{ + + dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc, + DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler, + DHD_WQ_WORK_PRIORITY_HIGH); +} +#endif /* WL_NATOE */ + +/* This API maps ndev to ifp inclusive of static 
IFs */ +static dhd_if_t * +dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev) +{ + dhd_if_t *ifp = NULL; +#ifdef WL_STATIC_IF + u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1); +#else + u32 ifidx = (DHD_MAX_IFS - 1); +#endif /* WL_STATIC_IF */ + + dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info; + do { + ifp = dhdinfo->iflist[ifidx]; + if (ifp && (ifp->net == ndev)) { + DHD_TRACE(("match found for %s. ifidx:%d\n", + ndev->name, ifidx)); + return ifp; + } + } while (ifidx--); + + DHD_ERROR(("no entry found for %s\n", ndev->name)); + return NULL; +} + +bool +dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev) +{ + dhd_if_t *ifp = NULL; + + if (!dhdp || !ndev) { + DHD_ERROR(("wrong input\n")); + ASSERT(0); + return false; + } + + ifp = dhd_get_ifp_by_ndev(dhdp, ndev); + return (ifp && (ifp->static_if == true)); +} + +#ifdef WL_STATIC_IF +/* In some cases, while registering I/F, the actual ifidx, bssidx and dngl_name + * are not known. For e.g: static i/f case. This function lets to update it once + * it is known. + */ +s32 +dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx, + uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info; + dhd_if_t *ifp, *ifp_new; + s32 cur_idx; + dhd_dev_priv_t * dev_priv; + + DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n", + if_state, ifidx)); + + ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS))); + + if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) { + return -ENODEV; + } + cur_idx = ifp->idx; + + if (if_state == NDEV_STATE_OS_IF_CREATED) { + /* mark static if */ + ifp->static_if = TRUE; + return BCME_OK; + } + + ifp_new = dhdinfo->iflist[ifidx]; + if (ifp_new && (ifp_new != ifp)) { + /* There should be only one entry for a given ifidx. 
*/ + DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx)); + ASSERT(0); + dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE; + net_os_send_hang_message(ifp->net); + return -EINVAL; + } + + /* For static if delete case, cleanup the if before ifidx update */ + if ((if_state == NDEV_STATE_FW_IF_DELETED) || + (if_state == NDEV_STATE_FW_IF_FAILED)) { + dhd_cleanup_if(ifp->net); + dev_priv = DHD_DEV_PRIV(ndev); + dev_priv->ifidx = ifidx; + } + + /* update the iflist ifidx slot with cached info */ + dhdinfo->iflist[ifidx] = ifp; + dhdinfo->iflist[cur_idx] = NULL; + + /* update the values */ + ifp->idx = ifidx; + ifp->bssidx = bssidx; + + if (if_state == NDEV_STATE_FW_IF_CREATED) { + dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx); + /* initialize the dongle provided if name */ + if (dngl_name) { + strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ); + } else if (ndev->name[0] != '\0') { + strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ); + } + if (mac != NULL) + memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN); + } + DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n", + ifidx, cur_idx, if_state)); + return BCME_OK; +} +#endif /* WL_STATIC_IF */ + +/* unregister and free the existing net_device interface (if any) in iflist and + * allocate a new one. the slot is reused. this function does NOT register the + * new interface to linux kernel. dhd_register_if does the job + */ +struct net_device* +dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; + dhd_if_t *ifp; + + ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS))); + + ifp = dhdinfo->iflist[ifidx]; + + if (ifp != NULL) { + if (ifp->net != NULL) { + DHD_ERROR(("%s: free existing IF %s ifidx:%d \n", + __FUNCTION__, ifp->net->name, ifidx)); + + if (ifidx == 0) { + /* For primary ifidx (0), there shouldn't be + * any netdev present already. 
+ */ + DHD_ERROR(("Primary ifidx populated already\n")); + ASSERT(0); + return NULL; + } + + dhd_dev_priv_clear(ifp->net); /* clear net_device private */ + + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + netif_stop_queue(ifp->net); + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); + } + ifp->net = NULL; + } + } else { + ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t)); + if (ifp == NULL) { + DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t))); + return NULL; + } + } + + memset(ifp, 0, sizeof(dhd_if_t)); + ifp->info = dhdinfo; + ifp->idx = ifidx; + ifp->bssidx = bssidx; +#ifdef DHD_MCAST_REGEN + ifp->mcast_regen_bss_enable = FALSE; +#endif // endif + /* set to TRUE rx_pkt_chainable at alloc time */ + ifp->rx_pkt_chainable = TRUE; + + if (mac != NULL) + memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN); + + /* Allocate etherdev, including space for private structure */ + ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE); + if (ifp->net == NULL) { + DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo))); + goto fail; + } + + /* Setup the dhd interface's netdevice private structure. 
*/ + dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx); + + if (name && name[0]) { + strncpy(ifp->net->name, name, IFNAMSIZ); + ifp->net->name[IFNAMSIZ - 1] = '\0'; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9)) +#define IFP_NET_DESTRUCTOR ifp->net->priv_destructor +#else +#define IFP_NET_DESTRUCTOR ifp->net->destructor +#endif // endif + +#ifdef WL_CFG80211 + if (ifidx == 0) { + IFP_NET_DESTRUCTOR = free_netdev; + } else { + IFP_NET_DESTRUCTOR = dhd_netdev_free; + } +#else + IFP_NET_DESTRUCTOR = free_netdev; +#endif /* WL_CFG80211 */ + strncpy(ifp->name, ifp->net->name, IFNAMSIZ); + ifp->name[IFNAMSIZ - 1] = '\0'; + dhdinfo->iflist[ifidx] = ifp; + + /* initialize the dongle provided if name */ + if (dngl_name) { + strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ); + } else if (name) { + strncpy(ifp->dngl_name, name, IFNAMSIZ); + } + +#ifdef PCIE_FULL_DONGLE + /* Initialize STA info list */ + INIT_LIST_HEAD(&ifp->sta_list); + DHD_IF_STA_LIST_LOCK_INIT(ifp); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_L2_FILTER + ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh); + ifp->parp_allnode = TRUE; +#endif /* DHD_L2_FILTER */ + + DHD_CUMM_CTR_INIT(&ifp->cumm_ctr); + + return ifp->net; + +fail: + if (ifp != NULL) { + if (ifp->net != NULL) { +#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE) + if (ifp->net == dhdinfo->rx_napi_netdev) { + napi_disable(&dhdinfo->rx_napi_struct); + netif_napi_del(&dhdinfo->rx_napi_struct); + skb_queue_purge(&dhdinfo->rx_napi_queue); + dhdinfo->rx_napi_netdev = NULL; + } +#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */ + dhd_dev_priv_clear(ifp->net); + free_netdev(ifp->net); + ifp->net = NULL; + } + MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); + ifp = NULL; + } + + dhdinfo->iflist[ifidx] = NULL; + return NULL; +} + +static void +dhd_cleanup_ifp(dhd_pub_t *dhdp, s32 ifidx) +{ + dhd_if_t *ifp; + dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info; +#ifdef PCIE_FULL_DONGLE + if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t 
*)dhdp->if_flow_lkup; +#endif /* PCIE_FULL_DONGLE */ + + ifp = dhdinfo->iflist[ifidx]; + if (ifp != NULL) { +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdp->tickcnt); + deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ + + dhd_if_del_sta_list(ifp); +#ifdef PCIE_FULL_DONGLE + /* Delete flowrings of virtual interface */ + if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) { + dhd_flow_rings_delete(dhdp, ifidx); + } +#endif /* PCIE_FULL_DONGLE */ + } +} + +void +dhd_cleanup_if(struct net_device *net) +{ + dhd_info_t *dhdinfo = DHD_DEV_INFO(net); + dhd_pub_t *dhdp = &dhdinfo->pub; + dhd_if_t *ifp; + + if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) || + (ifp->idx >= DHD_MAX_IFS)) { + DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1)); + ASSERT(0); + return; + } + + dhd_cleanup_ifp(dhdp, ifp->idx); +} + +/* unregister and free the net_device interface associated with the indexed + * slot, also free the slot memory and set the slot pointer to NULL + */ +#define DHD_TX_COMPLETION_TIMEOUT 5000 +int +dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; + dhd_if_t *ifp; + unsigned long flags; + u32 timeout; + + ifp = dhdinfo->iflist[ifidx]; + + if (ifp != NULL) { +#ifdef WL_STATIC_IF + /* static IF will be handled in detach */ + if (ifp->static_if) { + DHD_TRACE(("Skip del iface for static interface\n")); + return BCME_OK; + } +#endif /* WL_STATIC_IF */ + if (ifp->net != NULL) { + DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx)); + + DHD_GENERAL_LOCK(dhdpub, flags); + ifp->del_in_progress = true; + DHD_GENERAL_UNLOCK(dhdpub, flags); + + /* If TX is in progress, hold the if del */ + if (DHD_IF_IS_TX_ACTIVE(ifp)) { + DHD_INFO(("TX in progress. 
Wait for it to be complete.")); + timeout = wait_event_timeout(dhdpub->tx_completion_wait, + ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0), + msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT)); + if (!timeout) { + /* Tx completion timeout. Attempt proceeding ahead */ + DHD_ERROR(("Tx completion timed out!\n")); + ASSERT(0); + } + } else { + DHD_TRACE(("No outstanding TX!\n")); + } + dhdinfo->iflist[ifidx] = NULL; + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + netif_tx_disable(ifp->net); + +#if defined(SET_RPS_CPUS) + custom_rps_map_clear(ifp->net->_rx); +#endif /* SET_RPS_CPUS */ +#if defined(SET_RPS_CPUS) +#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) + dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ +#endif // endif + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_dettach_netdev(ifp->net, ifidx); +#endif + } + ifp->net = NULL; + DHD_GENERAL_LOCK(dhdpub, flags); + ifp->del_in_progress = false; + DHD_GENERAL_UNLOCK(dhdpub, flags); + } + dhd_cleanup_ifp(dhdpub, ifidx); + DHD_CUMM_CTR_INIT(&ifp->cumm_ctr); + + MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); + ifp = NULL; + } + + return BCME_OK; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +static struct net_device_ops dhd_ops_pri = { + .ndo_open = dhd_open, + .ndo_stop = dhd_stop, + .ndo_get_stats = dhd_get_stats, +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + .ndo_do_ioctl = dhd_ioctl_entry_wrapper, + .ndo_start_xmit = dhd_start_xmit_wrapper, +#else + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + 
.ndo_set_multicast_list = dhd_set_multicast_list, +#endif // endif +}; + +static struct net_device_ops dhd_ops_virt = { +#if defined(WL_CFG80211) && defined(WL_STATIC_IF) + .ndo_open = dhd_static_if_open, + .ndo_stop = dhd_static_if_stop, +#endif // endif + .ndo_get_stats = dhd_get_stats, +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + .ndo_do_ioctl = dhd_ioctl_entry_wrapper, + .ndo_start_xmit = dhd_start_xmit_wrapper, +#else + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_set_multicast_list, +#endif // endif +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */ + +int +dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf, + unsigned long buflen) +{ + loff_t wr_posn = *posn; + + if (!fp || !buf || buflen == 0) + return -1; + + if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0) + return -1; + + *posn = wr_posn; + return 0; +} + +#ifdef SHOW_LOGTRACE +int +dhd_os_read_file(void *file, char *buf, uint32 size) +{ + struct file *filep = (struct file *)file; + + if (!file || !buf) + return -1; + + return vfs_read(filep, buf, size, &filep->f_pos); +} + +int +dhd_os_seek_file(void *file, int64 offset) +{ + struct file *filep = (struct file *)file; + if (!file) + return -1; + + /* offset can be -ve */ + filep->f_pos = filep->f_pos + offset; + + return 0; +} + +static int +dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp) +{ + struct file *filep = NULL; + struct kstat stat; + mm_segment_t fs; + char *raw_fmts = NULL; + int logstrs_size = 0; + int error = 0; + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(logstrs_path, O_RDONLY, 0); + + if (IS_ERR(filep)) { + DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } + error = vfs_stat(logstrs_path, 
&stat); + if (error) { + DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } + logstrs_size = (int) stat.size; + + if (logstrs_size == 0) { + DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__)); + goto fail1; + } + + raw_fmts = MALLOC(osh, logstrs_size); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__)); + goto fail; + } + + if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) { + DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path)); + goto fail; + } + + if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp) + == BCME_OK) { + filp_close(filep, NULL); + set_fs(fs); + return BCME_OK; + } + +fail: + if (raw_fmts) { + MFREE(osh, raw_fmts, logstrs_size); + raw_fmts = NULL; + } + +fail1: + if (!IS_ERR(filep)) + filp_close(filep, NULL); + + set_fs(fs); + temp->fmts = NULL; + return BCME_ERROR; +} + +static int +dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end) +{ + struct file *filep = NULL; + mm_segment_t fs; + int err = BCME_ERROR; + + if (fname == NULL) { + DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__)); + return BCME_ERROR; + } + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(fname, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname)); + goto fail; + } + + if ((err = dhd_parse_map_file(osh, filep, ramstart, + rodata_start, rodata_end)) < 0) + goto fail; + +fail: + if (!IS_ERR(filep)) + filp_close(filep, NULL); + + set_fs(fs); + + return err; +} + +static int +dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file) +{ + struct file *filep = NULL; + mm_segment_t fs; + char *raw_fmts = NULL; + uint32 logstrs_size = 0; + int error = 0; + uint32 ramstart = 0; + uint32 rodata_start = 0; + uint32 rodata_end = 0; + uint32 logfilebase = 0; + + error = 
dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end); + if (error != BCME_OK) { + DHD_ERROR(("readmap Error!! \n")); + /* don't do event log parsing in actual case */ + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = NULL; + } + return error; + } + DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + ramstart, rodata_start, rodata_end)); + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(str_file, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file)); + goto fail; + } + + if (TRUE) { + /* Full file size is huge. Just read required part */ + logstrs_size = rodata_end - rodata_start; + logfilebase = rodata_start - ramstart; + } + + if (logstrs_size == 0) { + DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__)); + goto fail1; + } + + raw_fmts = MALLOC(osh, logstrs_size); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + + if (TRUE) { + error = generic_file_llseek(filep, logfilebase, SEEK_SET); + if (error < 0) { + DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + } + + error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos)); + if (error != logstrs_size) { + DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = raw_fmts; + temp->raw_sstr_size = logstrs_size; + temp->rodata_start = rodata_start; + temp->rodata_end = rodata_end; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = raw_fmts; + temp->rom_raw_sstr_size = logstrs_size; + temp->rom_rodata_start = rodata_start; + temp->rom_rodata_end = rodata_end; + } + + filp_close(filep, NULL); + set_fs(fs); + + return BCME_OK; + +fail: + if (raw_fmts) { + MFREE(osh, 
raw_fmts, logstrs_size); + raw_fmts = NULL; + } + +fail1: + if (!IS_ERR(filep)) + filp_close(filep, NULL); + + set_fs(fs); + + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = NULL; + } + + return error; +} /* dhd_init_static_strs_array */ + +static int +dhd_trace_open_proc(struct inode *inode, struct file *file) +{ + return single_open(file, 0, NULL); +} + +ssize_t +dhd_trace_read_proc(struct file *file, char __user *buffer, size_t tt, loff_t *loff) +{ + trace_buf_info_t *trace_buf_info; + int ret = BCME_ERROR; + + ASSERT(g_dhd_pub); + mutex_lock(&g_dhd_pub->dhd_trace_lock); + trace_buf_info = (trace_buf_info_t *)MALLOC(g_dhd_pub->osh, + sizeof(trace_buf_info_t)); + if (trace_buf_info) { + dhd_get_read_buf_ptr(g_dhd_pub, trace_buf_info); + if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt))) + { + ret = -EFAULT; + goto exit; + } + if (trace_buf_info->availability == BUF_NOT_AVAILABLE) + ret = BUF_NOT_AVAILABLE; + else + ret = trace_buf_info->size; + } else + DHD_ERROR(("Memory allocation Failed\n")); + +exit: + if (trace_buf_info) { + MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t)); + } + mutex_unlock(&g_dhd_pub->dhd_trace_lock); + return ret; +} +#endif /* SHOW_LOGTRACE */ + +#ifdef DHD_ERPOM +uint enable_erpom = 0; +module_param(enable_erpom, int, 0); + +int +dhd_wlan_power_off_handler(void *handler, unsigned char reason) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handler; + bool dongle_isolation = dhdp->dongle_isolation; + + DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason)); + + if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) { +#if defined(DHD_FW_COREDUMP) + /* save core dump to a file */ + if (dhdp->memdump_enabled) { +#ifdef DHD_SSSR_DUMP + if (dhdp->sssr_inited) { + dhdp->info->no_wq_sssrdump = TRUE; + dhd_bus_sssr_dump(dhdp); + dhdp->info->no_wq_sssrdump = FALSE; + } +#endif /* 
DHD_SSSR_DUMP */ + dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT; + dhd_bus_mem_dump(dhdp); + } +#endif /* DHD_FW_COREDUMP */ + } + + /* pause data on all the interfaces */ + dhd_bus_stop_queue(dhdp->bus); + + /* Devreset function will perform FLR again, to avoid it set dongle_isolation */ + dhdp->dongle_isolation = TRUE; + dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */ + dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */ + return 0; +} + +int +dhd_wlan_power_on_handler(void *handler, unsigned char reason) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handler; + bool dongle_isolation = dhdp->dongle_isolation; + + DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason)); + /* Devreset function will perform FLR again, to avoid it set dongle_isolation */ + dhdp->dongle_isolation = TRUE; + dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */ + dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */ + /* resume data on all the interfaces */ + dhd_bus_start_queue(dhdp->bus); + return 0; + +} + +#endif /* DHD_ERPOM */ + +#ifdef BCMDBUS +uint +dhd_get_rxsz(dhd_pub_t *pub) +{ + struct net_device *net = NULL; + dhd_info_t *dhd = NULL; + uint rxsz; + + /* Assign rxsz for dbus_attach */ + dhd = pub->info; + net = dhd->iflist[0]->net; + net->hard_header_len = ETH_HLEN + pub->hdrlen; + rxsz = DBUS_RX_BUFFER_SIZE_DHD(net); + + return rxsz; +} + +void +dhd_set_path(dhd_pub_t *pub) +{ + dhd_info_t *dhd = NULL; + + dhd = pub->info; + + /* try to download image and nvram to the dongle */ + if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) { + DHD_INFO(("%s: fw %s, nv %s, conf %s\n", + __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path)); + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); + } +} +#endif + +/** Called once for each hardware (dongle) instance that this DHD manages */ +dhd_pub_t * +dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen +#ifdef BCMDBUS + , void *data 
+#endif +) +{ + dhd_info_t *dhd = NULL; + struct net_device *net = NULL; + char if_name[IFNAMSIZ] = {'\0'}; +#ifdef SHOW_LOGTRACE + int ret; +#endif /* SHOW_LOGTRACE */ +#ifdef DHD_ERPOM + pom_func_handler_t *pom_handler; +#endif /* DHD_ERPOM */ +#if defined(BCMSDIO) || defined(BCMPCIE) + uint32 bus_type = -1; + uint32 bus_num = -1; + uint32 slot_num = -1; + wifi_adapter_info_t *adapter = NULL; +#elif defined(BCMDBUS) + wifi_adapter_info_t *adapter = data; +#endif +#ifdef GET_CUSTOM_MAC_ENABLE + char hw_ether[62]; +#endif /* GET_CUSTOM_MAC_ENABLE */ + + dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef PCIE_FULL_DONGLE + ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ); + ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ); +#endif /* PCIE_FULL_DONGLE */ + + /* will implement get_ids for DBUS later */ +#if defined(BCMSDIO) + dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num); +#endif // endif +#if defined(BCMSDIO) || defined(BCMPCIE) + adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); +#endif + + /* Allocate primary dhd_info */ + dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t)); + if (dhd == NULL) { + dhd = MALLOC(osh, sizeof(dhd_info_t)); + if (dhd == NULL) { + DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__)); + goto dhd_null_flag; + } + } + memset(dhd, 0, sizeof(dhd_info_t)); + dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC; + + dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */ + + dhd->pub.osh = osh; +#ifdef DUMP_IOCTL_IOV_LIST + dll_init(&(dhd->pub.dump_iovlist_head)); +#endif /* DUMP_IOCTL_IOV_LIST */ + dhd->adapter = adapter; + dhd->pub.adapter = (void *)adapter; +#ifdef BT_OVER_SDIO + dhd->pub.is_bt_recovery_required = FALSE; + mutex_init(&dhd->bus_user_lock); +#endif /* BT_OVER_SDIO */ + + g_dhd_pub = &dhd->pub; + DHD_INFO(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub)); + +#ifdef DHD_DEBUG + 
dll_init(&(dhd->pub.mw_list_head)); +#endif /* DHD_DEBUG */ + +#ifdef GET_CUSTOM_MAC_ENABLE + wifi_platform_get_mac_addr(dhd->adapter, hw_ether); + bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr)); +#endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef CUSTOM_FORCE_NODFS_FLAG + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef CUSTOM_COUNTRY_CODE + get_customized_country_code(dhd->adapter, + dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec, + dhd->pub.dhd_cflags); +#endif /* CUSTOM_COUNTRY_CODE */ +#ifndef BCMDBUS + dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID; + dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID; +#ifdef DHD_WET + dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub); +#endif /* DHD_WET */ + /* Initialize thread based operation and lock */ + sema_init(&dhd->sdsem, 1); +#endif /* !BCMDBUS */ + dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable; + + /* Link to info module */ + dhd->pub.info = dhd; + + /* Link to bus module */ + dhd->pub.bus = bus; + dhd->pub.hdrlen = bus_hdrlen; + + /* dhd_conf must be attached after linking dhd to dhd->pub.info, + * because dhd_detech will check .info is NULL or not. + */ + if (dhd_conf_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_conf_attach failed\n")); + goto fail; + } +#ifndef BCMDBUS + dhd_conf_reset(&dhd->pub); + dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus)); + dhd_conf_preinit(&dhd->pub); +#endif /* !BCMDBUS */ + + /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name. 
+ * This is indeed a hack but we have to make it work properly before we have a better + * solution + */ + dhd_update_fw_nv_path(dhd); + + /* Set network interface name if it was provided as module parameter */ + if (iface_name[0]) { + int len; + char ch; + strncpy(if_name, iface_name, IFNAMSIZ); + if_name[IFNAMSIZ - 1] = 0; + len = strlen(if_name); + ch = if_name[len - 1]; + if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) + strncat(if_name, "%d", 2); + } + + /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */ + net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL); + if (net == NULL) { + goto fail; + } + mutex_init(&dhd->pub.ndev_op_sync); + + dhd_state |= DHD_ATTACH_STATE_ADD_IF; +#ifdef DHD_L2_FILTER + /* initialize the l2_filter_cnt */ + dhd->pub.l2_filter_cnt = 0; +#endif // endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif // endif + + mutex_init(&dhd->dhd_iovar_mutex); + sema_init(&dhd->proto_sem, 1); +#ifdef DHD_ULP + if (!(dhd_ulp_init(osh, &dhd->pub))) + goto fail; +#endif /* DHD_ULP */ + +#ifdef PROP_TXSTATUS + spin_lock_init(&dhd->wlfc_spinlock); + + dhd->pub.skip_fc = dhd_wlfc_skip_fc; + dhd->pub.plat_init = dhd_wlfc_plat_init; + dhd->pub.plat_deinit = dhd_wlfc_plat_deinit; + +#ifdef DHD_WLFC_THREAD + init_waitqueue_head(&dhd->pub.wlfc_wqhead); + dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread"); + if (IS_ERR(dhd->pub.wlfc_thread)) { + DHD_ERROR(("create wlfc thread failed\n")); + goto fail; + } else { + wake_up_process(dhd->pub.wlfc_thread); + } +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + + /* Initialize other structure content */ + init_waitqueue_head(&dhd->ioctl_resp_wait); + init_waitqueue_head(&dhd->d3ack_wait); + init_waitqueue_head(&dhd->ctrl_wait); + init_waitqueue_head(&dhd->dhd_bus_busy_state_wait); + init_waitqueue_head(&dhd->dmaxfer_wait); + 
init_waitqueue_head(&dhd->pub.tx_completion_wait); + dhd->pub.dhd_bus_busy_state = 0; + + /* Initialize the spinlocks */ + spin_lock_init(&dhd->sdlock); + spin_lock_init(&dhd->txqlock); + spin_lock_init(&dhd->dhd_lock); + spin_lock_init(&dhd->rxf_lock); +#ifdef WLTDLS + spin_lock_init(&dhd->pub.tdls_lock); +#endif /* WLTDLS */ +#if defined(PCIE_FULL_DONGLE) + spin_lock_init(&dhd->backplane_access_lock); +#endif /* defined(PCIE_FULL_DONGLE) */ +#if defined(RXFRAME_THREAD) + dhd->rxthread_enabled = TRUE; +#endif /* defined(RXFRAME_THREAD) */ + +#ifdef DHDTCPACK_SUPPRESS + spin_lock_init(&dhd->tcpack_lock); +#endif /* DHDTCPACK_SUPPRESS */ + + /* Initialize Wakelock stuff */ + spin_lock_init(&dhd->wakelock_spinlock); + spin_lock_init(&dhd->wakelock_evt_spinlock); + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->wakelock_counter = 0; + /* wakelocks prevent a system from going into a low power state */ +#ifdef CONFIG_HAS_WAKELOCK + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); + wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake"); +#endif /* CONFIG_HAS_WAKELOCK */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhd->dhd_net_if_mutex); + mutex_init(&dhd->dhd_suspend_mutex); +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + mutex_init(&dhd->dhd_apf_mutex); +#endif /* PKT_FILTER_SUPPORT && APF */ +#endif // endif + dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + + /* Attach and link in the protocol */ + if (dhd_prot_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_prot_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH; + +#ifdef WL_CFG80211 + spin_lock_init(&dhd->pub.up_lock); + /* Attach and link in the cfg80211 */ + if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) { + DHD_ERROR(("wl_cfg80211_attach failed\n")); + goto fail; + } + + dhd_monitor_init(&dhd->pub); + dhd_state |= 
DHD_ATTACH_STATE_CFG80211; +#endif // endif + +#if defined(WL_WIRELESS_EXT) +#ifdef WL_ESCAN + if (wl_escan_attach(net, &dhd->pub) != 0) { + DHD_ERROR(("wl_escan_attach failed\n")); + goto fail; + } +#else + /* Attach and link in the iw */ + if (wl_iw_attach(net, &dhd->pub) != 0) { + DHD_ERROR(("wl_iw_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; +#endif /* WL_ESCAN */ +#endif /* defined(WL_WIRELESS_EXT) */ +#ifdef WL_EXT_IAPSTA + if (wl_ext_iapsta_attach(&dhd->pub) != 0) { + DHD_ERROR(("wl_ext_iapsta_attach failed\n")); + goto fail; + } +#endif + +#ifdef SHOW_LOGTRACE + ret = dhd_init_logstrs_array(osh, &dhd->event_data); + if (ret == BCME_OK) { + dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path); + dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path, + rom_map_file_path); + dhd_state |= DHD_ATTACH_LOGTRACE_INIT; + } +#endif /* SHOW_LOGTRACE */ + +#ifdef DEBUGABILITY + /* attach debug if support */ + if (dhd_os_dbg_attach(&dhd->pub)) { + DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__)); + goto fail; + } + +#if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT) + /* enable verbose ring to support dump_trace_buf */ + dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0); +#endif /* SHOW_LOGTRACE */ + +#ifdef DBG_PKT_MON + dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh); +#ifdef DBG_PKT_MON_INIT_DEFAULT + dhd_os_dbg_attach_pkt_monitor(&dhd->pub); +#endif /* DBG_PKT_MON_INIT_DEFAULT */ +#endif /* DBG_PKT_MON */ +#endif /* DEBUGABILITY */ + +#ifdef DHD_LOG_DUMP + dhd_log_dump_init(&dhd->pub); +#endif /* DHD_LOG_DUMP */ + + if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) { + DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA)); + goto fail; + } + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!dhd->tx_wq) { + DHD_ERROR(("%s: 
alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__)); + goto fail; + } + dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!dhd->rx_wq) { + DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__)); + destroy_workqueue(dhd->tx_wq); + dhd->tx_wq = NULL; + goto fail; + } +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifndef BCMDBUS + /* Set up the watchdog timer */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + timer_setup(&dhd->timer, dhd_watchdog, 0); +#else + init_timer(&dhd->timer); + dhd->timer.data = (ulong)dhd; + dhd->timer.function = dhd_watchdog; +#endif + dhd->default_wd_interval = dhd_watchdog_ms; + + if (dhd_watchdog_prio >= 0) { + /* Initialize watchdog thread */ + PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread"); + if (dhd->thr_wdt_ctl.thr_pid < 0) { + goto fail; + } + + } else { + dhd->thr_wdt_ctl.thr_pid = -1; + } + +#ifdef SHOW_LOGTRACE + skb_queue_head_init(&dhd->evt_trace_queue); + if (proc_create("dhd_trace", S_IRUSR, NULL, &proc_file_fops) == NULL) + DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n")); + mutex_init(&dhd->pub.dhd_trace_lock); +#endif /* SHOW_LOGTRACE */ + + /* Set up the bottom half handler */ + if (dhd_dpc_prio >= 0) { + /* Initialize DPC thread */ + PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc"); + if (dhd->thr_dpc_ctl.thr_pid < 0) { + goto fail; + } + } else { + /* use tasklet for dpc */ + tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); + dhd->thr_dpc_ctl.thr_pid = -1; + } + + if (dhd->rxthread_enabled) { + bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND); + /* Initialize RXF thread */ + PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf"); + if (dhd->thr_rxf_ctl.thr_pid < 0) { + goto fail; + } + } +#endif /* !BCMDBUS */ + + dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED; + +#if defined(CONFIG_PM_SLEEP) + if (!dhd_pm_notifier_registered) { + dhd_pm_notifier_registered = TRUE; 
+ dhd->pm_notifier.notifier_call = dhd_pm_callback; + dhd->pm_notifier.priority = 10; + register_pm_notifier(&dhd->pm_notifier); + } + +#endif /* CONFIG_PM_SLEEP */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20; + dhd->early_suspend.suspend = dhd_early_suspend; + dhd->early_suspend.resume = dhd_late_resume; + register_early_suspend(&dhd->early_suspend); + dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd); +#ifdef DEBUG_CPU_FREQ + dhd->new_freq = alloc_percpu(int); + dhd->freq_trans.notifier_call = dhd_cpufreq_notifier; + cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); +#endif // endif +#ifdef DHDTCPACK_SUPPRESS +#ifdef BCMSDIO + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX); +#elif defined(BCMPCIE) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); +#else + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* BCMSDIO */ +#endif /* DHDTCPACK_SUPPRESS */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef DHD_DEBUG_PAGEALLOC + register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#if defined(DHD_LB) + + dhd_lb_set_default_cpus(dhd); + DHD_LB_STATS_INIT(&dhd->pub); + + /* Initialize the CPU Masks */ + if 
(dhd_cpumasks_init(dhd) == 0) { + /* Now we have the current CPU maps, run through candidacy */ + dhd_select_cpu_candidacy(dhd); + + /* Register the call backs to CPU Hotplug sub-system */ + dhd_register_cpuhp_callback(dhd); + + } else { + /* + * We are unable to initialize CPU masks, so candidacy algorithm + * won't run, but still Load Balancing will be honoured based + * on the CPUs allocated for a given job statically during init + */ + dhd->cpu_notifier.notifier_call = NULL; + DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n", + __FUNCTION__)); + } + +#ifdef DHD_LB_TXP +#ifdef DHD_LB_TXP_DEFAULT_ENAB + /* Trun ON the feature by default */ + atomic_set(&dhd->lb_txp_active, 1); +#else + /* Trun OFF the feature by default */ + atomic_set(&dhd->lb_txp_active, 0); +#endif /* DHD_LB_TXP_DEFAULT_ENAB */ +#endif /* DHD_LB_TXP */ + + /* Initialize the Load Balancing Tasklets and Napi object */ +#if defined(DHD_LB_TXC) + tasklet_init(&dhd->tx_compl_tasklet, + dhd_lb_tx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) + tasklet_init(&dhd->rx_compl_tasklet, + dhd_lb_rx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_RXC */ + +#if defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + skb_queue_head_init(&dhd->rx_napi_queue); + /* Initialize the work that dispatches NAPI job to a given core */ + INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn); + DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__)); +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work); + skb_queue_head_init(&dhd->tx_pend_queue); + /* Initialize the work 
that dispatches TX job to a given core */ + tasklet_init(&dhd->tx_tasklet, + dhd_lb_tx_handler, (ulong)(dhd)); + DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__)); +#endif /* DHD_LB_TXP */ + + dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE; +#endif /* DHD_LB */ + +#if defined(BCMPCIE) + dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN); + if (dhd->pub.extended_trap_data == NULL) { + DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__)); + } +#endif /* BCMPCIE && ETD */ + +#ifdef SHOW_LOGTRACE + if (dhd_init_logtrace_process(dhd) != BCME_OK) { + goto fail; + } +#endif /* SHOW_LOGTRACE */ + + DHD_SSSR_MEMPOOL_INIT(&dhd->pub); + +#ifdef EWP_EDL + if (host_edl_support) { + if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) { + host_edl_support = FALSE; + } + } +#endif /* EWP_EDL */ + + (void)dhd_sysfs_init(dhd); + +#ifdef WL_NATOE + /* Open Netlink socket for NF_CONNTRACK notifications */ + dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP, + CT_ALL); +#endif /* WL_NATOE */ + + dhd_state |= DHD_ATTACH_STATE_DONE; + dhd->dhd_state = dhd_state; + + dhd_found++; + +#ifdef DHD_DUMP_MNGR + dhd->pub.dump_file_manage = + (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t)); + if (unlikely(!dhd->pub.dump_file_manage)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dump_file_manage_t\n", __FUNCTION__)); + } +#endif /* DHD_DUMP_MNGR */ +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(&dhd->pub); +#endif /* DHD_FW_COREDUMP */ + +#ifdef DHD_ERPOM + if (enable_erpom) { + pom_handler = &dhd->pub.pom_wlan_handler; + pom_handler->func_id = WLAN_FUNC_ID; + pom_handler->handler = (void *)g_dhd_pub; + pom_handler->power_off = dhd_wlan_power_off_handler; + pom_handler->power_on = dhd_wlan_power_on_handler; + + dhd->pub.pom_func_register = NULL; + dhd->pub.pom_func_deregister = NULL; + dhd->pub.pom_toggle_reg_on = NULL; + + 
dhd->pub.pom_func_register = symbol_get(pom_func_register);
+ dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
+ dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
+
+ symbol_put(pom_func_register);
+ symbol_put(pom_func_deregister);
+ symbol_put(pom_toggle_reg_on);
+
+ /* All three POM entry points must resolve, else the POM module is absent */
+ if (!dhd->pub.pom_func_register ||
+ !dhd->pub.pom_func_deregister ||
+ !dhd->pub.pom_toggle_reg_on) {
+ DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
+ "POM is not loaded\n", __FUNCTION__));
+ ASSERT(0);
+ goto fail;
+ }
+ dhd->pub.pom_func_register(pom_handler);
+ dhd->pub.enable_erpom = TRUE;
+
+ }
+#endif /* DHD_ERPOM */
+ return &dhd->pub;
+
+fail:
+ if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
+ DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+ __FUNCTION__, dhd_state, &dhd->pub));
+ dhd->dhd_state = dhd_state;
+ dhd_detach(&dhd->pub);
+ dhd_free(&dhd->pub);
+ }
+
+dhd_null_flag:
+ return NULL;
+}
+
+/* Derive the driver operating mode from the firmware file name suffix in
+ * dhdinfo->fw_path. Recognized suffixes: "_apsta" -> HOSTAP, "_p2p" -> P2P,
+ * "_ibss" -> IBSS, "_mfg" -> MFG; anything else defaults to STA mode.
+ * Note the check order matters: "_apsta" must be tested before a plain
+ * substring match could be confused with another tag.
+ */
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+{
+ if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+ return DHD_FLAG_HOSTAP_MODE;
+ if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+ return DHD_FLAG_P2P_MODE;
+ if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+ return DHD_FLAG_IBSS_MODE;
+ if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+ return DHD_FLAG_MFG_MODE;
+
+ return DHD_FLAG_STA_MODE;
+}
+
+/* Thin wrapper: resolve the fw mode through the dhd_pub_t's info back-pointer. */
+int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
+{
+ return dhd_get_fw_mode(dhdp->info);
+}
+
+extern char * nvram_get(const char *name);
+/* Refresh dhdinfo->fw_path/nv_path/clm_path/conf_path from (in priority order)
+ * the compile-time defaults, the adapter info, and the module parameters.
+ * Returns TRUE when both a firmware and an nvram path end up non-empty.
+ */
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
+{
+ int fw_len;
+ int nv_len;
+ int clm_len;
+ int conf_len;
+ const char *fw = NULL;
+ const char *nv = NULL;
+ const char *clm = NULL;
+ const char *conf = NULL;
+#ifdef DHD_UCODE_DOWNLOAD
+ int uc_len;
+ const char *uc = NULL;
+#endif /* DHD_UCODE_DOWNLOAD */
+ wifi_adapter_info_t *adapter = dhdinfo->adapter;
+ int fw_path_len = sizeof(dhdinfo->fw_path);
+ int nv_path_len = sizeof(dhdinfo->nv_path);
+
+ /* Update firmware and nvram path.
The path may be from adapter info or module parameter
+ * The path from adapter info is used for initialization only (as it won't change).
+ *
+ * The firmware_path/nvram_path module parameter may be changed by the system at run
+ * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
+ * command may change dhdinfo->fw_path. As such we need to clear the path info in
+ * module parameter after it is copied. We won't update the path until the module parameter
+ * is changed again (first character is not '\0')
+ */
+
+ /* set default firmware and nvram path for built-in type driver */
+// if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_FW_PATH
+ fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+// }
+
+ /* check if we need to initialize the path */
+ if (dhdinfo->fw_path[0] == '\0') {
+ if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+ fw = adapter->fw_path;
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+ nv = adapter->nv_path;
+ }
+ if (dhdinfo->clm_path[0] == '\0') {
+ if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
+ clm = adapter->clm_path;
+ }
+ if (dhdinfo->conf_path[0] == '\0') {
+ if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
+ conf = adapter->conf_path;
+ }
+
+ /* Use module parameter if it is valid, EVEN IF the path has not been initialized
+ *
+ * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+ */
+ /* Module parameters take precedence over adapter-provided paths */
+ if (firmware_path[0] != '\0')
+ fw = firmware_path;
+
+ if (nvram_path[0] != '\0')
+ nv = nvram_path;
+ if (clm_path[0] != '\0')
+ clm = clm_path;
+ if (config_path[0] != '\0')
+ conf = config_path;
+
+#ifdef DHD_UCODE_DOWNLOAD
+ if (ucode_path[0] != '\0')
+ uc = ucode_path;
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ if (fw && fw[0] != '\0') {
+ fw_len = strlen(fw);
+ if (fw_len >= fw_path_len) {
+ DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+ return FALSE;
+ }
+ /* fw_len < fw_path_len was verified above, so strncpy copies the NUL too */
+ strncpy(dhdinfo->fw_path, fw, fw_path_len);
+ if (dhdinfo->fw_path[fw_len-1] == '\n')
+ dhdinfo->fw_path[fw_len-1] = '\0';
+ }
+ if (nv && nv[0] != '\0') {
+ nv_len = strlen(nv);
+ if (nv_len >= nv_path_len) {
+ DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+ return FALSE;
+ }
+ memset(dhdinfo->nv_path, 0, nv_path_len);
+ strncpy(dhdinfo->nv_path, nv, nv_path_len);
+ dhdinfo->nv_path[nv_len] = '\0';
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+ /* Remove "_net" or "_mfg" tag from current nvram path */
+ {
+ char *nvram_tag = "nvram_";
+ char *ext_tag = ".txt";
+ char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
+ bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
+ strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
+ if (valid_buf) {
+ /* Truncate right after "nvram" and re-append ".txt" */
+ char *sp = sp_nvram + strlen(nvram_tag) - 1;
+ uint32 padding_size = (uint32)(dhdinfo->nv_path +
+ nv_path_len - sp);
+ memset(sp, 0, padding_size);
+ strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
+ nv_len = strlen(dhdinfo->nv_path);
+ DHD_INFO(("%s: new nvram path = %s\n",
+ __FUNCTION__, dhdinfo->nv_path));
+ } else if (sp_nvram) {
+ DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
+ __FUNCTION__));
+ return FALSE;
+ } else {
+ DHD_ERROR(("%s: Couldn't find the nvram tag. current"
+ " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
+ }
+ }
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+ if (dhdinfo->nv_path[nv_len-1] == '\n')
+ dhdinfo->nv_path[nv_len-1] = '\0';
+ }
+ if (clm && clm[0] != '\0') {
+ clm_len = strlen(clm);
+ if (clm_len >= sizeof(dhdinfo->clm_path)) {
+ DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
+ if (dhdinfo->clm_path[clm_len-1] == '\n')
+ dhdinfo->clm_path[clm_len-1] = '\0';
+ }
+ if (conf && conf[0] != '\0') {
+ conf_len = strlen(conf);
+ if (conf_len >= sizeof(dhdinfo->conf_path)) {
+ DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
+ if (dhdinfo->conf_path[conf_len-1] == '\n')
+ dhdinfo->conf_path[conf_len-1] = '\0';
+ }
+#ifdef DHD_UCODE_DOWNLOAD
+ if (uc && uc[0] != '\0') {
+ uc_len = strlen(uc);
+ if (uc_len >= sizeof(dhdinfo->uc_path)) {
+ DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
+ if (dhdinfo->uc_path[uc_len-1] == '\n')
+ dhdinfo->uc_path[uc_len-1] = '\0';
+ }
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#if 0
+ /* clear the path in module parameter */
+ if (dhd_download_fw_on_driverload) {
+ firmware_path[0] = '\0';
+ nvram_path[0] = '\0';
+ clm_path[0] = '\0';
+ config_path[0] = '\0';
+ }
+#endif
+#ifdef DHD_UCODE_DOWNLOAD
+ ucode_path[0] = '\0';
+ DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
+ if (dhdinfo->fw_path[0] == '\0') {
+ DHD_ERROR(("firmware path not found\n"));
+ return FALSE;
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ DHD_ERROR(("nvram path not found\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+#if defined(BT_OVER_SDIO)
+/* Refresh dhdinfo->btfw_path from defaults, adapter info, or the btfw_path
+ * module parameter (highest priority); returns TRUE when a path is set.
+ */
+extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char*
btfw_path)
+{
+ int fw_len;
+ const char *fw = NULL;
+ wifi_adapter_info_t *adapter = dhdinfo->adapter;
+
+ /* Update bt firmware path. The path may be from adapter info or module parameter
+ * The path from adapter info is used for initialization only (as it won't change).
+ *
+ * The btfw_path module parameter may be changed by the system at run
+ * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
+ * command may change dhdinfo->btfw_path. As such we need to clear the path info in
+ * module parameter after it is copied. We won't update the path until the module parameter
+ * is changed again (first character is not '\0')
+ */
+
+ /* set default firmware and nvram path for built-in type driver */
+ if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_BTFW_PATH
+ fw = CONFIG_BCMDHD_BTFW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+ }
+
+ /* check if we need to initialize the path */
+ if (dhdinfo->btfw_path[0] == '\0') {
+ if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
+ fw = adapter->btfw_path;
+ }
+
+ /* Use module parameter if it is valid, EVEN IF the path has not been initialized
+ */
+ if (btfw_path[0] != '\0')
+ fw = btfw_path;
+
+ /* Copy the chosen path; its length is validated against the destination first */
+ if (fw && fw[0] != '\0') {
+ fw_len = strlen(fw);
+ if (fw_len >= sizeof(dhdinfo->btfw_path)) {
+ DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
+ if (dhdinfo->btfw_path[fw_len-1] == '\n')
+ dhdinfo->btfw_path[fw_len-1] = '\0';
+ }
+
+ /* clear the path in module parameter */
+ btfw_path[0] = '\0';
+
+ if (dhdinfo->btfw_path[0] == '\0') {
+ DHD_ERROR(("bt firmware path not found\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+#if defined(BT_OVER_SDIO)
+/* Expose the global dhd_pub_t to the BT stack as an opaque handle. */
+wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
+{
+ DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
+ /* assuming that dhd_pub_t type pointer is available from a global
variable */
+ return (wlan_bt_handle_t) g_dhd_pub;
+} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
+
+/* Download the BT firmware image to the dongle. Only proceeds once the WLAN
+ * bus is in DATA state and a BT firmware path could be resolved; returns the
+ * bus-layer result (negative on failure, -1 if preconditions were not met).
+ */
+int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
+{
+ int ret = -1;
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+
+ /* Download BT firmware image to the dongle */
+ if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
+ DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
+ ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to download btfw from: %s\n",
+ __FUNCTION__, dhd->btfw_path));
+ return ret;
+ }
+ }
+ return ret;
+} EXPORT_SYMBOL(dhd_download_btfw);
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifndef BCMDBUS
+/* Bring the bus and dongle up: download firmware+NVRAM if the bus is down,
+ * start the watchdog timer, initialize the bus, register OOB interrupts,
+ * initialize flow rings and the protocol layer, then sync with the dongle.
+ * Returns 0 on success or a negative errno/BCME error code; every error
+ * path releases the perimeter lock (and SDIO lock where taken) first.
+ */
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+ int ret = -1;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+ unsigned long flags;
+
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
+#endif /* DHD_DEBUG && BCMSDIO */
+ ASSERT(dhd);
+
+ DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+ /* Reset per-attempt failure indicators */
+ dhdp->dongle_trap_occured = 0;
+ dhdp->iovar_timeout_occured = 0;
+#ifdef PCIE_FULL_DONGLE
+ dhdp->d3ack_timeout_occured = 0;
+ dhdp->livelock_occured = 0;
+#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_MAP_LOGGING
+ dhdp->smmu_fault_occurred = 0;
+#endif /* DHD_MAP_LOGGING */
+
+ DHD_PERIM_LOCK(dhdp);
+ /* try to download image and nvram to the dongle */
+ if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+ /* Indicate FW Download has not yet done */
+ dhd->pub.fw_download_done = FALSE;
+ DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
+ __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ fw_download_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ fw_download_end = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to download firmware %s\n",
+ __FUNCTION__, dhd->fw_path));
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+ /* Indicate FW Download has succeeded */
+ dhd->pub.fw_download_done = TRUE;
+ }
+ if (dhd->pub.busstate != DHD_BUS_LOAD) {
+ DHD_PERIM_UNLOCK(dhdp);
+ return -ENETDOWN;
+ }
+
+#ifdef BCMSDIO
+ dhd_os_sdlock(dhdp);
+#endif /* BCMSDIO */
+
+ /* Start the watchdog timer */
+ dhd->pub.tickcnt = 0;
+ dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+ /* Bring up the bus */
+ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+ DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+
+ DHD_ENABLE_RUNTIME_PM(&dhd->pub);
+
+#ifdef DHD_ULP
+ dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
+#endif /* DHD_ULP */
+#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
+ /* Host registration for OOB interrupt */
+ if (dhd_bus_oob_intr_register(dhdp)) {
+ /* deactivate timer and wait for the handler to finish */
+#if !defined(BCMPCIE_OOB_HOST_WAKE)
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+ DHD_PERIM_UNLOCK(dhdp);
+ DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return -ENODEV;
+ }
+
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ dhd_bus_oob_intr_set(dhdp, TRUE);
+#else
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#elif defined(FORCE_WOWLAN)
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_FULL_DONGLE
+ {
+ /* max_h2d_rings includes H2D common rings */
+ uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
+
+ DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
+ max_h2d_rings));
+ if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+ /* Do protocol initialization necessary for IOCTL/IOVAR */
+ ret = dhd_prot_init(&dhd->pub);
+ if (unlikely(ret) != BCME_OK) {
+ DHD_PERIM_UNLOCK(dhdp);
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+
+ /* If bus is not ready, can't come up */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ DHD_PERIM_UNLOCK(dhdp);
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return -ENODEV;
+ }
+
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+
+ /* Bus is ready, query any dongle information */
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ f2_sync_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+
+#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
+ defined(CONFIG_SOC_EXYNOS9820)
+ DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
+
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ f2_sync_end = OSL_SYSUPTIME();
+ DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
+ (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
+#endif /* DHD_DEBUG && BCMSDIO */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* Flush any host IP address that arrived before the dongle was up */
+ if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+ aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ dhd->pend_ipaddr = 0;
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+ DHD_PERIM_UNLOCK(dhdp);
+
+ return 0;
+}
+#endif /* !BCMDBUS */
+
+#ifdef WLTDLS
+/* Program TDLS enable/auto-operation iovars into the firmware; when auto
+ * mode is on, also pushes the idle-time, RSSI and packet-count thresholds.
+ */
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+ uint32 tdls = tdls_on;
+ int ret = 0;
+ uint32 tdls_auto_op = 0;
+ uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
+ int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+ int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+ uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
+ uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
+
+ BCM_REFERENCE(mac);
+ if (!FW_SUPPORTED(dhd, tdls))
+ return BCME_ERROR;
+
+ if (dhd->tdls_enable == tdls_on)
+ goto auto_mode;
+ ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
+ goto exit;
+ }
+ dhd->tdls_enable = tdls_on;
+auto_mode:
+
+ tdls_auto_op = auto_on;
+ ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ if (tdls_auto_op) {
+ ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
+ sizeof(tdls_idle_time), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
+
sizeof(tdls_rssi_high), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
+ sizeof(tdls_rssi_low), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
+ sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
+ sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+/* net_device-level wrapper around _dhd_tdls_enable(); returns BCME_ERROR when
+ * no driver info is attached to the device.
+ */
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+ if (dhd)
+ ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+ else
+ ret = BCME_ERROR;
+ return ret;
+}
+
+/* Switch TDLS between WFD and normal mode: disable TDLS, set the
+ * tdls_wfd_mode iovar (BCME_UNSUPPORTED is tolerated), then re-enable.
+ */
+int
+dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
+{
+ int ret = 0;
+ bool auto_on = false;
+ uint32 mode = wfd_mode;
+
+#ifdef ENABLE_TDLS_AUTO_MODE
+ /* auto mode is only used outside WFD mode */
+ if (wfd_mode) {
+ auto_on = false;
+ } else {
+ auto_on = true;
+ }
+#else
+ auto_on = false;
+#endif /* ENABLE_TDLS_AUTO_MODE */
+ ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
+ return ret;
+ }
+
+ ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
+ if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
+ DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
+ return ret;
+ }
+
+ dhd->tdls_mode = mode;
+ return ret;
+}
+#ifdef PCIE_FULL_DONGLE
+/* Maintain the TDLS peer table from WLC_E_TDLS_PEER_* events: add a node on
+ * CONNECTED, remove it (and its flow rings) on DISCONNECTED.
+ * NOTE(review): the duplicate-check walk of peer_tbl runs before
+ * DHD_TDLS_LOCK is taken; verify that TDLS events are serialized by the
+ * caller, otherwise the walk can race with a concurrent removal.
+ */
+int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
+{
+ dhd_pub_t *dhd_pub = dhdp;
+ tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
+ tdls_peer_node_t *new = NULL, *prev = NULL;
+ int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ uint8 *da = (uint8 *)&event->addr.octet[0];
+ bool connect = FALSE;
+ uint32 reason = ntoh32(event->reason);
+ unsigned long flags;
+
+ if (reason == WLC_E_TDLS_PEER_CONNECTED)
+ connect = TRUE;
+ else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
+ connect = FALSE;
+ else
+ {
+ DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (ifindex == DHD_BAD_IF)
+ return BCME_ERROR;
+
+ if (connect) {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s: TDLS Peer exist already %d\n",
+ __FUNCTION__, __LINE__));
+ return BCME_ERROR;
+ }
+ cur = cur->next;
+ }
+
+ new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
+ if (new == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ memcpy(new->addr, da, ETHER_ADDR_LEN);
+ /* push the new peer at the head of the list under the tdls lock */
+ DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+ new->next = dhd_pub->peer_tbl.node;
+ dhd_pub->peer_tbl.node = new;
+ dhd_pub->peer_tbl.tdls_peer_count++;
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+
+ } else {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
+ DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+ if (prev)
+ prev->next = cur->next;
+ else
+ dhd_pub->peer_tbl.node = cur->next;
+ MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
+ dhd_pub->peer_tbl.tdls_peer_count--;
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+ return BCME_OK;
+ }
+ prev = cur;
+ cur = cur->next;
+ }
+ DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
+ }
+ return BCME_OK;
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif
// endif
+
+/* TRUE when op_mode advertises multi-chan concurrency or the full
+ * single-chan concurrency flag set; FALSE otherwise (including NULL dhd).
+ */
+bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
+{
+ if (!dhd)
+ return FALSE;
+
+ if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+ return TRUE;
+ else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+ DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+ return TRUE;
+ else
+ return FALSE;
+}
+#if !defined(AP) && defined(WLP2P)
+/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
+ * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
+ * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
+ * would still be named as fw_bcmdhd_apsta.
+ */
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+{
+ int32 ret = 0;
+ char buf[WLC_IOCTL_SMLEN];
+ bool mchan_supported = FALSE;
+ /* if dhd->op_mode is already set for HOSTAP and Manufacturing
+ * test mode, that means we only will use the mode as it is
+ */
+ if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+ return 0;
+ if (FW_SUPPORTED(dhd, vsdb)) {
+ mchan_supported = TRUE;
+ }
+ if (!FW_SUPPORTED(dhd, p2p)) {
+ DHD_TRACE(("Chip does not support p2p\n"));
+ return 0;
+ } else {
+ /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
+ memset(buf, 0, sizeof(buf));
+ ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
+ sizeof(buf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+ return 0;
+ } else {
+ if (buf[0] == 1) {
+ /* By default, chip supports single chan concurrency,
+ * now lets check for mchan
+ */
+ ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+ if (mchan_supported)
+ ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ ret |= DHD_FLAG_RSDB_MODE;
+ }
+#ifdef WL_SUPPORT_MULTIP2P
+ if (FW_SUPPORTED(dhd, mp2p)) {
+ ret |= DHD_FLAG_MP2P_MODE;
+ }
+#endif /* WL_SUPPORT_MULTIP2P */
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+ return ret;
+#else
+ return 0;
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+ }
+ }
+ }
+ return 0;
+}
+#endif // endif
+
+#if defined(WLADPS)
+
+/* Set the ADPS (adaptive power save) mode iovar for every band (1..MAX_BANDS).
+ * BCME_UNSUPPORTED from the firmware is treated as success; the iov buffer
+ * is always released through the single exit path.
+ */
+int
+dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
+{
+ int i;
+ int len;
+ int ret = BCME_OK;
+
+ bcm_iov_buf_t *iov_buf = NULL;
+ wl_adps_params_v1_t *data = NULL;
+
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+ iov_buf = MALLOC(dhd->osh, len);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ iov_buf->version = WL_ADPS_IOV_VER;
+ iov_buf->len = sizeof(*data);
+ iov_buf->id = WL_ADPS_IOV_MODE;
+
+ data = (wl_adps_params_v1_t *)iov_buf->data;
+ data->version = ADPS_SUB_IOV_VERSION_1;
+ data->length = sizeof(*data);
+ data->mode = on;
+
+ for (i = 1; i <= MAX_BANDS; i++) {
+ data->band = i;
+ ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
+ if (ret < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
+ ret = BCME_OK;
+ goto exit;
+ }
+ else {
+ DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
+ __FUNCTION__, on ? "On" : "Off", i, ret));
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ if (iov_buf) {
+ MFREE(dhd->osh, iov_buf, len);
+ iov_buf = NULL;
+ }
+ return ret;
+}
+#endif // endif
+
+/* Run the post-firmware-download IOCTL/iovar initialization sequence
+ * (operating mode, MAC address, event masks, power save, roaming, etc.).
+ */
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+ uint32 buf_key_b4_m4 = 1;
+ uint8 msglen;
+ eventmsgs_ext_t *eventmask_msg = NULL;
+ char* iov_buf = NULL;
+ int ret2 = 0;
+ uint32 wnm_cap = 0;
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+ uint32 sup_wpa = 1;
+#endif /* BCMSUP_4WAY_HANDSHAKE */
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+ uint32 ampdu_ba_wsize = 0;
+#endif // endif
+#if defined(CUSTOM_AMPDU_MPDU)
+ int32 ampdu_mpdu = 0;
+#endif // endif
+#if defined(CUSTOM_AMPDU_RELEASE)
+ int32 ampdu_release = 0;
+#endif // endif
+#if defined(CUSTOM_AMSDU_AGGSF)
+ int32 amsdu_aggsf = 0;
+#endif // endif
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+ int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+ uint32 hostreorder = 1;
+ uint wl_down = 1;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMDBUS */
+#ifndef PCIE_FULL_DONGLE
+ uint32 wl_ap_isolate;
+#endif /* PCIE_FULL_DONGLE */
+ uint32 frameburst = CUSTOM_FRAMEBURST_SET;
+ uint wnm_bsstrans_resp = 0;
+#ifdef SUPPORT_SET_CAC
+ uint32 cac = 1;
+#endif /* SUPPORT_SET_CAC */
+
+#if defined(DHD_NON_DMA_M2M_CORRUPTION)
+ dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk;
+#endif /* DHD_NON_DMA_M2M_CORRUPTION */
+
+#ifdef DHD_ENABLE_LPC
+ uint32 lpc = 1;
+#endif /* DHD_ENABLE_LPC */
+ uint power_mode = PM_FAST;
+#if defined(BCMSDIO)
+ uint32 dongle_align = DHD_SDALIGN;
+ uint32 glom = CUSTOM_GLOM_SETTING;
+#endif /* defined(BCMSDIO) */
+#if defined(USE_WL_CREDALL)
+ uint32 credall = 1;
+#endif // endif
+ uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
+ uint scancache_enab = TRUE;
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ uint32 bcn_li_bcn = 1;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+ uint retry_max =
CUSTOM_ASSOC_RETRY_MAX; +#if defined(ARP_OFFLOAD_SUPPORT) + int arpoe = 1; +#endif // endif + int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME; + int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME; + int scan_passive_time = DHD_SCAN_PASSIVE_TIME; + char buf[WLC_IOCTL_SMLEN]; + char *ptr; + uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ +#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE) + wl_el_tag_params_t *el_tag = NULL; +#endif /* DHD_8021X_DUMP */ +#ifdef ROAM_ENABLE + uint roamvar = 0; + int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL}; + int roam_scan_period[2] = {10, WLC_BAND_ALL}; + int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL}; +#ifdef ROAM_AP_ENV_DETECTION + int roam_env_mode = AP_ENV_INDETERMINATE; +#endif /* ROAM_AP_ENV_DETECTION */ +#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC + int roam_fullscan_period = 60; +#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ + int roam_fullscan_period = 120; +#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#ifdef DISABLE_BCNLOSS_ROAM + uint roam_bcnloss_off = 1; +#endif /* DISABLE_BCNLOSS_ROAM */ +#else +#ifdef DISABLE_BUILTIN_ROAM + uint roamvar = 1; +#endif /* DISABLE_BUILTIN_ROAM */ +#endif /* ROAM_ENABLE */ + +#if defined(SOFTAP) + uint dtim = 1; +#endif // endif +#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211)) + struct ether_addr p2p_ea; +#endif // endif +#ifdef BCMCCX + uint32 ccx = 1; +#endif // endif +#ifdef SOFTAP_UAPSD_OFF + uint32 wme_apsd = 0; +#endif /* SOFTAP_UAPSD_OFF */ +#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) + uint32 apsta = 1; /* Enable APSTA mode */ +#elif defined(SOFTAP_AND_GC) + uint32 apsta = 0; + int ap_mode = 1; +#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */ +#ifdef GET_CUSTOM_MAC_ENABLE + struct ether_addr ea_addr; + char hw_ether[62]; +#endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef OKC_SUPPORT + uint32 okc = 1; +#endif // endif + +#ifdef DISABLE_11N + 
uint32 nmode = 0; +#endif /* DISABLE_11N */ + +#ifdef USE_WL_TXBF + uint32 txbf = 1; +#endif /* USE_WL_TXBF */ +#ifdef DISABLE_TXBFR + uint32 txbf_bfr_cap = 0; +#endif /* DISABLE_TXBFR */ +#ifdef AMPDU_VO_ENABLE + struct ampdu_tid_control tid; +#endif // endif +#if defined(PROP_TXSTATUS) +#ifdef USE_WFA_CERT_CONF + uint32 proptx = 0; +#endif /* USE_WFA_CERT_CONF */ +#endif /* PROP_TXSTATUS */ +#ifdef DHD_SET_FW_HIGHSPEED + uint32 ack_ratio = 250; + uint32 ack_ratio_depth = 64; +#endif /* DHD_SET_FW_HIGHSPEED */ +#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT) + uint32 vht_features = 0; /* init to 0, will be set based on each support */ +#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */ +#ifdef DISABLE_11N_PROPRIETARY_RATES + uint32 ht_features = 0; +#endif /* DISABLE_11N_PROPRIETARY_RATES */ +#ifdef CUSTOM_PSPRETEND_THR + uint32 pspretend_thr = CUSTOM_PSPRETEND_THR; +#endif // endif +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef DISABLE_PRUNED_SCAN + uint32 scan_features = 0; +#endif /* DISABLE_PRUNED_SCAN */ +#ifdef BCMPCIE_OOB_HOST_WAKE + uint32 hostwake_oob = 0; +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = TRUE; +#ifdef APF + dhd->apf_set = FALSE; +#endif /* APF */ +#endif /* PKT_FILTER_SUPPORT */ +#ifdef WLTDLS + dhd->tdls_enable = FALSE; + dhd_tdls_set_mode(dhd, false); +#endif /* WLTDLS */ + dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM; +#ifdef ENABLE_MAX_DTIM_IN_SUSPEND + dhd->max_dtim_enable = TRUE; +#else + dhd->max_dtim_enable = FALSE; +#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ + DHD_TRACE(("Enter %s\n", __FUNCTION__)); + +#ifdef DHDTCPACK_SUPPRESS + dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode); +#endif + dhd->op_mode = 0; + +#if defined(CUSTOM_COUNTRY_CODE) + /* clear AP flags */ + dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG; +#endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */ + + if 
((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || + (op_mode == DHD_FLAG_MFG_MODE)) { + dhd->op_mode = DHD_FLAG_MFG_MODE; +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + /* disable runtimePM by default in MFG mode. */ + pm_runtime_disable(dhd_bus_to_dev(dhd->bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + /* Check and adjust IOCTL response timeout for Manufactring firmware */ + dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT); + DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n", + __FUNCTION__)); + } else { + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__)); + } +#ifdef BCMPCIE_OOB_HOST_WAKE + ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob, + sizeof(hostwake_oob), FALSE); + if (ret < 0) { + DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__)); + } else { + if (hostwake_oob == 0) { + DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n", + __FUNCTION__)); + ret = BCME_UNSUPPORTED; + goto done; + } else { + DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__)); + } + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef GET_CUSTOM_MAC_ENABLE + ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether); + if (!ret) { + memset(buf, 0, sizeof(buf)); + bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr)); + bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret < 0) { + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret) { + int i; + DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n", + __FUNCTION__, MAC2STRDBG(hw_ether), ret)); + for (i=0; imac.octet, buf, ETHER_ADDR_LEN); + + if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) { + DHD_ERROR(("%s: CLM set 
failed. Abort initialization.\n", __FUNCTION__)); + goto done; + } + + /* get a capabilities from firmware */ + { + uint32 cap_buf_size = sizeof(dhd->fw_capabilities); + memset(dhd->fw_capabilities, 0, cap_buf_size); + ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1), + FALSE); + if (ret < 0) { + DHD_ERROR(("%s: Get Capability failed (error=%d)\n", + __FUNCTION__, ret)); + return 0; + } + + memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1)); + dhd->fw_capabilities[0] = ' '; + dhd->fw_capabilities[cap_buf_size - 2] = ' '; + dhd->fw_capabilities[cap_buf_size - 1] = '\0'; + } + + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) || + (op_mode == DHD_FLAG_HOSTAP_MODE)) { +#ifdef SET_RANDOM_MAC_SOFTAP + uint rand_mac; +#endif /* SET_RANDOM_MAC_SOFTAP */ + dhd->op_mode = DHD_FLAG_HOSTAP_MODE; +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif // endif +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif // endif +#ifdef SET_RANDOM_MAC_SOFTAP + SRANDOM32((uint)jiffies); + rand_mac = RANDOM32(); + iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */ + iovbuf[1] = (unsigned char)(vendor_oui >> 8); + iovbuf[2] = (unsigned char)vendor_oui; + iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0; + iovbuf[4] = (unsigned char)(rand_mac >> 8); + iovbuf[5] = (unsigned char)(rand_mac >> 16); + + ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + } else + memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN); +#endif /* SET_RANDOM_MAC_SOFTAP */ +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ +#ifdef SOFTAP_UAPSD_OFF + ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s: set wme_apsd 
0 fail (error=%d)\n", + __FUNCTION__, ret)); + } +#endif /* SOFTAP_UAPSD_OFF */ +#if defined(CUSTOM_COUNTRY_CODE) + /* set AP flag for specific country code of SOFTAP */ + dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG; +#endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */ + } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || + (op_mode == DHD_FLAG_MFG_MODE)) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif /* PKT_FILTER_SUPPORT */ + dhd->op_mode = DHD_FLAG_MFG_MODE; +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ +#ifndef CUSTOM_SET_ANTNPM + if (FW_SUPPORTED(dhd, rsdb)) { + wl_config_t rsdb_mode; + memset(&rsdb_mode, 0, sizeof(rsdb_mode)); + ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n", + __FUNCTION__, ret)); + } + } +#endif /* !CUSTOM_SET_ANTNPM */ + } else { + uint32 concurrent_mode = 0; + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) || + (op_mode == DHD_FLAG_P2P_MODE)) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif // endif +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif // endif + dhd->op_mode = DHD_FLAG_P2P_MODE; + } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) || + (op_mode == DHD_FLAG_IBSS_MODE)) { + dhd->op_mode = DHD_FLAG_IBSS_MODE; + } else + dhd->op_mode = DHD_FLAG_STA_MODE; +#if !defined(AP) && defined(WLP2P) + if (dhd->op_mode != DHD_FLAG_IBSS_MODE && + (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 1; +#endif // endif + dhd->op_mode |= concurrent_mode; + } + + /* Check if we are enabling p2p */ + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + ret = dhd_iovar(dhd, 0, 
"apsta", (char *)&apsta, sizeof(apsta), NULL, 0, + TRUE); + if (ret < 0) + DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret)); + +#if defined(SOFTAP_AND_GC) + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, + (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) { + DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret)); + } +#endif // endif + memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN); + ETHER_SET_LOCALADDR(&p2p_ea); + ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret)); + else + DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n")); + } +#else + (void)concurrent_mode; +#endif // endif + } +#ifdef BCMSDIO + if (dhd->conf->sd_f2_blocksize) + dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize); +#endif + +#ifdef DISABLE_PRUNED_SCAN + if (FW_SUPPORTED(dhd, rsdb)) { + ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features, + sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE); + if (ret < 0) { + DHD_ERROR(("%s get scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } else { + memcpy(&scan_features, iovbuf, 4); + scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM; + ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features, + sizeof(scan_features), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } + } + } +#endif /* DISABLE_PRUNED_SCAN */ + + DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n", + dhd->op_mode, MAC2STRDBG(dhd->mac.octet))); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (!dhd->is_blob) +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { + /* get a ccode and revision for the country code */ +#if defined(CUSTOM_COUNTRY_CODE) + get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev, + &dhd->dhd_cspec, dhd->dhd_cflags); +#else + get_customized_country_code(dhd->info->adapter, 
dhd->dhd_cspec.country_abbrev, + &dhd->dhd_cspec); +#endif /* CUSTOM_COUNTRY_CODE */ + } + +#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA) + if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) + dhd->info->rxthread_enabled = FALSE; + else + dhd->info->rxthread_enabled = TRUE; +#endif // endif + /* Set Country code */ + if (dhd->dhd_cspec.ccode[0] != 0) { + ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__)); + } + + /* Set Listen Interval */ + ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret)); + +#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) { + DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar)); + } +#endif /* USE_WFA_CERT_CONF */ + /* Disable built-in roaming to allowed ext supplicant to take care of roaming */ + ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE); +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ +#if defined(ROAM_ENABLE) +#ifdef DISABLE_BCNLOSS_ROAM + ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off, + sizeof(roam_bcnloss_off), NULL, 0, TRUE); +#endif /* DISABLE_BCNLOSS_ROAM */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger, + sizeof(roam_trigger), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period, + sizeof(roam_scan_period), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret)); + if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta, + sizeof(roam_delta), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, 
ret)); + ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period, + sizeof(roam_fullscan_period), NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret)); +#ifdef ROAM_AP_ENV_DETECTION + if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) { + if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode, + sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK) + dhd->roam_env_detection = TRUE; + else + dhd->roam_env_detection = FALSE; + } +#endif /* ROAM_AP_ENV_DETECTION */ +#endif /* ROAM_ENABLE */ + +#ifdef CUSTOM_EVENT_PM_WAKE + ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh, + sizeof(pm_awake_thresh), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef OKC_SUPPORT + ret = dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE); +#endif // endif +#ifdef BCMCCX + ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE); +#endif /* BCMCCX */ +#ifdef WLTDLS +#ifdef ENABLE_TDLS_AUTO_MODE + /* by default TDLS on and auto mode on */ + _dhd_tdls_enable(dhd, true, true, NULL); +#else + /* by default TDLS on and auto mode off */ + _dhd_tdls_enable(dhd, true, false, NULL); +#endif /* ENABLE_TDLS_AUTO_MODE */ +#endif /* WLTDLS */ + +#ifdef DHD_ENABLE_LPC + /* Set lpc 1 */ + ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, + (char *)&wl_down, sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc)); + + ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE); + DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret)); + } + } +#endif /* DHD_ENABLE_LPC */ + +#ifdef WLADPS + if (dhd->op_mode & 
DHD_FLAG_STA_MODE) { + if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) { + DHD_ERROR(("%s dhd_enable_adps failed %d\n", + __FUNCTION__, ret)); + } + } +#endif /* WLADPS */ + +#ifdef DHD_PM_CONTROL_FROM_FILE + sec_control_pm(dhd, &power_mode); +#else + /* Set PowerSave mode */ + (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +#if defined(BCMSDIO) + /* Match Host and Dongle rx alignment */ + ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align), + NULL, 0, TRUE); + +#if defined(USE_WL_CREDALL) + /* enable credall to reduce the chance of no bus credit happened. */ + ret = dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE); +#endif // endif + +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) { + DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom)); + } +#endif /* USE_WFA_CERT_CONF */ + if (glom != DEFAULT_GLOM_VALUE) { + DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom)); + ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE); + } +#endif /* defined(BCMSDIO) */ + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0, + TRUE); + + /* Setup assoc_retry_max count to reconnect target AP in dongle */ + ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0, + TRUE); + +#if defined(AP) && !defined(WLP2P) + ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE); + +#endif /* defined(AP) && !defined(WLP2P) */ + +#ifdef MIMO_ANT_SETTING + dhd_sel_ant_from_file(dhd); +#endif /* MIMO_ANT_SETTING */ + +#if defined(SOFTAP) + if (ap_fw_loaded == TRUE) { + dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0); + } +#endif // endif + 
+#if defined(KEEP_ALIVE) + { + /* Set Keep Alive : be sure to use FW with -keepalive */ + int res; + +#if defined(SOFTAP) + if (ap_fw_loaded == FALSE) +#endif // endif + if (!(dhd->op_mode & + (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) { + if ((res = dhd_keep_alive_onoff(dhd)) < 0) + DHD_ERROR(("%s set keeplive failed %d\n", + __FUNCTION__, res)); + } + } +#endif /* defined(KEEP_ALIVE) */ + +#ifdef USE_WL_TXBF + ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret)); + +#endif /* USE_WL_TXBF */ + + ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL, + 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret)); + } + +#ifdef DISABLE_TXBFR + ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL, + 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret)); + } +#endif /* DISABLE_TXBFR */ + +#ifdef USE_WFA_CERT_CONF +#ifdef USE_WL_FRAMEBURST + if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) { + DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst)); + } +#endif /* USE_WL_FRAMEBURST */ + g_frameburst = frameburst; +#endif /* USE_WFA_CERT_CONF */ +#ifdef DISABLE_WL_FRAMEBURST_SOFTAP + /* Disable Framebursting for SofAP */ + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + frameburst = 0; + } +#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */ + /* Set frameburst to value */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst, + sizeof(frameburst), TRUE, 0)) < 0) { + DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret)); + } +#ifdef DHD_SET_FW_HIGHSPEED + /* Set ack_ratio */ + ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret)); + } + + /* Set 
ack_ratio_depth */ + ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth, + sizeof(ack_ratio_depth), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret)); + } +#endif /* DHD_SET_FW_HIGHSPEED */ + + iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN); + if (iov_buf == NULL) { + DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN)); + ret = BCME_NOMEM; + goto done; + } + +#if defined(CUSTOM_AMPDU_BA_WSIZE) + /* Set ampdu ba wsize to 64 or 16 */ +#ifdef CUSTOM_AMPDU_BA_WSIZE + ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE; +#endif // endif + if (ampdu_ba_wsize != 0) { + ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&du_ba_wsize, + sizeof(ampdu_ba_wsize), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n", + __FUNCTION__, ampdu_ba_wsize, ret)); + } + } +#endif // endif + +#if defined(CUSTOM_AMPDU_MPDU) + ampdu_mpdu = CUSTOM_AMPDU_MPDU; + if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) { + ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&du_mpdu, sizeof(ampdu_mpdu), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n", + __FUNCTION__, CUSTOM_AMPDU_MPDU, ret)); + } + } +#endif /* CUSTOM_AMPDU_MPDU */ + +#if defined(CUSTOM_AMPDU_RELEASE) + ampdu_release = CUSTOM_AMPDU_RELEASE; + if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) { + ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&du_release, + sizeof(ampdu_release), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ampdu_release to %d failed %d\n", + __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret)); + } + } +#endif /* CUSTOM_AMPDU_RELEASE */ + +#if defined(CUSTOM_AMSDU_AGGSF) + amsdu_aggsf = CUSTOM_AMSDU_AGGSF; + if (amsdu_aggsf != 0) { + ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n", + __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret)); + } + } 
+#endif /* CUSTOM_AMSDU_AGGSF */ + +#if defined(BCMSUP_4WAY_HANDSHAKE) + /* Read 4-way handshake requirements */ + if (dhd_use_idsup == 1) { + ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa), + (char *)&iovbuf, sizeof(iovbuf), FALSE); + /* sup_wpa iovar returns NOTREADY status on some platforms using modularized + * in-dongle supplicant. + */ + if (ret >= 0 || ret == BCME_NOTREADY) + dhd->fw_4way_handshake = TRUE; + DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake)); + } +#endif /* BCMSUP_4WAY_HANDSHAKE */ +#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT) + ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features), + NULL, 0, FALSE); + if (ret < 0) { + DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret)); + vht_features = 0; + } else { +#ifdef SUPPORT_2G_VHT + vht_features |= 0x3; /* 2G support */ +#endif /* SUPPORT_2G_VHT */ +#ifdef SUPPORT_5G_1024QAM_VHT + vht_features |= 0x6; /* 5G 1024 QAM support */ +#endif /* SUPPORT_5G_1024QAM_VHT */ + } + if (vht_features) { + ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, + (char *)&wl_down, sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s vht_features fail WL_DOWN : %d," + " vht_features = 0x%x\n", + __FUNCTION__, ret, vht_features)); + + ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, + sizeof(vht_features), NULL, 0, TRUE); + + DHD_ERROR(("%s vht_features set. 
ret --> %d\n", __FUNCTION__, ret)); + } + } + } +#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */ +#ifdef DISABLE_11N_PROPRIETARY_RATES + ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret)); + } +#endif /* DISABLE_11N_PROPRIETARY_RATES */ +#ifdef CUSTOM_PSPRETEND_THR + /* Turn off MPC in AP mode */ + ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr, + sizeof(pspretend_thr), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n", + __FUNCTION__, ret)); + } +#endif // endif + + ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret)); + } +#ifdef SUPPORT_SET_CAC + ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret)); + } +#endif /* SUPPORT_SET_CAC */ +#ifdef DHD_ULP + /* Get the required details from dongle during preinit ioctl */ + dhd_ulp_preinit(dhd); +#endif /* DHD_ULP */ + + /* Read event_msgs mask */ + ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, + sizeof(iovbuf), FALSE); + if (ret < 0) { + DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); + + /* Setup event_msgs */ + setbit(eventmask, WLC_E_SET_SSID); + setbit(eventmask, WLC_E_PRUNE); + setbit(eventmask, WLC_E_AUTH); + setbit(eventmask, WLC_E_AUTH_IND); + setbit(eventmask, WLC_E_ASSOC); + setbit(eventmask, WLC_E_REASSOC); + setbit(eventmask, WLC_E_REASSOC_IND); + if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE)) + setbit(eventmask, WLC_E_DEAUTH); + setbit(eventmask, WLC_E_DEAUTH_IND); + setbit(eventmask, WLC_E_DISASSOC_IND); + setbit(eventmask, WLC_E_DISASSOC); + 
setbit(eventmask, WLC_E_JOIN); + setbit(eventmask, WLC_E_START); + setbit(eventmask, WLC_E_ASSOC_IND); + setbit(eventmask, WLC_E_PSK_SUP); + setbit(eventmask, WLC_E_LINK); + setbit(eventmask, WLC_E_MIC_ERROR); + setbit(eventmask, WLC_E_ASSOC_REQ_IE); + setbit(eventmask, WLC_E_ASSOC_RESP_IE); +#ifdef LIMIT_BORROW + setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW); +#endif // endif +#ifndef WL_CFG80211 + setbit(eventmask, WLC_E_PMKID_CACHE); +// setbit(eventmask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event +#endif // endif + setbit(eventmask, WLC_E_JOIN_START); +// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event +#ifdef DHD_DEBUG + setbit(eventmask, WLC_E_SCAN_CONFIRM_IND); +#endif // endif +#ifdef PNO_SUPPORT + setbit(eventmask, WLC_E_PFN_NET_FOUND); + setbit(eventmask, WLC_E_PFN_BEST_BATCHING); + setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND); + setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST); +#endif /* PNO_SUPPORT */ + /* enable dongle roaming event */ +#ifdef WL_CFG80211 + setbit(eventmask, WLC_E_ROAM); + setbit(eventmask, WLC_E_BSSID); +#endif /* WL_CFG80211 */ +#ifdef BCMCCX + setbit(eventmask, WLC_E_ADDTS_IND); + setbit(eventmask, WLC_E_DELTS_IND); +#endif /* BCMCCX */ +#ifdef WLTDLS + setbit(eventmask, WLC_E_TDLS_PEER_EVENT); +#endif /* WLTDLS */ +#ifdef WL_ESCAN + setbit(eventmask, WLC_E_ESCAN_RESULT); +#endif /* WL_ESCAN */ +#ifdef RTT_SUPPORT + setbit(eventmask, WLC_E_PROXD); +#endif /* RTT_SUPPORT */ +#ifdef WL_CFG80211 + setbit(eventmask, WLC_E_ESCAN_RESULT); + setbit(eventmask, WLC_E_AP_STARTED); + setbit(eventmask, WLC_E_ACTION_FRAME_RX); + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); + } +#endif /* WL_CFG80211 */ + +#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) + if (dhd_logtrace_from_file(dhd)) { + setbit(eventmask, WLC_E_TRACE); + } else { + clrbit(eventmask, WLC_E_TRACE); + } +#elif defined(SHOW_LOGTRACE) + setbit(eventmask, WLC_E_TRACE); +#else 
+ clrbit(eventmask, WLC_E_TRACE); +#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */ + + setbit(eventmask, WLC_E_CSA_COMPLETE_IND); +#ifdef CUSTOM_EVENT_PM_WAKE + setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT); +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef DHD_LOSSLESS_ROAMING + setbit(eventmask, WLC_E_ROAM_PREP); +#endif // endif + /* nan events */ + setbit(eventmask, WLC_E_NAN); +#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */ + +#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */ + +#ifdef SUSPEND_EVENT + bcopy(eventmask, dhd->conf->resume_eventmask, WL_EVENTING_MASK_LEN); +#endif + /* Write updated Event mask */ + ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + + /* make up event mask ext message iovar for event larger than 128 */ + msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE; + eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen); + if (eventmask_msg == NULL) { + DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen)); + ret = BCME_NOMEM; + goto done; + } + bzero(eventmask_msg, msglen); + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; + + /* Read event_msgs_ext mask */ + ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, + WLC_IOCTL_SMLEN, FALSE); + + if (ret2 == 0) { /* event_msgs_ext must be supported */ + bcopy(iov_buf, eventmask_msg, msglen); +#ifdef RSSI_MONITOR_SUPPORT + setbit(eventmask_msg->mask, WLC_E_RSSI_LQM); +#endif /* RSSI_MONITOR_SUPPORT */ +#ifdef GSCAN_SUPPORT + setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT); + 
setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE); + setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT); + setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT); +#endif /* GSCAN_SUPPORT */ + setbit(eventmask_msg->mask, WLC_E_RSSI_LQM); +#ifdef BT_WIFI_HANDOVER + setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ); +#endif /* BT_WIFI_HANDOVER */ +#ifdef DBG_PKT_MON + setbit(eventmask_msg->mask, WLC_E_ROAM_PREP); +#endif /* DBG_PKT_MON */ +#ifdef DHD_ULP + setbit(eventmask_msg->mask, WLC_E_ULP); +#endif // endif +#ifdef WL_NATOE + setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT); +#endif /* WL_NATOE */ +#ifdef WL_NAN + setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP); +#endif /* WL_NAN */ +#ifdef WL_MBO + setbit(eventmask_msg->mask, WLC_E_MBO); +#endif /* WL_MBO */ + /* Write updated Event mask */ + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->command = EVENTMSGS_SET_MASK; + eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; + ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret)); + goto done; + } + } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) { + /* Skip for BCME_UNSUPPORTED or BCME_VERSION */ + DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n", + __FUNCTION__, ret2)); + } else { + DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2)); + ret = ret2; + goto done; + } + +#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE) + /* Enabling event log trace for EAP events */ + el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t)); + if (el_tag == NULL) { + DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", + (int)sizeof(wl_el_tag_params_t))); + ret = BCME_NOMEM; + goto done; + } + el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE; + el_tag->set = 1; + el_tag->flags = EVENT_LOG_TAG_FLAG_LOG; + ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, 
sizeof(*el_tag), NULL, 0, + TRUE); +#endif /* DHD_8021X_DUMP */ + + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, + sizeof(scan_assoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, + sizeof(scan_unassoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time, + sizeof(scan_passive_time), TRUE, 0); + +#ifdef ARP_OFFLOAD_SUPPORT + /* Set and enable ARP offload feature for STA only */ +#if defined(SOFTAP) + if (arpoe && !ap_fw_loaded) +#else + if (arpoe) +#endif // endif + { + dhd_arp_offload_enable(dhd, TRUE); + dhd_arp_offload_set(dhd, dhd_arp_mode); + } else { + dhd_arp_offload_enable(dhd, FALSE); + dhd_arp_offload_set(dhd, 0); + } + dhd_arp_enable = arpoe; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#ifdef PKT_FILTER_SUPPORT + /* Setup default defintions for pktfilter , enable in suspend */ + if (dhd_master_mode) { + dhd->pktfilter_count = 6; + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL; + if (!FW_SUPPORTED(dhd, pf6)) { + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; + } else { + /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */ + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST; + } + /* apply APP pktfilter */ + dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806"; + +#ifdef BLOCK_IPV6_PACKET + /* Setup filter to allow only IPv4 unicast frames */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 " + HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR + " " + HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR; +#else + /* Setup filter to allow only unicast */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00"; +#endif /* BLOCK_IPV6_PACKET */ + +#ifdef PASS_IPV4_SUSPEND + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 
0x01005E"; +#else + /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */ + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; +#endif /* PASS_IPV4_SUSPEND */ + if (FW_SUPPORTED(dhd, pf6)) { + /* Immediately pkt filter TYPE 6 Dicard Broadcast IP packet */ + dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST; + /* Immediately pkt filter TYPE 6 Dicard Cisco STP packet */ + dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP; + /* Immediately pkt filter TYPE 6 Dicard Cisco XID protocol */ + dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID; + dhd->pktfilter_count = 10; + } + +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER + dhd->pktfilter_count = 4; + /* Setup filter to block broadcast and NAT Keepalive packets */ + /* discard all broadcast packets */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009"; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ + } else + dhd_conf_discard_pkt_filter(dhd); + dhd_conf_add_pkt_filter(dhd); + +#if defined(SOFTAP) + if (ap_fw_loaded) { + dhd_enable_packet_filter(0, dhd); + } +#endif /* defined(SOFTAP) */ + dhd_set_packet_filter(dhd); +#endif /* PKT_FILTER_SUPPORT */ +#ifdef DISABLE_11N + ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret)); +#endif /* DISABLE_11N */ + +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, + TRUE); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ +#ifdef AMPDU_VO_ENABLE + tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */ + tid.enable = TRUE; + ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, 
sizeof(tid), NULL, 0, TRUE); + + tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */ + tid.enable = TRUE; + ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE); +#endif // endif + /* query for 'clmver' to get clm version info from firmware */ + memset(buf, 0, sizeof(buf)); + ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE); + if (ret < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + char *clmver_temp_buf = NULL; + + if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) { + DHD_ERROR(("Couldn't find \"Data:\"\n")); + } else { + ptr = (clmver_temp_buf + strlen("Data:")); + if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) { + DHD_ERROR(("Couldn't find New line character\n")); + } else { + memset(clm_version, 0, CLM_VER_STR_LEN); + strncpy(clm_version, clmver_temp_buf, + MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1)); + } + } + } + + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + if (ret < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + bcmstrtok(&ptr, "\n", 0); + strncpy(fw_version, buf, FW_VER_STR_LEN); + fw_version[FW_VER_STR_LEN-1] = '\0'; + dhd_set_version_info(dhd, buf); +#ifdef WRITE_WLANINFO + sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version); +#endif /* WRITE_WLANINFO */ + } +#ifdef GEN_SOFTAP_INFO_FILE + sec_save_softap_info(); +#endif /* GEN_SOFTAP_INFO_FILE */ + +#if defined(BCMSDIO) + dhd_txglom_enable(dhd, dhd->conf->bus_rxglom); +#endif /* defined(BCMSDIO) */ + +#if defined(BCMSDIO) || defined(BCMDBUS) +#ifdef PROP_TXSTATUS + if (disable_proptx || +#ifdef PROP_TXSTATUS_VSDB + /* enable WLFC only if the firmware is VSDB when it is in STA mode */ + (dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) || +#endif /* PROP_TXSTATUS_VSDB */ + FALSE) { + wlfc_enable = FALSE; + } + ret = 
dhd_conf_get_disable_proptx(dhd); + if (ret == 0){ + disable_proptx = 0; + wlfc_enable = TRUE; + } else if (ret >= 1) { + disable_proptx = 1; + wlfc_enable = FALSE; + /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */ + hostreorder = 0; + } + +#if defined(PROP_TXSTATUS) +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) { + DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx)); + wlfc_enable = proptx; + } +#endif /* USE_WFA_CERT_CONF */ +#endif /* PROP_TXSTATUS */ + +#ifndef DISABLE_11N + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0); + ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder), + NULL, 0, TRUE); + if (ret2 < 0) { + DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2)); + if (ret2 != BCME_UNSUPPORTED) + ret = ret2; + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, + sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n", + __FUNCTION__, ret2, hostreorder)); + + ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, + sizeof(hostreorder), NULL, 0, TRUE); + DHD_ERROR(("%s wl ampdu_hostreorder. 
ret --> %d\n", __FUNCTION__, ret2)); + if (ret2 != BCME_UNSUPPORTED) + ret = ret2; + } + if (ret2 != BCME_OK) + hostreorder = 0; + } +#endif /* DISABLE_11N */ + + if (wlfc_enable) { + dhd_wlfc_init(dhd); + /* terence 20161229: enable ampdu_hostreorder if tlv enabled */ + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE); + } +#ifndef DISABLE_11N + else if (hostreorder) + dhd_wlfc_hostreorder_init(dhd); +#endif /* DISABLE_11N */ +#else + /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */ + printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE); +#endif /* PROP_TXSTATUS */ +#endif /* BCMSDIO || BCMDBUS */ +#ifndef PCIE_FULL_DONGLE + /* For FD we need all the packets at DHD to handle intra-BSS forwarding */ + if (FW_SUPPORTED(dhd, ap)) { + wl_ap_isolate = AP_ISOLATE_SENDUP_ALL; + ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate), + NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + } +#endif /* PCIE_FULL_DONGLE */ +#ifdef PNO_SUPPORT + if (!dhd->pno_state) { + dhd_pno_init(dhd); + } +#endif // endif +#ifdef RTT_SUPPORT + if (!dhd->rtt_state) { + ret = dhd_rtt_init(dhd); + if (ret < 0) { + DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__)); + } + } +#endif // endif +#ifdef FILTER_IE + /* Failure to configure filter IE is not a fatal error, ignore it. 
*/ + if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) + dhd_read_from_file(dhd); +#endif /* FILTER_IE */ +#ifdef WL11U + dhd_interworking_enable(dhd); +#endif /* WL11U */ + +#ifdef NDO_CONFIG_SUPPORT + dhd->ndo_enable = FALSE; + dhd->ndo_host_ip_overflow = FALSE; + dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES; +#endif /* NDO_CONFIG_SUPPORT */ + + /* ND offload version supported */ + dhd->ndo_version = dhd_ndo_get_version(dhd); + if (dhd->ndo_version > 0) { + DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version)); + +#ifdef NDO_CONFIG_SUPPORT + /* enable Unsolicited NA filter */ + ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1); + if (ret < 0) { + DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__)); + } +#endif /* NDO_CONFIG_SUPPORT */ + } + + /* check dongle supports wbtext (product policy) or not */ + dhd->wbtext_support = FALSE; + if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp, + WLC_GET_VAR, FALSE, 0) != BCME_OK) { + DHD_ERROR(("failed to get wnm_bsstrans_resp\n")); + } + dhd->wbtext_policy = wnm_bsstrans_resp; + if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) { + dhd->wbtext_support = TRUE; + } + /* driver can turn off wbtext feature through makefile */ + if (dhd->wbtext_support) { + if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp", + WL_BSSTRANS_POLICY_ROAM_ALWAYS, + WLC_SET_VAR, FALSE, 0) != BCME_OK) { + DHD_ERROR(("failed to disable WBTEXT\n")); + } + } + +#if defined(DHD_NON_DMA_M2M_CORRUPTION) + /* check pcie non dma loopback */ + if (dhd->op_mode == DHD_FLAG_MFG_MODE) { + memset(&pcie_dmaxfer_lpbk, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t)); + pcie_dmaxfer_lpbk.u.length = PCIE_DMAXFER_LPBK_LENGTH; + pcie_dmaxfer_lpbk.lpbkmode = M2M_NON_DMA_LPBK; + pcie_dmaxfer_lpbk.wait = TRUE; + + if ((ret = dhd_bus_iovar_op(dhd, "pcie_dmaxfer", NULL, 0, + (char *)&pcie_dmaxfer_lpbk, sizeof(dhd_pcie_dmaxfer_lpbk_t), + IOV_SET)) < 0) { + DHD_ERROR(("failed to check PCIe Non DMA 
Loopback Test!!! Reason : %d\n", + ret)); + goto done; + } + + if (pcie_dmaxfer_lpbk.u.status != BCME_OK) { + DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d" + " Status : %d\n", ret, pcie_dmaxfer_lpbk.u.status)); + ret = BCME_ERROR; + goto done; + } else { + + DHD_ERROR(("successful to check PCIe Non DMA Loopback Test\n")); + } + } +#endif /* DHD_NON_DMA_M2M_CORRUPTION */ + + /* WNM capabilities */ + wnm_cap = 0 +#ifdef WL11U + | WL_WNM_BSSTRANS | WL_WNM_NOTIF +#endif // endif + ; + if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) { + DHD_ERROR(("failed to set WNM capabilities\n")); + } + + if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) { + if (dhd_start_ecounters(dhd) != BCME_OK) { + DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__)); + } else if (dhd_start_event_ecounters(dhd) != BCME_OK) { + DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__)); + } + + } + + /* store the preserve log set numbers */ + if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask) + != BCME_OK) { + DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__)); + } + +#ifdef WL_MONITOR + if (FW_SUPPORTED(dhd, monitor)) { + dhd->monitor_enable = TRUE; + DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__)); + } else { + dhd->monitor_enable = FALSE; + DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__)); + } +#endif /* WL_MONITOR */ + + dhd_conf_postinit_ioctls(dhd); +done: + + if (eventmask_msg) { + MFREE(dhd->osh, eventmask_msg, msglen); + eventmask_msg = NULL; + } + if (iov_buf) { + MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN); + iov_buf = NULL; + } +#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE) + if (el_tag) { + MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t)); + el_tag = NULL; + } +#endif /* DHD_8021X_DUMP */ + return ret; +} + +int +dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf, + uint res_len, int set) 
+{
+	char *buf = NULL;
+	int input_len;
+	wl_ioctl_t ioc;
+	int ret;
+
+	if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
+		return BCME_BADARG;
+
+	input_len = strlen(name) + 1 + param_len;
+	if (input_len > WLC_IOCTL_MAXLEN)
+		return BCME_BADARG;
+
+	buf = NULL;
+	if (set) {
+		if (res_buf || res_len != 0) {
+			DHD_ERROR(("%s: SET wrong argument\n", __FUNCTION__));
+			ret = BCME_BADARG;
+			goto exit;
+		}
+		buf = MALLOCZ(pub->osh, input_len);
+		if (!buf) {
+			DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+		ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+		if (!ret) {
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+
+		ioc.cmd = WLC_SET_VAR;
+		ioc.buf = buf;
+		ioc.len = input_len;
+		ioc.set = set;
+
+		ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	} else {
+		if (!res_buf || !res_len) {
+			DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
+			ret = BCME_BADARG;
+			goto exit;
+		}
+
+		if (res_len < input_len) {
+			DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
+				res_len, input_len));
+			buf = MALLOCZ(pub->osh, input_len);
+			if (!buf) {
+				DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+				ret = BCME_NOMEM;
+				goto exit;
+			}
+			ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+			if (!ret) {
+				ret = BCME_NOMEM;
+				goto exit;
+			}
+
+			ioc.cmd = WLC_GET_VAR;
+			ioc.buf = buf;
+			ioc.len = input_len;
+			ioc.set = set;
+
+			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+
+			if (ret == BCME_OK) {
+				memcpy(res_buf, buf, res_len);
+			}
+		} else {
+			memset(res_buf, 0, res_len);
+			ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
+			if (!ret) {
+				ret = BCME_NOMEM;
+				goto exit;
+			}
+
+			ioc.cmd = WLC_GET_VAR;
+			ioc.buf = res_buf;
+			ioc.len = res_len;
+			ioc.set = set;
+
+			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+		}
+	}
+exit:
+	if (buf) {
+		MFREE(pub->osh, buf, input_len);
+		buf = NULL;
+	}
+	return ret;
+}
+
+int
+dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, + uint cmd_len, char **resptr, uint resp_len) +{ + int len = resp_len; + int ret; + char *buf = *resptr; + wl_ioctl_t ioc; + if (resp_len > WLC_IOCTL_MAXLEN) + return BCME_BADARG; + + memset(buf, 0, resp_len); + + ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len); + if (ret == 0) { + return BCME_BUFTOOSHORT; + } + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = 0; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + + return ret; +} + +int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx) +{ + struct dhd_info *dhd = dhdp->info; + struct net_device *dev = NULL; + + ASSERT(dhd && dhd->iflist[ifidx]); + dev = dhd->iflist[ifidx]->net; + ASSERT(dev); + + if (netif_running(dev)) { + DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name)); + return BCME_NOTDOWN; + } + +#define DHD_MIN_MTU 1500 +#define DHD_MAX_MTU 1752 + + if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) { + DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu)); + return BCME_BADARG; + } + + dev->mtu = new_mtu; + return 0; +} + +#ifdef ARP_OFFLOAD_SUPPORT +/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */ +void +aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx) +{ + u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */ + int i; + int ret; + + bzero(ipv4_buf, sizeof(ipv4_buf)); + + /* display what we've got */ + ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx); + DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__)); +#ifdef AOE_DBG + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif // endif + /* now we saved hoste_ip table, clr it in the dongle AOE */ + dhd_aoe_hostip_clr(dhd_pub, idx); + + if (ret) { + DHD_ERROR(("%s failed\n", __FUNCTION__)); + return; + } + + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (add && (ipv4_buf[i] 
== 0)) { + ipv4_buf[i] = ipa; + add = FALSE; /* added ipa to local table */ + DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n", + __FUNCTION__, i)); + } else if (ipv4_buf[i] == ipa) { + ipv4_buf[i] = 0; + DHD_ARPOE(("%s: removed IP:%x from temp table %d\n", + __FUNCTION__, ipa, i)); + } + + if (ipv4_buf[i] != 0) { + /* add back host_ip entries from our local cache */ + dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx); + DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n", + __FUNCTION__, ipv4_buf[i], i)); + } + } +#ifdef AOE_DBG + /* see the resulting hostip table */ + dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx); + DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__)); + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif // endif +} + +/* + * Notification mechanism from kernel to our driver. This function is called by the Linux kernel + * whenever there is an event related to an IP address. + * ptr : kernel provided pointer to IP address that has changed + */ +static int dhd_inetaddr_notifier_call(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dhd_info_t *dhd; + dhd_pub_t *dhd_pub; + int idx; + + if (!dhd_arp_enable) + return NOTIFY_DONE; + if (!ifa || !(ifa->ifa_dev->dev)) + return NOTIFY_DONE; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + /* Filter notifications meant for non Broadcom devices */ + if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) && + (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) { +#if defined(WL_ENABLE_P2P_IF) + if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops)) +#endif /* WL_ENABLE_P2P_IF */ + return NOTIFY_DONE; + } +#endif /* LINUX_VERSION_CODE */ + + dhd = DHD_DEV_INFO(ifa->ifa_dev->dev); + if (!dhd) + return NOTIFY_DONE; + + dhd_pub = &dhd->pub; + + if (dhd_pub->arp_version == 1) { + idx = 0; + } else { + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + if (dhd->iflist[idx] && 
dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+				break;
+		}
+		if (idx < DHD_MAX_IFS)
+			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+		else {
+			DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
+			idx = 0;
+		}
+	}
+
+	switch (event) {
+	case NETDEV_UP:
+		DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+			__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+		/*
+		 * Skip if Bus is not in a state to transport the IOVAR
+		 * (or) the Dongle is not ready.
+		 */
+		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
+			dhd->pub.busstate == DHD_BUS_LOAD) {
+			DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
+				__FUNCTION__, dhd->pub.busstate));
+			if (dhd->pend_ipaddr) {
+				DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+					__FUNCTION__, dhd->pend_ipaddr));
+			}
+			dhd->pend_ipaddr = ifa->ifa_address;
+			break;
+		}
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+		DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+			__FUNCTION__));
+		aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+		break;
+
+	case NETDEV_DOWN:
+		DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+			__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+		dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+		DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+			__FUNCTION__));
+		if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
+			(ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+		} else
+#endif /* AOE_IP_ALIAS_SUPPORT */
+		{
+			dhd_aoe_hostip_clr(&dhd->pub, idx);
+			dhd_aoe_arp_clr(&dhd->pub, idx);
+		}
+		break;
+
+	default:
+		DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
+			__func__, ifa->ifa_label, event));
+		break;
+	}
+	return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void
*dhd_info, void *event_data, u8 event) +{ + struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data; + dhd_info_t *dhd = (dhd_info_t *)dhd_info; + dhd_pub_t *dhdp; + int ret; + + if (!dhd) { + DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__)); + goto done; + } + dhdp = &dhd->pub; + + if (event != DHD_WQ_WORK_IPV6_NDO) { + DHD_ERROR(("%s: unexpected event\n", __FUNCTION__)); + goto done; + } + + if (!ndo_work) { + DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__)); + return; + } + + switch (ndo_work->event) { + case NETDEV_UP: +#ifndef NDO_CONFIG_SUPPORT + DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__)); + ret = dhd_ndo_enable(dhdp, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret)); + } +#endif /* !NDO_CONFIG_SUPPORT */ + DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__)); + if (dhdp->ndo_version > 0) { + /* inet6 addr notifier called only for unicast address */ + ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0], + WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx); + } else { + ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0], + ndo_work->if_idx); + } + if (ret < 0) { + DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n", + __FUNCTION__, ret)); + } + break; + case NETDEV_DOWN: + if (dhdp->ndo_version > 0) { + DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__)); + ret = dhd_ndo_remove_ip_by_addr(dhdp, + &ndo_work->ipv6_addr[0], ndo_work->if_idx); + } else { + DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__)); + ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx); + } + if (ret < 0) { + DHD_ERROR(("%s: Removing host ip for NDO failed %d\n", + __FUNCTION__, ret)); + goto done; + } +#ifdef NDO_CONFIG_SUPPORT + if (dhdp->ndo_host_ip_overflow) { + ret = dhd_dev_ndo_update_inet6addr( + dhd_idx2net(dhdp, ndo_work->if_idx)); + if ((ret < 0) && (ret != BCME_NORESOURCE)) { + DHD_ERROR(("%s: Updating host ip for NDO failed %d\n", + __FUNCTION__, ret)); + goto done; 
+			}
+		}
+#else /* !NDO_CONFIG_SUPPORT */
+		DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
+		ret = dhd_ndo_enable(dhdp, FALSE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+			goto done;
+		}
+#endif /* NDO_CONFIG_SUPPORT */
+		break;
+
+	default:
+		DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
+		break;
+	}
+done:
+
+	/* free ndo_work. allocated while scheduling the work */
+	if (ndo_work) {
+		kfree(ndo_work);
+	}
+
+	return;
+} /* dhd_inet6_work_handler */
+
+/*
+ * Neighbor Discovery Offload: Called when an interface
+ * is assigned with ipv6 address.
+ * Handles only primary interface
+ */
+int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	dhd_info_t *dhd;
+	dhd_pub_t *dhdp;
+	struct inet6_ifaddr *inet6_ifa = ptr;
+	struct ipv6_work_info_t *ndo_info;
+	int idx;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter notifications meant for non Broadcom devices */
+	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+		return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
+	if (!dhd) {
+		return NOTIFY_DONE;
+	}
+	dhdp = &dhd->pub;
+
+	/* Supports only primary interface */
+	idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
+	if (idx != 0) {
+		return NOTIFY_DONE;
+	}
+
+	/* FW capability */
+	if (!FW_SUPPORTED(dhdp, ndoe)) {
+		return NOTIFY_DONE;
+	}
+
+	ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+	if (!ndo_info) {
+		DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+		return NOTIFY_DONE;
+	}
+
+	/* fill up ndo_info */
+	ndo_info->event = event;
+	ndo_info->if_idx = idx;
+	memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
+
+	/* defer the work to thread as it may block kernel */
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+		dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
+	return NOTIFY_DONE;
+} +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + +int +dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + dhd_if_t *ifp; + struct net_device *net = NULL; + int err = 0; + uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 }; + + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + if (dhd == NULL || dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__)); + return BCME_ERROR; + } + + ASSERT(dhd && dhd->iflist[ifidx]); + ifp = dhd->iflist[ifidx]; + net = ifp->net; + ASSERT(net && (ifp->idx == ifidx)); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + ASSERT(!net->open); + net->get_stats = dhd_get_stats; +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + net->do_ioctl = dhd_ioctl_entry_wrapper; + net->hard_start_xmit = dhd_start_xmit_wrapper; +#else + net->do_ioctl = dhd_ioctl_entry; + net->hard_start_xmit = dhd_start_xmit; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + net->set_mac_address = dhd_set_mac_address; + net->set_multicast_list = dhd_set_multicast_list; + net->open = net->stop = NULL; +#else + ASSERT(!net->netdev_ops); + net->netdev_ops = &dhd_ops_virt; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + + /* Ok, link into the network layer... */ + if (ifidx == 0) { + /* + * device functions for the primary interface only + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = dhd_open; + net->stop = dhd_stop; +#else + net->netdev_ops = &dhd_ops_pri; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + if (!ETHER_ISNULLADDR(dhd->pub.mac.octet)) + memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + } else { + /* + * We have to use the primary MAC for virtual interfaces + */ + memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN); + /* + * Android sets the locally administered bit to indicate that this is a + * portable hotspot. This will not work in simultaneous AP/STA mode, + * nor with P2P. 
Need to set the Donlge's MAC address, and then use that. + */ + if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr, + ETHER_ADDR_LEN)) { + DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n", + __func__, net->name)); + temp_addr[0] |= 0x02; + } + } + + net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) + net->ethtool_ops = &dhd_ethtool_ops; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + +#if defined(WL_WIRELESS_EXT) +#if WIRELESS_EXT < 19 + net->get_wireless_stats = dhd_get_wireless_stats; +#endif /* WIRELESS_EXT < 19 */ +#if WIRELESS_EXT > 12 + net->wireless_handlers = &wl_iw_handler_def; +#endif /* WIRELESS_EXT > 12 */ +#endif /* defined(WL_WIRELESS_EXT) */ + + dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net); + +#ifdef WLMESH + if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) { + temp_addr[4] ^= 0x80; + temp_addr[4] += ifidx; + temp_addr[5] += ifidx; + } +#endif + memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); + + if (ifidx == 0) + printf("%s\n", dhd_version); +#ifdef WL_EXT_IAPSTA + else + wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx); +#endif + if (ifidx != 0) { + if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0) + DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__)); + else + DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__)); + } + + if (need_rtnl_lock) + err = register_netdev(net); + else + err = register_netdevice(net); + + if (err != 0) { + DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err)); + goto fail; + } +#ifdef WL_EXT_IAPSTA + if (ifidx == 0) + wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx); + wl_ext_iapsta_attach_name(net, ifidx); +#endif + + printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name, + MAC2STRDBG(net->dev_addr)); + +#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211) +// wl_iw_iscan_set_scan_broadcast_prep(net, 1); +#endif // endif + +#if (defined(BCMPCIE) 
|| (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS)) + if (ifidx == 0) { +#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) + up(&dhd_registration_sem); +#endif /* BCMLXSDMMC */ + if (!dhd_download_fw_on_driverload) { +#ifdef WL_CFG80211 + wl_terminate_event_handler(net); +#endif /* WL_CFG80211 */ +#if defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + +#ifdef SHOW_LOGTRACE + /* Release the skbs from queue for WLC_E_TRACE event */ + dhd_event_logtrace_flush_queue(dhdp); +#endif /* SHOW_LOGTRACE */ + +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ + dhd_net_bus_devreset(net, TRUE); +#ifdef BCMLXSDMMC + dhd_net_bus_suspend(net); +#endif /* BCMLXSDMMC */ + wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY); +#if defined(BT_OVER_SDIO) + dhd->bus_user_count--; +#endif /* BT_OVER_SDIO */ + } + } +#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */ + return 0; + +fail: +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif // endif + return err; +} + +void +dhd_bus_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + dhd = (dhd_info_t *)dhdp->info; + if (dhd) { + + /* + * In case of Android cfg80211 driver, the bus is down in dhd_stop, + * calling stop again will cuase SD read/write errors. 
+ */ + if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) { + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + /* Stop the bus module */ +#ifdef BCMDBUS + /* Force Dongle terminated */ + if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0) + DHD_ERROR(("%s Setting WLC_TERMINATED failed\n", + __FUNCTION__)); + dbus_stop(dhd->pub.bus); + dhd->pub.busstate = DHD_BUS_DOWN; +#else + dhd_bus_stop(dhd->pub.bus, TRUE); +#endif /* BCMDBUS */ + } + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE) + dhd_bus_oob_intr_unregister(dhdp); +#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */ + } + } +} + +void dhd_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + unsigned long flags; + int timer_valid = FALSE; + struct net_device *dev; +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = NULL; +#endif // endif + if (!dhdp) + return; + + dhd = (dhd_info_t *)dhdp->info; + if (!dhd) + return; + + dev = dhd->iflist[0]->net; + + if (dev) { + rtnl_lock(); + if (dev->flags & IFF_UP) { + /* If IFF_UP is still up, it indicates that + * "ifconfig wlan0 down" hasn't been called. + * So invoke dev_close explicitly here to + * bring down the interface. + */ + DHD_TRACE(("IFF_UP flag is up. 
Enforcing dev_close from detach \n")); + dev_close(dev); + } + rtnl_unlock(); + } + + DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state)); + + dhd->pub.up = 0; + if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { + /* Give sufficient time for threads to start running in case + * dhd_attach() has failed + */ + OSL_SLEEP(100); + } +#ifdef DHD_WET + dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info); +#endif /* DHD_WET */ +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef PROP_TXSTATUS +#ifdef DHD_WLFC_THREAD + if (dhd->pub.wlfc_thread) { + kthread_stop(dhd->pub.wlfc_thread); + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); + } + dhd->pub.wlfc_thread = NULL; +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + +#ifdef WL_CFG80211 + if (dev) + wl_cfg80211_down(dev); +#endif /* WL_CFG80211 */ + + if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) { + + dhd_bus_detach(dhdp); +#ifdef BCMPCIE + if (is_reboot == SYS_RESTART) { + extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata; + if (dhd_wifi_platdata && !dhdp->dongle_reset) { + dhdpcie_bus_clock_stop(dhdp->bus); + wifi_platform_set_power(dhd_wifi_platdata->adapters, + FALSE, WIFI_TURNOFF_DELAY); + } + } +#endif /* BCMPCIE */ +#ifndef PCIE_FULL_DONGLE + if (dhdp->prot) + dhd_prot_detach(dhdp); +#endif /* !PCIE_FULL_DONGLE */ + } + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = FALSE; + unregister_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = FALSE; + unregister_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + if (dhd->dhd_state & 
DHD_ATTACH_STATE_EARLYSUSPEND_DONE) { + if (dhd->early_suspend.suspend) + unregister_early_suspend(&dhd->early_suspend); + } +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#if defined(WL_WIRELESS_EXT) +#ifdef WL_ESCAN + wl_escan_detach(dhdp); +#else + if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) { + /* Detatch and unlink in the iw */ + wl_iw_detach(dhdp); + } +#endif /* WL_ESCAN */ +#endif /* defined(WL_WIRELESS_EXT) */ +#ifdef WL_EXT_IAPSTA + wl_ext_iapsta_dettach(dhdp); +#endif + +#ifdef DHD_ULP + dhd_ulp_deinit(dhd->pub.osh, dhdp); +#endif /* DHD_ULP */ + + /* delete all interfaces, start with virtual */ + if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) { + int i = 1; + dhd_if_t *ifp; + + /* Cleanup virtual interfaces */ + dhd_net_if_lock_local(dhd); + for (i = 1; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + dhd_remove_if(&dhd->pub, i, TRUE); + } + } + dhd_net_if_unlock_local(dhd); + + /* delete primary interface 0 */ + ifp = dhd->iflist[0]; + if (ifp && ifp->net) { + +#ifdef WL_CFG80211 + cfg = wl_get_cfg(ifp->net); +#endif // endif + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { +#if defined(ARGOS_NOTIFY_CB) + argos_register_notifier_deinit(); +#endif // endif +#ifdef SET_RPS_CPUS + custom_rps_map_clear(ifp->net->_rx); +#endif /* SET_RPS_CPUS */ + netif_tx_disable(ifp->net); + unregister_netdev(ifp->net); + } +#ifdef PCIE_FULL_DONGLE + ifp->net = DHD_NET_DEV_NULL; +#else + ifp->net = NULL; +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdp->tickcnt); + deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ + + dhd_if_del_sta_list(ifp); + + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); + dhd->iflist[0] = NULL; + } + } + + /* Clear the 
watchdog timer */ + DHD_GENERAL_LOCK(&dhd->pub, flags); + timer_valid = dhd->wd_timer_valid; + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + if (timer_valid) + del_timer_sync(&dhd->timer); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + +#ifdef BCMDBUS + tasklet_kill(&dhd->tasklet); +#else + if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) { + if (dhd->thr_wdt_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_wdt_ctl); + } + + if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_rxf_ctl); + } + + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_dpc_ctl); + } else + { + tasklet_kill(&dhd->tasklet); + } + } +#endif /* BCMDBUS */ + +#ifdef WL_NATOE + if (dhd->pub.nfct) { + dhd_ct_close(dhd->pub.nfct); + } +#endif /* WL_NATOE */ + +#ifdef DHD_LB + if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) { + /* Clear the flag first to avoid calling the cpu notifier */ + dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE; + + /* Kill the Load Balancing Tasklets */ +#ifdef DHD_LB_RXP + cancel_work_sync(&dhd->rx_napi_dispatcher_work); + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#ifdef DHD_LB_TXP + cancel_work_sync(&dhd->tx_dispatcher_work); + tasklet_kill(&dhd->tx_tasklet); + __skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ +#ifdef DHD_LB_TXC + cancel_work_sync(&dhd->tx_compl_dispatcher_work); + tasklet_kill(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ + + /* Unregister from CPU Hotplug framework */ + dhd_unregister_cpuhp_callback(dhd); + + dhd_cpumasks_deinit(dhd); + DHD_LB_STATS_DEINIT(&dhd->pub); + } +#endif /* DHD_LB */ + + DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub); + +#ifdef WL_CFG80211 + if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { + if (!cfg) { + DHD_ERROR(("cfg NULL!\n")); + ASSERT(0); + } else { + wl_cfg80211_detach(cfg); + dhd_monitor_uninit(); + } + } +#endif // endif + +#ifdef 
DHD_PCIE_NATIVE_RUNTIMEPM + destroy_workqueue(dhd->tx_wq); + dhd->tx_wq = NULL; + destroy_workqueue(dhd->rx_wq); + dhd->rx_wq = NULL; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#ifdef DEBUGABILITY + if (dhdp->dbg) { +#ifdef DBG_PKT_MON + dhd_os_dbg_detach_pkt_monitor(dhdp); + dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock); +#endif /* DBG_PKT_MON */ + dhd_os_dbg_detach(dhdp); + } +#endif /* DEBUGABILITY */ +#ifdef SHOW_LOGTRACE + /* Release the skbs from queue for WLC_E_TRACE event */ + dhd_event_logtrace_flush_queue(dhdp); + + /* Wait till event logtrace context finishes */ + dhd_cancel_logtrace_process_sync(dhd); + mutex_lock(&dhd->pub.dhd_trace_lock); + remove_proc_entry("dhd_trace", NULL); + mutex_unlock(&dhd->pub.dhd_trace_lock); + + if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) { + if (dhd->event_data.fmts) { + MFREE(dhd->pub.osh, dhd->event_data.fmts, + dhd->event_data.fmts_size); + dhd->event_data.fmts = NULL; + } + if (dhd->event_data.raw_fmts) { + MFREE(dhd->pub.osh, dhd->event_data.raw_fmts, + dhd->event_data.raw_fmts_size); + dhd->event_data.raw_fmts = NULL; + } + if (dhd->event_data.raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.raw_sstr, + dhd->event_data.raw_sstr_size); + dhd->event_data.raw_sstr = NULL; + } + if (dhd->event_data.rom_raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr, + dhd->event_data.rom_raw_sstr_size); + dhd->event_data.rom_raw_sstr = NULL; + } + dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT; + } +#endif /* SHOW_LOGTRACE */ +#ifdef PNO_SUPPORT + if (dhdp->pno_state) + dhd_pno_deinit(dhdp); +#endif // endif +#ifdef RTT_SUPPORT + if (dhdp->rtt_state) { + dhd_rtt_deinit(dhdp); + } +#endif // endif +#if defined(CONFIG_PM_SLEEP) + if (dhd_pm_notifier_registered) { + unregister_pm_notifier(&dhd->pm_notifier); + dhd_pm_notifier_registered = FALSE; + } +#endif /* CONFIG_PM_SLEEP */ + +#ifdef DEBUG_CPU_FREQ + if (dhd->new_freq) + free_percpu(dhd->new_freq); + dhd->new_freq = NULL; + 
cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); +#endif // endif + DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_wd_counter = 0; + wake_lock_destroy(&dhd->wl_wdwake); + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_destroy(&dhd->wl_wifi); +#endif /* CONFIG_HAS_WAKELOCK */ + if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { + DHD_OS_WAKE_LOCK_DESTROY(dhd); + } + +#ifdef DHDTCPACK_SUPPRESS + /* This will free all MEM allocated for TCPACK SUPPRESS */ + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef PCIE_FULL_DONGLE + dhd_flow_rings_deinit(dhdp); + if (dhdp->prot) + dhd_prot_detach(dhdp); +#endif // endif + +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + dhd_free_tdls_peer_list(dhdp); +#endif // endif + +#ifdef DUMP_IOCTL_IOV_LIST + dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head)); +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef DHD_DEBUG + /* memory waste feature list initilization */ + dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head)); +#endif /* DHD_DEBUG */ +#ifdef WL_MONITOR + dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL); +#endif /* WL_MONITOR */ + +#ifdef DHD_ERPOM + if (dhdp->enable_erpom) { + dhdp->pom_func_deregister(&dhdp->pom_wlan_handler); + } +#endif /* DHD_ERPOM */ + + /* Prefer adding de-init code above this comment unless necessary. + * The idea is to cancel work queue, sysfs and flags at the end. 
+ */ + dhd_deferred_work_deinit(dhd->dhd_deferred_wq); + dhd->dhd_deferred_wq = NULL; + + /* log dump related buffers should be freed after wq is purged */ +#ifdef DHD_LOG_DUMP + dhd_log_dump_deinit(&dhd->pub); +#endif /* DHD_LOG_DUMP */ +#if defined(BCMPCIE) + if (dhdp->extended_trap_data) + { + MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN); + dhdp->extended_trap_data = NULL; + } +#endif /* BCMPCIE */ + +#ifdef DHD_DUMP_MNGR + if (dhd->pub.dump_file_manage) { + MFREE(dhd->pub.osh, dhd->pub.dump_file_manage, + sizeof(dhd_dump_file_manage_t)); + } +#endif /* DHD_DUMP_MNGR */ + dhd_sysfs_exit(dhd); + dhd->pub.fw_download_done = FALSE; + +#if defined(BT_OVER_SDIO) + mutex_destroy(&dhd->bus_user_lock); +#endif /* BT_OVER_SDIO */ + dhd_conf_detach(dhdp); + +} /* dhd_detach */ + +void +dhd_free(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + int i; + for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) { + if (dhdp->reorder_bufs[i]) { + reorder_info_t *ptr; + uint32 buf_size = sizeof(struct reorder_info); + + ptr = dhdp->reorder_bufs[i]; + + buf_size += ((ptr->max_idx + 1) * sizeof(void*)); + DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n", + i, ptr->max_idx, buf_size)); + + MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size); + dhdp->reorder_bufs[i] = NULL; + } + } + + dhd_sta_pool_fini(dhdp, DHD_MAX_STA); + + dhd = (dhd_info_t *)dhdp->info; + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } + if (dhd != NULL) { + + /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */ + if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, + DHD_PREALLOC_DHD_INFO, 0, FALSE)) + MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); + dhd = 
NULL; + } + } +} + +void +dhd_clear(dhd_pub_t *dhdp) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + int i; +#ifdef DHDTCPACK_SUPPRESS + /* Clean up timer/data structure for any remaining/pending packet or timer. */ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) { + if (dhdp->reorder_bufs[i]) { + reorder_info_t *ptr; + uint32 buf_size = sizeof(struct reorder_info); + + ptr = dhdp->reorder_bufs[i]; + + buf_size += ((ptr->max_idx + 1) * sizeof(void*)); + DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n", + i, ptr->max_idx, buf_size)); + + MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size); + dhdp->reorder_bufs[i] = NULL; + } + } + + dhd_sta_pool_clear(dhdp, DHD_MAX_STA); + + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } + } +} + +static void +dhd_module_cleanup(void) +{ + printf("%s: Enter\n", __FUNCTION__); + + dhd_bus_unregister(); + + wl_android_exit(); + + dhd_wifi_platform_unregister_drv(); + +#ifdef CUSTOMER_HW_AMLOGIC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + wifi_teardown_dt(); +#endif +#endif + printf("%s: Exit\n", __FUNCTION__); +} + +static void __exit +dhd_module_exit(void) +{ + atomic_set(&exit_in_progress, 1); + dhd_module_cleanup(); + unregister_reboot_notifier(&dhd_reboot_notifier); + dhd_destroy_to_notifier_skt(); +} + +static int __init +dhd_module_init(void) +{ + int err; + int retry = POWERUP_MAX_RETRY; + + printf("%s: in %s\n", __FUNCTION__, dhd_version); +#ifdef CUSTOMER_HW_AMLOGIC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + if (wifi_setup_dt()) { + printf("wifi_dt : fail to setup dt\n"); + } +#endif +#endif + + DHD_PERIM_RADIO_INIT(); + + if 
(firmware_path[0] != '\0') { + strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN); + fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + + if (nvram_path[0] != '\0') { + strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN); + nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + + do { + err = dhd_wifi_platform_register_drv(); + if (!err) { + register_reboot_notifier(&dhd_reboot_notifier); + break; + } else { + DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n", + __FUNCTION__, retry)); + strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN); + firmware_path[MOD_PARAM_PATHLEN-1] = '\0'; + strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN); + nvram_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + } while (retry--); + + dhd_create_to_notifier_skt(); + + if (err) { +#ifdef CUSTOMER_HW_AMLOGIC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + wifi_teardown_dt(); +#endif +#endif + DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__)); + } else { + if (!dhd_download_fw_on_driverload) { + dhd_driver_init_done = TRUE; + } + } + + printf("%s: Exit err=%d\n", __FUNCTION__, err); + return err; +} + +static int +dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused) +{ + DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code)); + if (code == SYS_RESTART) { +#ifdef BCMPCIE + is_reboot = code; +#endif /* BCMPCIE */ + } + return NOTIFY_DONE; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) +#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH) +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ + defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \ + defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \ + defined(CONFIG_ARCH_SDM845) || defined(CONFIG_SOC_EXYNOS9820) || \ + defined(CONFIG_ARCH_SM8150) +deferred_module_init_sync(dhd_module_init); +#else +deferred_module_init(dhd_module_init); +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 || 
+ * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895 + * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845 || CONFIG_SOC_EXYNOS9820 + * CONFIG_ARCH_SM8150 + */ +#elif defined(USE_LATE_INITCALL_SYNC) +late_initcall_sync(dhd_module_init); +#else +late_initcall(dhd_module_init); +#endif /* USE_LATE_INITCALL_SYNC */ +#else +module_init(dhd_module_init); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + +module_exit(dhd_module_exit); + +/* + * OS specific functions required to implement DHD driver in OS independent way + */ +int +dhd_os_proto_block(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + DHD_PERIM_UNLOCK(pub); + + down(&dhd->proto_sem); + + DHD_PERIM_LOCK(pub); + return 1; + } + + return 0; +} + +int +dhd_os_proto_unblock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + up(&dhd->proto_sem); + return 1; + } + + return 0; +} + +void +dhd_os_dhdiovar_lock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_lock(&dhd->dhd_iovar_mutex); + } +} + +void +dhd_os_dhdiovar_unlock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_unlock(&dhd->dhd_iovar_mutex); + } +} + +void +dhd_os_logdump_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = NULL; + + if (!pub) + return; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_lock(&dhd->logdump_lock); + } +} + +void +dhd_os_logdump_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = NULL; + + if (!pub) + return; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_unlock(&dhd->logdump_lock); + } +} + +unsigned long +dhd_os_dbgring_lock(void *lock) +{ + if (!lock) + return 0; + + mutex_lock((struct mutex *)lock); + + return 0; +} + +void +dhd_os_dbgring_unlock(void *lock, unsigned long flags) +{ + BCM_REFERENCE(flags); + + if (!lock) + return; + + mutex_unlock((struct mutex *)lock); +} + +unsigned int +dhd_os_get_ioctl_resp_timeout(void) +{ + return 
((unsigned int)dhd_ioctl_timeout_msec); +} + +void +dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec) +{ + dhd_ioctl_timeout_msec = (int)timeout_msec; +} + +int +dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool resched) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout, timeout_tmp = dhd_ioctl_timeout_msec; + + if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) { + timeout_tmp = dhd_ioctl_timeout_msec; + dhd_ioctl_timeout_msec = pub->conf->dhd_ioctl_timeout_msec; + } + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); +#else + timeout = dhd_ioctl_timeout_msec * HZ / 1000; +#endif // endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout); + + if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) { + dhd_ioctl_timeout_msec = timeout_tmp; + } + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_ioctl_resp_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->ioctl_resp_wait); + return 0; +} + +int +dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT); +#else + timeout = D3_ACK_RESP_TIMEOUT * HZ / 1000; +#endif // endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout); + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_d3ack_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->d3ack_wait); + return 0; +} + +int +dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Wait for bus 
usage contexts to gracefully exit within some timeout value + * Set time out to little higher than dhd_ioctl_timeout_msec, + * so that IOCTL timeout should not get affected. + */ + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); +#else + timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; +#endif // endif + + timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout); + + return timeout; +} + +/* + * Wait until the condition *var == condition is met. + * Returns 0 if the @condition evaluated to false after the timeout elapsed + * Returns 1 if the @condition evaluated to true + */ +int +dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); +#else + timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; +#endif // endif + + timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout); + + return timeout; +} + +/* + * Wait until the '(*var & bitmask) == condition' is met. 
+ * Returns 0 if the @condition evaluated to false after the timeout elapsed + * Returns 1 if the @condition evaluated to true + */ +int +dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var, + uint bitmask, uint condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); +#else + timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; +#endif // endif + + timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, + ((*var & bitmask) == condition), timeout); + + return timeout; +} + +int +dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition) +{ + int ret = 0; + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + DHD_PERIM_UNLOCK(pub); + ret = wait_event_interruptible(dhd->dmaxfer_wait, (*condition)); + DHD_PERIM_LOCK(pub); + + return ret; + +} + +int +dhd_os_dmaxfer_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->dmaxfer_wait); + return 0; +} + +void +dhd_os_tx_completion_wake(dhd_pub_t *dhd) +{ + /* Call wmb() to make sure before waking up the other event value gets updated */ + OSL_SMP_WMB(); + wake_up(&dhd->tx_completion_wait); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) +/* Fix compilation error for FC11 */ +INLINE +#endif // endif +int +dhd_os_busbusy_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + /* Call wmb() to make sure before waking up the other event value gets updated */ + OSL_SMP_WMB(); + wake_up(&dhd->dhd_bus_busy_state_wait); + return 0; +} + +void +dhd_os_wd_timer_extend(void *bus, bool extend) +{ +#ifndef BCMDBUS + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + + if (extend) + dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL); + else + dhd_os_wd_timer(bus, dhd->default_wd_interval); +#endif /* !BCMDBUS */ +} + +void +dhd_os_wd_timer(void *bus, uint wdtick) +{ +#ifndef BCMDBUS + dhd_pub_t *pub 
= bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__)); + return; + } + + DHD_GENERAL_LOCK(pub, flags); + + /* don't start the wd until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN) { + DHD_GENERAL_UNLOCK(pub, flags); +#ifdef BCMSDIO + if (!wdtick) { + DHD_OS_WD_WAKE_UNLOCK(pub); + } +#endif /* BCMSDIO */ + return; + } + + /* Totally stop the timer */ + if (!wdtick && dhd->wd_timer_valid == TRUE) { + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(pub, flags); + del_timer_sync(&dhd->timer); +#ifdef BCMSDIO + DHD_OS_WD_WAKE_UNLOCK(pub); +#endif /* BCMSDIO */ + return; + } + + if (wdtick) { +#ifdef BCMSDIO + DHD_OS_WD_WAKE_LOCK(pub); + dhd_watchdog_ms = (uint)wdtick; +#endif /* BCMSDIO */ + /* Re arm the timer, at last watchdog period */ + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + dhd->wd_timer_valid = TRUE; + } + DHD_GENERAL_UNLOCK(pub, flags); +#endif /* !BCMDBUS */ +} + +void * +dhd_os_open_image1(dhd_pub_t *pub, char *filename) +{ + struct file *fp; + int size; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? 
+ */ + if (IS_ERR(fp)) { + fp = NULL; + goto err; + } + + if (!S_ISREG(file_inode(fp)->i_mode)) { + DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename)); + fp = NULL; + goto err; + } + + size = i_size_read(file_inode(fp)); + if (size <= 0) { + DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size)); + fp = NULL; + goto err; + } + + DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size)); + +err: + return fp; +} + +int +dhd_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + int size; + + if (!image) { + return 0; + } + + size = i_size_read(file_inode(fp)); + rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size)); + + if (len >= size && size != rdlen) { + return -EIO; + } + + if (rdlen > 0) { + fp->f_pos += rdlen; + } + + return rdlen; +} + +#if defined(BT_OVER_SDIO) +int +dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rd_len; + uint str_len = 0; + char *str_end = NULL; + + if (!image) + return 0; + + rd_len = compat_kernel_read(fp, fp->f_pos, str, len); + str_end = strnchr(str, len, '\n'); + if (str_end == NULL) { + goto err; + } + str_len = (uint)(str_end - str); + + /* Advance file pointer past the string length */ + fp->f_pos += str_len + 1; + bzero(str_end, rd_len - str_len); + +err: + return str_len; +} +#endif /* defined (BT_OVER_SDIO) */ + +int +dhd_os_get_image_size(void *image) +{ + struct file *fp = (struct file *)image; + int size; + if (!image) { + return 0; + } + + size = i_size_read(file_inode(fp)); + + return size; +} + +void +dhd_os_close_image1(dhd_pub_t *pub, void *image) +{ + if (image) { + filp_close((struct file *)image, NULL); + } +} + +void +dhd_os_sdlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef BCMDBUS + spin_lock_bh(&dhd->sdlock); +#else + if (dhd_dpc_prio >= 0) + down(&dhd->sdsem); + else + 
spin_lock_bh(&dhd->sdlock); +#endif /* !BCMDBUS */ +} + +void +dhd_os_sdunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef BCMDBUS + spin_unlock_bh(&dhd->sdlock); +#else + if (dhd_dpc_prio >= 0) + up(&dhd->sdsem); + else + spin_unlock_bh(&dhd->sdlock); +#endif /* !BCMDBUS */ +} + +void +dhd_os_sdlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); +#ifdef BCMDBUS + spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags); +#else + spin_lock_bh(&dhd->txqlock); +#endif /* BCMDBUS */ +} + +void +dhd_os_sdunlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); +#ifdef BCMDBUS + spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags); +#else + spin_unlock_bh(&dhd->txqlock); +#endif /* BCMDBUS */ +} + +void +dhd_os_sdlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdunlock_rxq(dhd_pub_t *pub) +{ +} + +static void +dhd_os_rxflock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->rxf_lock); + +} + +static void +dhd_os_rxfunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->rxf_lock); +} + +#ifdef DHDTCPACK_SUPPRESS +unsigned long +dhd_os_tcpacklock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + unsigned long flags = 0; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef BCMSDIO + spin_lock_bh(&dhd->tcpack_lock); +#else + spin_lock_irqsave(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } + + return flags; +} + +void +dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd; + +#ifdef BCMSDIO + BCM_REFERENCE(flags); +#endif /* BCMSDIO */ + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef BCMSDIO + spin_unlock_bh(&dhd->tcpack_lock); +#else + spin_unlock_irqrestore(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } +} +#endif /* DHDTCPACK_SUPPRESS */ + +uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail) +{ + 
uint8* buf; + gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; + + buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size); + if (buf == NULL && kmalloc_if_fail) + buf = kmalloc(size, flags); + + return buf; +} + +void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size) +{ +} + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics * +dhd_get_wireless_stats(struct net_device *dev) +{ + int res = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!dhd->pub.up) { + return NULL; + } + + res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats); + + if (res == 0) + return &dhd->iw.wstats; + else + return NULL; +} +#endif /* defined(WL_WIRELESS_EXT) */ + +static int +dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, + wl_event_msg_t *event, void **data) +{ + int bcmerror = 0; +#ifdef WL_CFG80211 + unsigned long flags = 0; +#endif /* WL_CFG80211 */ + ASSERT(dhd != NULL); + +#ifdef SHOW_LOGTRACE + bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, + &dhd->event_data); +#else + bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, + NULL); +#endif /* SHOW_LOGTRACE */ + if (unlikely(bcmerror != BCME_OK)) { + return bcmerror; + } + + if (ntoh32(event->event_type) == WLC_E_IF) { + /* WLC_E_IF event types are consumed by wl_process_host_event. + * For ifadd/del ops, the netdev ptr may not be valid at this + * point. so return before invoking cfg80211/wext handlers. 
+ */ + return BCME_OK; + } + +#if defined(WL_EXT_IAPSTA) + wl_ext_iapsta_event(dhd->iflist[ifidx]->net, event, *data); +#endif /* defined(WL_EXT_IAPSTA) */ +#if defined(WL_WIRELESS_EXT) + if (event->bsscfgidx == 0) { + /* + * Wireless ext is on primary interface only + */ + ASSERT(dhd->iflist[ifidx] != NULL); + ASSERT(dhd->iflist[ifidx]->net != NULL); + + if (dhd->iflist[ifidx]->net) { + wl_iw_event(dhd->iflist[ifidx]->net, event, *data); + } + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef WL_CFG80211 + if (dhd->iflist[ifidx]->net) { + spin_lock_irqsave(&dhd->pub.up_lock, flags); + if (dhd->pub.up) { + wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data); + } + spin_unlock_irqrestore(&dhd->pub.up_lock, flags); + } +#endif /* defined(WL_CFG80211) */ + + return (bcmerror); +} + +/* send up locally generated event */ +void +dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) +{ + switch (ntoh32(event->event_type)) { + /* Handle error case or further events here */ + default: + break; + } +} + +#ifdef LOG_INTO_TCPDUMP +void +dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len) +{ + struct sk_buff *p, *skb; + uint32 pktlen; + int len; + dhd_if_t *ifp; + dhd_info_t *dhd; + uchar *skb_data; + int ifidx = 0; + struct ether_header eth; + + pktlen = sizeof(eth) + data_len; + dhd = dhdp->info; + + if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { + ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); + + bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN); + bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN); + ETHER_TOGGLE_LOCALADDR(ð.ether_shost); + eth.ether_type = hton16(ETHER_TYPE_BRCM); + + bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth)); + bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len); + skb = PKTTONATIVE(dhdp->osh, p); + skb_data = skb->data; + len = skb->len; + + ifidx = dhd_ifname2idx(dhd, "wlan0"); + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) + ifp = dhd->iflist[0]; + + ASSERT(ifp); + skb->dev = ifp->net; + 
skb->protocol = eth_type_trans(skb, skb->dev); + skb->data = skb_data; + skb->len = len; + + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + /* Send the packet */ + if (in_interrupt()) { + netif_rx(skb); + } else { + netif_rx_ni(skb); + } + } else { + /* Could not allocate a sk_buf */ + DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__)); + } +} +#endif /* LOG_INTO_TCPDUMP */ + +void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) +{ +#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT); +#else + int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + + dhd_os_sdunlock(dhd); + wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout); + dhd_os_sdlock(dhd); +#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + return; +} /* dhd_init_static_strs_array */ + +void dhd_wait_event_wakeup(dhd_pub_t *dhd) +{ +#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + if (waitqueue_active(&dhdinfo->ctrl_wait)) + wake_up(&dhdinfo->ctrl_wait); +#endif // endif + return; +} + +#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS) +int +dhd_net_bus_devreset(struct net_device *dev, uint8 flag) +{ + int ret; + + dhd_info_t *dhd = DHD_DEV_INFO(dev); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0) + return BCME_ERROR; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + if (flag == TRUE) { + /* Issue wl down command before resetting the chip */ + if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { + DHD_TRACE(("%s: wl down failed\n", __FUNCTION__)); + } +#ifdef PROP_TXSTATUS + if 
(dhd->pub.wlfc_enabled) { + dhd_wlfc_deinit(&dhd->pub); + } +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + if (dhd->pub.pno_state) { + dhd_pno_deinit(&dhd->pub); + } +#endif // endif +#ifdef RTT_SUPPORT + if (dhd->pub.rtt_state) { + dhd_rtt_deinit(&dhd->pub); + } +#endif /* RTT_SUPPORT */ + +#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT) + dhd_os_dbg_detach_pkt_monitor(&dhd->pub); +#endif /* DBG_PKT_MON */ + } + +#ifdef BCMSDIO + if (!flag) { + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); + } +#endif /* BCMSDIO */ + + ret = dhd_bus_devreset(&dhd->pub, flag); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + if (flag) { + /* Clear some flags for recovery logic */ + dhd->pub.dongle_trap_occured = 0; + dhd->pub.iovar_timeout_occured = 0; +#ifdef PCIE_FULL_DONGLE + dhd->pub.d3ack_timeout_occured = 0; + dhd->pub.livelock_occured = 0; +#endif /* PCIE_FULL_DONGLE */ +#ifdef DHD_MAP_LOGGING + dhd->pub.smmu_fault_occurred = 0; +#endif /* DHD_MAP_LOGGING */ + } + + if (ret) { + DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); + } + + return ret; +} + +#ifdef BCMSDIO +int +dhd_net_bus_suspend(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_suspend(&dhd->pub); +} + +int +dhd_net_bus_resume(struct net_device *dev, uint8 stage) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_resume(&dhd->pub, stage); +} + +#endif /* BCMSDIO */ +#endif /* BCMSDIO || BCMPCIE || BCMDBUS */ + +int net_os_set_suspend_disable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + ret = dhd->pub.suspend_disable_flag; + dhd->pub.suspend_disable_flag = val; + } + return ret; +} + +int 
net_os_set_suspend(struct net_device *dev, int val, int force) +{ + int ret = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) { +#ifdef CONFIG_MACH_UNIVERSAL7420 +#endif /* CONFIG_MACH_UNIVERSAL7420 */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + ret = dhd_set_suspend(val, &dhd->pub); +#else + ret = dhd_suspend_resume_helper(dhd, val, force); +#endif // endif +#ifdef WL_CFG80211 + wl_cfg80211_update_power_mode(dev); +#endif // endif + } + return ret; +} + +int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) + dhd->pub.suspend_bcn_li_dtim = val; + + return 0; +} + +int net_os_set_max_dtim_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) { + DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n", + __FUNCTION__, (val ? "Enable" : "Disable"))); + if (val) { + dhd->pub.max_dtim_enable = TRUE; + } else { + dhd->pub.max_dtim_enable = FALSE; + } + } else { + return -1; + } + + return 0; +} + +#if defined(PCIE_FULL_DONGLE) +void +dhd_pcie_backplane_access_lock(dhd_pub_t * pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->backplane_access_lock); +} + +void +dhd_pcie_backplane_access_unlock(dhd_pub_t * pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->backplane_access_lock); +} +#endif /* defined(PCIE_FULL_DONGLE) */ + +#ifdef PKT_FILTER_SUPPORT +int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) +{ + int ret = 0; + +#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!dhd_master_mode) + add_remove = !add_remove; + DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num)); + if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) { + return 0; + } + +#ifdef BLOCK_IPV6_PACKET + /* customer want to use NO IPV6 packets only */ + if (num == DHD_MULTICAST6_FILTER_NUM) { + return 0; + } +#endif /* 
BLOCK_IPV6_PACKET */ + + if (num >= dhd->pub.pktfilter_count) { + return -EINVAL; + } + + ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num); +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ + + return ret; +} + +int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val) + +{ + int ret = 0; + + /* Packet filtering is set only if we still in early-suspend and + * we need either to turn it ON or turn it OFF + * We can always turn it OFF in case of early-suspend, but we turn it + * back ON only if suspend_disable_flag was not set + */ + if (dhdp && dhdp->up) { + if (dhdp->in_suspend) { + if (!val || (val && !dhdp->suspend_disable_flag)) + dhd_enable_packet_filter(val, dhdp); + } + } + return ret; +} + +/* function to enable/disable packet for Network device */ +int net_os_enable_packet_filter(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val)); + return dhd_os_enable_packet_filter(&dhd->pub, val); +} +#endif /* PKT_FILTER_SUPPORT */ + +int +dhd_dev_init_ioctl(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret; + + if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) + goto done; + +done: + return ret; +} + +int +dhd_dev_get_feature_set(struct net_device *dev) +{ + dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhd = (&ptr->pub); + int feature_set = 0; + + if (FW_SUPPORTED(dhd, sta)) + feature_set |= WIFI_FEATURE_INFRA; + if (FW_SUPPORTED(dhd, dualband)) + feature_set |= WIFI_FEATURE_INFRA_5G; + if (FW_SUPPORTED(dhd, p2p)) + feature_set |= WIFI_FEATURE_P2P; + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) + feature_set |= WIFI_FEATURE_SOFT_AP; + if (FW_SUPPORTED(dhd, tdls)) + feature_set |= WIFI_FEATURE_TDLS; + if (FW_SUPPORTED(dhd, vsdb)) + feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL; + if (FW_SUPPORTED(dhd, nan)) { + feature_set |= WIFI_FEATURE_NAN; + /* NAN is essentail for d2d rtt */ + if (FW_SUPPORTED(dhd, rttd2d)) + feature_set |= 
WIFI_FEATURE_D2D_RTT;
	}
#ifdef RTT_SUPPORT
	/* Advertise RTT ranging bits only when the firmware reported support */
	if (dhd->rtt_supported) {
		feature_set |= WIFI_FEATURE_D2D_RTT;
		feature_set |= WIFI_FEATURE_D2AP_RTT;
	}
#endif /* RTT_SUPPORT */
#ifdef LINKSTAT_SUPPORT
	feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */

#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
	if (dhd_is_pno_supported(dhd)) {
		feature_set |= WIFI_FEATURE_PNO;
#ifdef GSCAN_SUPPORT
		/* terence 20171115: remove to get GTS PASS
		 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
		 */
//		feature_set |= WIFI_FEATURE_GSCAN;
//		feature_set |= WIFI_FEATURE_HAL_EPNO;
#endif /* GSCAN_SUPPORT */
	}
#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
#ifdef RSSI_MONITOR_SUPPORT
	if (FW_SUPPORTED(dhd, rssi_mon)) {
		feature_set |= WIFI_FEATURE_RSSI_MONITOR;
	}
#endif /* RSSI_MONITOR_SUPPORT */
#ifdef WL11U
	feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
#ifdef NDO_CONFIG_SUPPORT
	feature_set |= WIFI_FEATURE_CONFIG_NDO;
#endif /* NDO_CONFIG_SUPPORT */
#ifdef KEEP_ALIVE
	feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
#endif /* KEEP_ALIVE */
#ifdef FILTER_IE
	if (FW_SUPPORTED(dhd, fie)) {
		feature_set |= WIFI_FEATURE_FILTER_IE;
	}
#endif /* FILTER_IE */
#ifdef ROAMEXP_SUPPORT
	/* Check if the Android O roam feature is supported by FW: probe with a
	 * no-op whitelist set; anything other than BCME_UNSUPPORTED means yes.
	 */
	if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
		feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
	}
#endif /* ROAMEXP_SUPPORT */
	return feature_set;
}

/*
 * Return the feature-set bitmask for concurrent interface slot 'num'
 * (0 = primary STA, 1 = P2P, 2 = NAN/TDLS per the switch below).
 * Bits common to all slots are always included; WIFI_FEATURE_INVALID is
 * returned for an out-of-range slot index.
 */
int
dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
{
	int feature_set_full;
	int ret = 0;

	feature_set_full = dhd_dev_get_feature_set(dev);

	/* Common feature set for all interface */
	ret = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
		(feature_set_full &
		WIFI_FEATURE_EPR);

	/* Specific feature group for each interface */
	switch (num) {
	case 0:
		ret |= (feature_set_full & WIFI_FEATURE_P2P) |
			/* Not supported yet */
			/* (feature_set_full & WIFI_FEATURE_NAN) | */
			(feature_set_full & WIFI_FEATURE_TDLS) |
			(feature_set_full & WIFI_FEATURE_PNO) |
			(feature_set_full & WIFI_FEATURE_HAL_EPNO) |
			(feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
			(feature_set_full & WIFI_FEATURE_GSCAN) |
			(feature_set_full & WIFI_FEATURE_HOTSPOT) |
			(feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
		break;

	case 1:
		ret |= (feature_set_full & WIFI_FEATURE_P2P);
		/* Not yet verified NAN with P2P */
		/* (feature_set_full & WIFI_FEATURE_NAN) | */
		break;

	case 2:
		ret |= (feature_set_full & WIFI_FEATURE_NAN) |
			(feature_set_full & WIFI_FEATURE_TDLS) |
			(feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
		break;

	default:
		ret = WIFI_FEATURE_INVALID;
		DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
		break;
	}

	return ret;
}

#ifdef CUSTOM_FORCE_NODFS_FLAG
/*
 * Set/clear the platform "no DFS channels" flag and force a country
 * re-application so the new flag takes effect on the next country set.
 * Always returns 0.
 */
int
dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (nodfs)
		dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
	else
		dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
	dhd->pub.force_country_change = TRUE;
	return 0;
}
#endif /* CUSTOM_FORCE_NODFS_FLAG */

#ifdef NDO_CONFIG_SUPPORT
/*
 * Enable/disable IPv6 Neighbor Discovery offload.
 * On enable: only the host-side flag is set here (firmware enable is
 * deferred to suspend) and the host IPv6 address table is synced to FW.
 * On disable: offload is turned off in firmware immediately.
 * Returns BCME_OK or a negative BCME_* error.
 */
int
dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	if (enable) {
		/* enable ND offload feature (will be enabled in FW on suspend) */
		dhdp->ndo_enable = TRUE;

		/* Update changes of anycast address & DAD failed address.
		 * BCME_NORESOURCE (host IP overflow) is tolerated here.
		 */
		ret = dhd_dev_ndo_update_inet6addr(dev);
		if ((ret < 0) && (ret != BCME_NORESOURCE)) {
			DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
			return ret;
		}
	} else {
		/* disable ND offload feature */
		dhdp->ndo_enable =
FALSE;

		/* disable ND offload in FW */
		ret = dhd_ndo_enable(dhdp, FALSE);
		if (ret < 0) {
			DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
		}
	}
	return ret;
}

/* #pragma used as a WAR to fix build failure,
* ignore dropping of 'const' qualifier in 'list_entry' macro
* this pragma disables the warning only for the following function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"

/*
 * Count the host IPv6 addresses the ND offload would have to track:
 * unicast addresses that passed DAD plus all anycast addresses.
 * Takes inet6->lock (read, BH-disabled) for the duration of the walk.
 */
static int
dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
{
	struct inet6_ifaddr *ifa;
	struct ifacaddr6 *acaddr = NULL;
	int addr_count = 0;

	/* lock */
	read_lock_bh(&inet6->lock);

	/* Count valid unicast address */
	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
		if ((ifa->flags & IFA_F_DADFAILED) == 0) {
			addr_count++;
		}
	}

	/* Count anycast address */
	acaddr = inet6->ac_list;
	while (acaddr) {
		addr_count++;
		acaddr = acaddr->aca_next;
	}

	/* unlock */
	read_unlock_bh(&inet6->lock);

	return addr_count;
}

/*
 * Sync the primary interface's IPv6 addresses (unicast + anycast) into
 * firmware for ND offload, handling the "more addresses than FW can hold"
 * overflow state. Returns BCME_OK or a negative BCME_* error.
 */
int
dhd_dev_ndo_update_inet6addr(struct net_device *dev)
{
	dhd_info_t *dhd;
	dhd_pub_t *dhdp;
	struct inet6_dev *inet6;
	struct inet6_ifaddr *ifa;
	struct ifacaddr6 *acaddr = NULL;
	struct in6_addr *ipv6_addr = NULL;
	int cnt, i;
	int ret = BCME_OK;

	/*
	 * this function evaluates host ip address in struct inet6_dev
	 * unicast addr in inet6_dev->addr_list
	 * anycast addr in inet6_dev->ac_list
	 * while evaluating inet6_dev, read_lock_bh() is required to prevent
	 * access on null(freed) pointer.
	 */

	if (dev) {
		inet6 = dev->ip6_ptr;
		if (!inet6) {
			DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
			return BCME_ERROR;
		}

		dhd = DHD_DEV_INFO(dev);
		if (!dhd) {
			DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
			return BCME_ERROR;
		}
		dhdp = &dhd->pub;

		/* ND offload is configured on the primary interface (ifidx 0) only */
		if (dhd_net2idx(dhd, dev) != 0) {
			DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
			return BCME_ERROR;
		}
	} else {
		DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Check host IP overflow: more addresses than FW table can hold */
	cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
	if (cnt > dhdp->ndo_max_host_ip) {
		if (!dhdp->ndo_host_ip_overflow) {
			dhdp->ndo_host_ip_overflow = TRUE;
			/* Disable ND offload in FW */
			DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, FALSE);
		}

		return ret;
	}

	/*
	 * Allocate ipv6 addr buffer to store addresses to be added/removed.
	 * driver need to lock inet6_dev while accessing structure. but, driver
	 * cannot use ioctl while inet6_dev locked since it requires scheduling
	 * hence, copy addresses to the buffer and do ioctl after unlock.
	 */
	ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
		sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
	if (!ipv6_addr) {
		DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* Find DAD failed unicast address to be removed */
	cnt = 0;
	read_lock_bh(&inet6->lock);
	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
		/* DAD failed unicast address */
		if ((ifa->flags & IFA_F_DADFAILED) &&
			(cnt < dhdp->ndo_max_host_ip)) {
			memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
			cnt++;
		}
	}
	read_unlock_bh(&inet6->lock);

	/* Remove DAD failed unicast address */
	for (i = 0; i < cnt; i++) {
		DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
		ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
		if (ret < 0) {
			goto done;
		}
	}

	/* Remove all anycast address; they are re-added from scratch below */
	ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
	if (ret < 0) {
		goto done;
	}

	/*
	 * if ND offload was disabled due to host ip overflow,
	 * attempt to add valid unicast address.
	 */
	if (dhdp->ndo_host_ip_overflow) {
		/* Find valid unicast address */
		cnt = 0;
		read_lock_bh(&inet6->lock);
		list_for_each_entry(ifa, &inet6->addr_list, if_list) {
			/* valid unicast address */
			if (!(ifa->flags & IFA_F_DADFAILED) &&
				(cnt < dhdp->ndo_max_host_ip)) {
				memcpy(&ipv6_addr[cnt], &ifa->addr,
					sizeof(struct in6_addr));
				cnt++;
			}
		}
		read_unlock_bh(&inet6->lock);

		/* Add valid unicast address */
		for (i = 0; i < cnt; i++) {
			ret = dhd_ndo_add_ip_with_type(dhdp,
				(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
			if (ret < 0) {
				goto done;
			}
		}
	}

	/* Find anycast address */
	cnt = 0;
	read_lock_bh(&inet6->lock);
	acaddr = inet6->ac_list;
	while (acaddr) {
		if (cnt < dhdp->ndo_max_host_ip) {
			memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
			cnt++;
		}
		acaddr = acaddr->aca_next;
	}
	read_unlock_bh(&inet6->lock);

	/* Add anycast address */
	for (i = 0; i < cnt; i++) {
		ret = dhd_ndo_add_ip_with_type(dhdp,
			(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
		if (ret < 0) {
			goto done;
		}
	}

	/* Now All host IP addr were added successfully; clear overflow state */
	if (dhdp->ndo_host_ip_overflow) {
		dhdp->ndo_host_ip_overflow = FALSE;
		if (dhdp->in_suspend) {
			/* driver is in (early) suspend state, need to enable ND offload in FW */
			DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, TRUE);
		}
	}

done:
	if (ipv6_addr) {
		MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
	}

	return ret;
}
#pragma GCC diagnostic pop

#endif /* NDO_CONFIG_SUPPORT */

#ifdef PNO_SUPPORT
/* Linux wrapper to call common dhd_pno_stop_for_ssid */
int
dhd_dev_pno_stop_for_ssid(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_stop_for_ssid(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_set_for_ssid */
int
dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
		pno_repeat, pno_freq_expo_max, channel_list, nchan));
}

/* Linux wrapper to call common dhd_pno_enable */
int
dhd_dev_pno_enable(struct net_device *dev, int enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_enable(&dhd->pub, enable));
}

/* Linux wrapper to call common dhd_pno_set_for_hotlist */
int
dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
	struct dhd_pno_hotlist_params *hotlist_params)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
}
/* Linux wrapper to call common dhd_pno_stop_for_batch */
int
dhd_dev_pno_stop_for_batch(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_stop_for_batch(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_set_for_batch */
int
dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
}

/* Linux wrapper to call common dhd_pno_get_for_batch */
int
dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
}
#endif /* PNO_SUPPORT */

#if defined(PNO_SUPPORT)
#ifdef GSCAN_SUPPORT
/* Linux wrapper to call common dhd_is_legacy_pno_enabled */
bool
dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_is_legacy_pno_enabled(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_set_epno */
int
dhd_dev_set_epno(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (!dhd) {
		return BCME_ERROR;
	}
	return dhd_pno_set_epno(&dhd->pub);
}
/* Linux wrapper to call common dhd_pno_flush_fw_epno */
int
dhd_dev_flush_fw_epno(struct net_device
*dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (!dhd) {
		return BCME_ERROR;
	}
	return dhd_pno_flush_fw_epno(&dhd->pub);
}

/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *buf, bool flush)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
}

/* Linux wrapper to call common dhd_wait_batch_results_complete */
int
dhd_dev_wait_batch_results_complete(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_wait_batch_results_complete(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_lock_batch_results */
int
dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_lock_batch_results(&dhd->pub));
}
/* Linux wrapper to call common dhd_pno_unlock_batch_results */
void
dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_unlock_batch_results(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
int
dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
}

/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
int
dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
}

/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
void *
dhd_dev_hotlist_scan_event(struct net_device *dev,
	const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
}

/* Linux wrapper to call common dhd_process_full_gscan_result */
void *
dhd_dev_process_full_gscan_result(struct net_device *dev,
const void *data, uint32 len, int *send_evt_bytes)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
}

/* Linux wrapper to call common dhd_gscan_hotlist_cache_cleanup */
void
dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);

	return;
}

/* Linux wrapper to call common dhd_gscan_batch_cache_cleanup */
int
dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
}

/* Linux wrapper to call common dhd_retreive_batch_scan_results */
int
dhd_dev_retrieve_batch_scan(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_retreive_batch_scan_results(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_process_epno_result */
void * dhd_dev_process_epno_result(struct net_device *dev,
	const void *data, uint32 event, int *send_evt_bytes)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
}

/*
 * Push the Android lazy-roam parameter block to firmware via the
 * "roam_exp_params" iovar. Returns the iovar result (negative on failure).
 */
int
dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
	wlc_roam_exp_params_t *roam_param)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_roam_exp_cfg_t roam_exp_cfg;
	int err;

	if (!roam_param) {
		return BCME_BADARG;
	}

	DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
		roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
	DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
		roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
		roam_param->cur_bssid_boost));
	DHD_INFO(("alert_roam_trigger_thr %d 
a_band_max_boost %d\n",
		roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));

	memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
	roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
	/* Preserve the current enable state when re-writing the parameters */
	if (dhd->pub.lazy_roam_enable) {
		roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
	}
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
		(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
		TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
	}
	return err;
}

/*
 * Enable/disable lazy roam in firmware (flags-only "roam_exp_params" write)
 * and mirror the resulting state in dhd->pub.lazy_roam_enable on success.
 */
int
dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_roam_exp_cfg_t roam_exp_cfg;

	memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
	if (enable) {
		roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
	}

	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
		(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
		TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
	} else {
		dhd->pub.lazy_roam_enable = (enable != 0);
	}
	return err;
}

/*
 * Program the lazy-roam BSSID preference list ("roam_exp_bssid_pref" iovar).
 * An empty list with 'flush' set clears the existing preference table.
 */
int
dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
	wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
{
	int err;
	uint len;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	bssid_pref->version = BSSID_PREF_LIST_VERSION;
	/* By default programming bssid pref flushes out old values */
	bssid_pref->flags = (flush && !bssid_pref->count) ?
		ROAM_EXP_CLEAR_BSSID_PREF: 0;
	/* wl_bssid_pref_cfg_t carries one inline entry; add the rest */
	len = sizeof(wl_bssid_pref_cfg_t);
	if (bssid_pref->count) {
		len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
	}
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
		(char *)bssid_pref, len, NULL, 0, TRUE);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* GSCAN_SUPPORT */

#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
/*
 * Install a MAC blacklist (WLC_SET_MACLIST) and select the matching MAC
 * filter mode: DENY when a list is given, DISABLED when flushing with no
 * list. Returns BCME_OK or an ioctl error.
 */
int
dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
	uint32 len, uint32 flush)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	int macmode;

	if (blacklist) {
		err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
			len, TRUE, 0);
		if (err != BCME_OK) {
			DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
			return err;
		}
	}
	/* By default programming blacklist flushes out old values */
	macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
	err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
		sizeof(macmode), TRUE, 0);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
	}
	return err;
}

/*
 * Program the roam SSID whitelist ("roam_exp_ssid_whitelist" iovar).
 * NULL list + flush sends an empty list to clear; NULL without flush is
 * rejected with BCME_BADARG.
 */
int
dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
	uint32 len, uint32 flush)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_ssid_whitelist_t whitelist_ssid_flush;

	if (!ssid_whitelist) {
		if (flush) {
			ssid_whitelist = &whitelist_ssid_flush;
			ssid_whitelist->ssid_count = 0;
		} else {
			DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
			return BCME_BADARG;
		}
	}
	ssid_whitelist->version = SSID_WHITELIST_VERSION;
	ssid_whitelist->flags = flush ?
ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
		0, TRUE);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */

#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
/* Linux wrapper to call common dhd_pno_get_gscan */
void *
dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *info, uint32 *len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
}
#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
#endif // endif

#ifdef RSSI_MONITOR_SUPPORT
/*
 * Start or stop firmware RSSI monitoring within [min_rssi, max_rssi]
 * via the "rssi_monitor" iovar. BCME_UNSUPPORTED from older firmware is
 * tolerated silently; other failures are logged.
 */
int
dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
	int8 max_rssi, int8 min_rssi)
{
	int err;
	wl_rssi_monitor_cfg_t rssi_monitor;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	rssi_monitor.version = RSSI_MONITOR_VERSION;
	rssi_monitor.max_rssi = max_rssi;
	rssi_monitor.min_rssi = min_rssi;
	rssi_monitor.flags = start ?
		0: RSSI_MONITOR_STOP;
	err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
		NULL, 0, TRUE);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* RSSI_MONITOR_SUPPORT */

#ifdef DHDTCPACK_SUPPRESS
/* Linux wrapper to set the TCP-ACK suppression mode */
int
dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_tcpack_suppress_set(&dhd->pub, enable);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* DHDTCPACK_SUPPRESS */

/*
 * Store the random-MAC OUI requested by the framework (3 bytes) in
 * dhdp->rand_mac_oui; it is pushed to FW later by dhd_set_rand_mac_oui().
 * Multicast OUIs are rejected.
 */
int
dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (!dhdp || !oui) {
		DHD_ERROR(("NULL POINTER : %s\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	if (ETHER_ISMULTI(oui)) {
		DHD_ERROR(("Expected unicast OUI\n"));
		return BCME_ERROR;
	} else {
		uint8 *rand_mac_oui = dhdp->rand_mac_oui;
		memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
		DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
			MACOUI2STRDBG(rand_mac_oui)));
	}
	return BCME_OK;
}

/*
 * Push the stored random-MAC OUI to firmware ("pfn_macaddr" iovar).
 * An all-zero OUI clears randomization (flags = 0); otherwise the OUI-only
 * and unassociated-only masks are set.
 */
int
dhd_set_rand_mac_oui(dhd_pub_t *dhd)
{
	int err;
	wl_pfn_macaddr_cfg_t wl_cfg;
	uint8 *rand_mac_oui = dhd->rand_mac_oui;

	memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
	memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
	wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
	if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
		wl_cfg.flags = 0;
	} else {
		wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
	}

	DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
		MACOUI2STRDBG(rand_mac_oui)));

	err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
	}
	return err;
}
+ +#ifdef RTT_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_set_cfg(&dhd->pub, buf)); +} + +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); +} + +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn)); +} + +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn)); +} + +int +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_capability(&dhd->pub, capa)); +} + +int +dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_avail_channel(&dhd->pub, channel_info)); +} + +int +dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_enable_responder(&dhd->pub, channel_info)); +} + +int dhd_dev_rtt_cancel_responder(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_cancel_responder(&dhd->pub)); +} + +#endif /* RTT_SUPPORT */ + +#ifdef KEEP_ALIVE +#define KA_TEMP_BUF_SIZE 512 +#define KA_FRAME_SIZE 300 + +int +dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt, + uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec) +{ + const int 
ETHERTYPE_LEN = 2; + char *pbuf = NULL; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL; + int buf_len = 0; + int str_len = 0; + int res = BCME_ERROR; + int len_bytes = 0; + int i = 0; + + /* ether frame to have both max IP pkt (256 bytes) and ether header */ + char *pmac_frame = NULL; + char *pmac_frame_begin = NULL; + + /* + * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, + * dongle shall reject a mkeep_alive request. + */ + if (!dhd_support_sta_mode(dhd_pub)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) { + DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); + res = BCME_NOMEM; + return res; + } + + if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) { + DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE)); + res = BCME_NOMEM; + goto exit; + } + pmac_frame_begin = pmac_frame; + + /* + * Get current mkeep-alive status. 
+ */ + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf, + KA_TEMP_BUF_SIZE, FALSE); + if (res < 0) { + DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); + goto exit; + } else { + /* Check available ID whether it is occupied */ + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; + if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { + DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n", + __FUNCTION__, mkeep_alive_id)); + + /* Current occupied ID info */ + DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__)); + DHD_ERROR((" Id : %d\n" + " Period: %d msec\n" + " Length: %d\n" + " Packet: 0x", + mkeep_alive_pktp->keep_alive_id, + dtoh32(mkeep_alive_pktp->period_msec), + dtoh16(mkeep_alive_pktp->len_bytes))); + + for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { + DHD_ERROR(("%02x", mkeep_alive_pktp->data[i])); + } + DHD_ERROR(("\n")); + + res = BCME_NOTFOUND; + goto exit; + } + } + + /* Request the specified ID */ + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + memset(pbuf, 0, KA_TEMP_BUF_SIZE); + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(pbuf, str, str_len); + pbuf[str_len] = '\0'; + + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1); + mkeep_alive_pkt.period_msec = htod32(period_msec); + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + + /* ID assigned */ + mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; + + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + + /* + * Build up Ethernet Frame + */ + + /* Mapping dest mac addr */ + memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN); + pmac_frame += ETHER_ADDR_LEN; + + /* Mapping src mac addr */ + memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN); + pmac_frame += ETHER_ADDR_LEN; + + /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */ + *(pmac_frame++) = 0x08; + *(pmac_frame++) = 0x00; + + /* Mapping IP pkt */ + memcpy(pmac_frame, ip_pkt, 
ip_pkt_len); + pmac_frame += ip_pkt_len; + + /* + * Length of ether frame (assume to be all hexa bytes) + * = src mac + dst mac + ether type + ip pkt len + */ + len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len; + memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes); + buf_len += len_bytes; + mkeep_alive_pkt.len_bytes = htod16(len_bytes); + + /* + * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); + + res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0); +exit: + if (pmac_frame_begin) { + MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE); + pmac_frame_begin = NULL; + } + if (pbuf) { + MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE); + pbuf = NULL; + } + return res; +} + +int +dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id) +{ + char *pbuf = NULL; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL; + int res = BCME_ERROR; + int i = 0; + + /* + * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, + * dongle shall reject a mkeep_alive request. + */ + if (!dhd_support_sta_mode(dhd_pub)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + /* + * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt. 
+ */ + if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) { + DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); + return res; + } + + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, + sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE); + if (res < 0) { + DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); + goto exit; + } else { + /* Check occupied ID */ + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; + DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__)); + DHD_INFO((" Id : %d\n" + " Period: %d msec\n" + " Length: %d\n" + " Packet: 0x", + mkeep_alive_pktp->keep_alive_id, + dtoh32(mkeep_alive_pktp->period_msec), + dtoh16(mkeep_alive_pktp->len_bytes))); + + for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { + DHD_INFO(("%02x", mkeep_alive_pktp->data[i])); + } + DHD_INFO(("\n")); + } + + /* Make it stop if available */ + if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { + DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id)); + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + + mkeep_alive_pkt.period_msec = 0; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; + + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", + (char *)&mkeep_alive_pkt, + WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE); + } else { + DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id)); + res = BCME_NOTFOUND; + } +exit: + if (pbuf) { + MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE); + pbuf = NULL; + } + return res; +} +#endif /* KEEP_ALIVE */ + +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +static void _dhd_apf_lock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) { + mutex_lock(&dhd->dhd_apf_mutex); + } +#endif // endif +} + +static void _dhd_apf_unlock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) { + 
		mutex_unlock(&dhd->dhd_apf_mutex);
	}
#endif // endif
}

/*
 * Install an APF program as packet filter 'filter_id' via the
 * "pkt_filter_add" iovar. The program is validated against
 * WL_APF_PROGRAM_MAX_SIZE and copied after the fixed filter header.
 * Returns 0 or a negative errno/iovar error.
 */
static int
__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	wl_pkt_filter_t * pkt_filterp;
	wl_apf_program_t *apf_program;
	char *buf;
	u32 cmd_len, buf_len;
	int ifidx, ret;
	char cmd[] = "pkt_filter_add";

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	/* sizeof(cmd) includes the terminating NUL required by the iovar */
	cmd_len = sizeof(cmd);

	/* Check if the program_len is more than the expected len
	 * and if the program is NULL return from here.
	 */
	if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
		DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
				__FUNCTION__, program_len, program));
		return -EINVAL;
	}
	buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
		WL_APF_PROGRAM_FIXED_LEN + program_len;

	buf = MALLOCZ(dhdp->osh, buf_len);
	if (unlikely(!buf)) {
		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
		return -ENOMEM;
	}

	memcpy(buf, cmd, cmd_len);

	pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
	pkt_filterp->id = htod32(filter_id);
	pkt_filterp->negate_match = htod32(FALSE);
	pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);

	apf_program = &pkt_filterp->u.apf_program;
	apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
	apf_program->instr_len = htod16(program_len);
	memcpy(apf_program->instrs, program, program_len);

	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

	if (buf) {
		MFREE(dhdp->osh, buf, buf_len);
	}
	return ret;
}

/*
 * Enable/disable packet filter 'filter_id' ("pkt_filter_enable" iovar) and
 * then (re)assert the master filter mode. The 'mode' parameter is currently
 * unused by the body — TODO(review): confirm against callers.
 * Returns 0 or a negative errno/iovar error.
 */
static int
__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	wl_pkt_filter_enable_t * pkt_filterp;
	char *buf;
	u32 cmd_len, buf_len;
	int ifidx, ret;
	char cmd[] = "pkt_filter_enable";

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	cmd_len = sizeof(cmd);
	buf_len = cmd_len + sizeof(*pkt_filterp);

	buf = MALLOCZ(dhdp->osh, buf_len);
	if (unlikely(!buf)) {
		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
		return -ENOMEM;
	}

	memcpy(buf, cmd, cmd_len);

	pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
	pkt_filterp->id = htod32(filter_id);
	pkt_filterp->enable = htod32(enable);

	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
		goto exit;
	}

	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
		WLC_SET_VAR, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

exit:
	if (buf) {
		MFREE(dhdp->osh, buf, buf_len);
	}
	return ret;
}

/* Remove packet filter 'filter_id' ("pkt_filter_delete" iovar). */
static int
__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
		htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

	return ret;
}

/* Public lock/unlock wrappers around the APF mutex */
void dhd_apf_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	_dhd_apf_lock_local(dhd);
}

void dhd_apf_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	_dhd_apf_unlock_local(dhd);
}

/*
 * Query the firmware APF interpreter version ("apf_ver" iovar).
 * Reports version 0 (meaning "APF unsupported") when the firmware lacks
 * the capability, which the Android framework treats as a clean opt-out.
 */
int
dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	if (!FW_SUPPORTED(dhdp, apf)) {
		DHD_INFO(("%s: firmware doesn't support APF\n", __FUNCTION__));

		/*
		 * Notify Android framework that APF is not supported by setting
		 * version as zero.
		 */
		*version = 0;
		return BCME_OK;
	}

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
		WLC_GET_VAR, FALSE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
			__FUNCTION__, ret));
	}

	return ret;
}

/*
 * Query the maximum APF program size ("apf_size_limit" iovar); 0 when the
 * firmware has no APF support.
 */
int
dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	if (!FW_SUPPORTED(dhdp, apf)) {
		DHD_INFO(("%s: firmware doesn't support APF\n", __FUNCTION__));
		*max_len = 0;
		return BCME_OK;
	}

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
		WLC_GET_VAR, FALSE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
			__FUNCTION__, ret));
	}

	return ret;
}

/*
 * Replace the single APF program slot (PKT_FILTER_APF_ID): delete any
 * existing program, then install the new one under the APF lock.
 * (Function continues beyond this hunk.)
 */
int
dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
	uint32 program_len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret;

	DHD_APF_LOCK(ndev);

	/* delete, if filter already exists */
	if (dhdp->apf_set) {
		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
		if (unlikely(ret)) {
			goto exit;
		}
		dhdp->apf_set = FALSE;
	}

	ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
	if (ret) {
		goto exit;
	}
	dhdp->apf_set = TRUE;
+ if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { + /* Driver is still in (early) suspend state, enable APF filter back */ + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } +exit: + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_enable_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + bool nan_dp_active = false; + + DHD_APF_LOCK(ndev); +#ifdef WL_NAN + nan_dp_active = wl_cfgnan_is_dp_active(ndev); +#endif /* WL_NAN */ + if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) && + !nan_dp_active)) { + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_disable_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_APF_LOCK(ndev); + + if (dhdp->apf_set) { + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE); + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_delete_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_APF_LOCK(ndev); + + if (dhdp->apf_set) { + ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); + if (!ret) { + dhdp->apf_set = FALSE; + } + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} +#endif /* PKT_FILTER_SUPPORT && APF */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) +{ + dhd_info_t *dhd; + struct net_device *dev; + + dhd = (dhd_info_t *)dhd_info; + dev = dhd->iflist[0]->net; + + if (dev) { + /* + * For HW2, dev_close need to be done to recover + * from upper layer after hang. 
For Interposer skip + * dev_close so that dhd iovars can be used to take + * socramdump after crash, also skip for HW4 as + * handling of hang event is different + */ +#if defined(WL_WIRELESS_EXT) + wl_iw_send_priv_event(dev, "HANG"); +#endif // endif +#if defined(WL_CFG80211) + wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif // endif + } +} + +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +extern dhd_pub_t *link_recovery; +void dhd_host_recover_link(void) +{ + DHD_ERROR(("****** %s ******\n", __FUNCTION__)); + link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_bus_set_linkdown(link_recovery, TRUE); + dhd_os_send_hang_message(link_recovery); +} +EXPORT_SYMBOL(dhd_host_recover_link); +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + +int dhd_os_send_hang_message(dhd_pub_t *dhdp) +{ + int ret = 0; + + if (dhdp) { +#ifdef WL_CFG80211 + struct net_device *primary_ndev; + struct bcm_cfg80211 *cfg; + + primary_ndev = dhd_linux_get_primary_netdev(dhdp); + if (!primary_ndev) { + DHD_ERROR(("%s: Cannot find primary netdev\n", + __FUNCTION__)); + return -ENODEV; + } + + cfg = wl_get_cfg(primary_ndev); + if (!cfg) { + DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__)); + return -EINVAL; + } + + /* Skip sending HANG event to framework if driver is not ready */ + if (!wl_get_drv_status(cfg, READY, primary_ndev)) { + DHD_ERROR(("%s: device is not ready\n", __FUNCTION__)); + return -ENODEV; + } +#endif /* WL_CFG80211 */ + + if (!dhdp->hang_was_sent) { +#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG) + dhdp->hang_counts++; + if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) { + DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n", + __func__, dhdp->hang_counts)); + BUG_ON(1); + } +#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ +#ifdef DHD_DEBUG_UART + /* If PCIe lane has broken, execute the debug uart application + * to gether a ramdump data from dongle via uart + */ + if (!dhdp->info->duart_execute) { + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void 
*)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP, + dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH); + } +#endif /* DHD_DEBUG_UART */ + dhdp->hang_was_sent = 1; +#ifdef BT_OVER_SDIO + dhdp->is_bt_recovery_required = TRUE; +#endif // endif + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp, + DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH); + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate)); + } + } + return ret; +} + +int net_os_send_hang_message(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + /* Report FW problem when enabled */ + if (dhd->pub.hang_report) { +#ifdef BT_OVER_SDIO + if (netif_running(dev)) { +#endif /* BT_OVER_SDIO */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + ret = dhd_os_send_hang_message(&dhd->pub); +#else + ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif // endif +#ifdef BT_OVER_SDIO + } + DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__)); + bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev)); +#endif /* BT_OVER_SDIO */ + } else { + DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n", + __FUNCTION__)); + } + } + return ret; +} + +int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num) +{ + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + int reason; + + dhd = DHD_DEV_INFO(dev); + if (dhd) { + dhdp = &dhd->pub; + } + + if (!dhd || !dhdp) { + return 0; + } + + reason = bcm_strtoul(string_num, NULL, 0); + DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason)); + + if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + reason = 0; + } + + dhdp->hang_reason = reason; + + return net_os_send_hang_message(dev); +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ + +int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long 
delay_msec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return wifi_platform_set_power(dhd->adapter, on, delay_msec); +} + +bool dhd_force_country_change(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd && dhd->pub.up) + return dhd->pub.force_country_change; + return FALSE; +} + +void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, + wl_country_t *cspec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (!dhd->pub.is_blob) +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { +#if defined(CUSTOM_COUNTRY_CODE) + get_customized_country_code(dhd->adapter, country_iso_code, cspec, + dhd->pub.dhd_cflags); +#else + get_customized_country_code(dhd->adapter, country_iso_code, cspec); +#endif /* CUSTOM_COUNTRY_CODE */ + } +#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE) + else { + /* Replace the ccode to XZ if ccode is undefined country */ + if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) { + strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ); + strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ); + strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ); + DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code)); + } + } +#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */ + + BCM_REFERENCE(dhd); +} + +void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif // endif + + if (dhd && dhd->pub.up) { + memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); +#ifdef WL_CFG80211 + wl_update_wiphybands(cfg, notify); +#endif // endif + } +} + +void dhd_bus_band_set(struct net_device *dev, uint band) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif // endif + if (dhd && dhd->pub.up) { +#ifdef WL_CFG80211 + 
wl_update_wiphybands(cfg, true); +#endif // endif + } +} + +int dhd_net_set_fw_path(struct net_device *dev, char *fw) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!fw || fw[0] == '\0') + return -EINVAL; + + strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1); + dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0'; + +#if defined(SOFTAP) + if (strstr(fw, "apsta") != NULL) { + DHD_INFO(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } else { + DHD_INFO(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; + } +#endif // endif + return 0; +} + +void dhd_net_if_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_lock_local(dhd); +} + +void dhd_net_if_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_unlock_local(dhd); +} + +static void dhd_net_if_lock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_lock(&dhd->dhd_net_if_mutex); +#endif // endif +} + +static void dhd_net_if_unlock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_unlock(&dhd->dhd_net_if_mutex); +#endif // endif +} + +static void dhd_suspend_lock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_lock(&dhd->dhd_suspend_mutex); +#endif // endif +} + +static void dhd_suspend_unlock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_unlock(&dhd->dhd_suspend_mutex); +#endif // endif +} + +unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags = 0; + + if (dhd) + spin_lock_irqsave(&dhd->dhd_lock, flags); + + return flags; +} + +void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) + spin_unlock_irqrestore(&dhd->dhd_lock, flags); 
+} + +/* Linux specific multipurpose spinlock API */ +void * +dhd_os_spin_lock_init(osl_t *osh) +{ + /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */ + /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */ + /* and this results in kernel asserts in internal builds */ + spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4); + if (lock) + spin_lock_init(lock); + return ((void *)lock); +} +void +dhd_os_spin_lock_deinit(osl_t *osh, void *lock) +{ + if (lock) + MFREE(osh, lock, sizeof(spinlock_t) + 4); +} +unsigned long +dhd_os_spin_lock(void *lock) +{ + unsigned long flags = 0; + + if (lock) + spin_lock_irqsave((spinlock_t *)lock, flags); + + return flags; +} +void +dhd_os_spin_unlock(void *lock, unsigned long flags) +{ + if (lock) + spin_unlock_irqrestore((spinlock_t *)lock, flags); +} + +void * +dhd_os_dbgring_lock_init(osl_t *osh) +{ + struct mutex *mtx = NULL; + + mtx = MALLOCZ(osh, sizeof(*mtx)); + if (mtx) + mutex_init(mtx); + + return mtx; +} + +void +dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx) +{ + if (mtx) { + mutex_destroy(mtx); + MFREE(osh, mtx, sizeof(struct mutex)); + } +} + +static int +dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +{ + return (atomic_read(&dhd->pend_8021x_cnt)); +} + +#define MAX_WAIT_FOR_8021X_TX 100 + +int +dhd_wait_pend8021x(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int timeout = msecs_to_jiffies(10); + int ntimes = MAX_WAIT_FOR_8021X_TX; + int pend = dhd_get_pend_8021x_cnt(dhd); + + while (ntimes && pend) { + if (pend) { + set_current_state(TASK_INTERRUPTIBLE); + DHD_PERIM_UNLOCK(&dhd->pub); + schedule_timeout(timeout); + DHD_PERIM_LOCK(&dhd->pub); + set_current_state(TASK_RUNNING); + ntimes--; + } + pend = dhd_get_pend_8021x_cnt(dhd); + } + if (ntimes == 0) + { + atomic_set(&dhd->pend_8021x_cnt, 0); + DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__)); + } + return pend; +} + +#if defined(DHD_DEBUG) +int write_file(const char * file_name, uint32 flags, uint8 *buf, int size) +{ + int ret = 0; + 
struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* open file to write */ + fp = filp_open(file_name, flags, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp))); + goto exit; + } + + /* Write buf to file */ + ret = vfs_write(fp, buf, size, &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; + } + + /* Sync file from filesystem to physical media */ + ret = vfs_fsync(fp, 0); + if (ret < 0) { + DHD_ERROR(("sync file error, error = %d\n", ret)); + goto exit; + } + ret = BCME_OK; + +exit: + /* close file before return */ + if (!IS_ERR(fp)) + filp_close(fp, current->files); + + /* restore previous address limit */ + set_fs(old_fs); + + return ret; +} +#endif // endif + +#ifdef DHD_DEBUG +static void +dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type) +{ + char *type_str = NULL; + + switch (type) { + case DUMP_TYPE_RESUMED_ON_TIMEOUT: + type_str = "resumed_on_timeout"; + break; + case DUMP_TYPE_D3_ACK_TIMEOUT: + type_str = "D3_ACK_timeout"; + break; + case DUMP_TYPE_DONGLE_TRAP: + type_str = "Dongle_Trap"; + break; + case DUMP_TYPE_MEMORY_CORRUPTION: + type_str = "Memory_Corruption"; + break; + case DUMP_TYPE_PKTID_AUDIT_FAILURE: + type_str = "PKTID_AUDIT_Fail"; + break; + case DUMP_TYPE_PKTID_INVALID: + type_str = "PKTID_INVALID"; + break; + case DUMP_TYPE_SCAN_TIMEOUT: + type_str = "SCAN_timeout"; + break; + case DUMP_TYPE_SCAN_BUSY: + type_str = "SCAN_Busy"; + break; + case DUMP_TYPE_BY_SYSDUMP: + if (substr_type == CMD_UNWANTED) { + type_str = "BY_SYSDUMP_FORUSER_unwanted"; + } else if (substr_type == CMD_DISCONNECTED) { + type_str = "BY_SYSDUMP_FORUSER_disconnected"; + } else { + type_str = "BY_SYSDUMP_FORUSER"; + } + break; + case DUMP_TYPE_BY_LIVELOCK: + type_str = "BY_LIVELOCK"; + break; + case DUMP_TYPE_AP_LINKUP_FAILURE: + type_str = "BY_AP_LINK_FAILURE"; + break; + 
case DUMP_TYPE_AP_ABNORMAL_ACCESS: + type_str = "INVALID_ACCESS"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX: + type_str = "ERROR_RX_TIMED_OUT"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX: + type_str = "ERROR_TX_TIMED_OUT"; + break; + case DUMP_TYPE_CFG_VENDOR_TRIGGERED: + type_str = "CFG_VENDOR_TRIGGERED"; + break; + case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR: + type_str = "BY_INVALID_RING_RDWR"; + break; + case DUMP_TYPE_IFACE_OP_FAILURE: + type_str = "BY_IFACE_OP_FAILURE"; + break; + case DUMP_TYPE_TRANS_ID_MISMATCH: + type_str = "BY_TRANS_ID_MISMATCH"; + break; +#ifdef DEBUG_DNGL_INIT_FAIL + case DUMP_TYPE_DONGLE_INIT_FAILURE: + type_str = "DONGLE_INIT_FAIL"; + break; +#endif /* DEBUG_DNGL_INIT_FAIL */ + case DUMP_TYPE_DONGLE_HOST_EVENT: + type_str = "BY_DONGLE_HOST_EVENT"; + break; + case DUMP_TYPE_SMMU_FAULT: + type_str = "SMMU_FAULT"; + break; + case DUMP_TYPE_BY_USER: + type_str = "BY_USER"; + break; +#ifdef DHD_ERPOM + case DUMP_TYPE_DUE_TO_BT: + type_str = "DUE_TO_BT"; + break; +#endif /* DHD_ERPOM */ + case DUMP_TYPE_LOGSET_BEYOND_RANGE: + type_str = "LOGSET_BEYOND_RANGE"; + break; + default: + type_str = "Unknown_type"; + break; + } + + strncpy(buf, type_str, strlen(type_str)); + buf[strlen(type_str)] = 0; +} + +int +write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname) +{ + int ret = 0; + char memdump_path[128]; + char memdump_type[32]; + struct timeval curtime; + uint32 file_mode; + + /* Init file name */ + memset(memdump_path, 0, sizeof(memdump_path)); + memset(memdump_type, 0, sizeof(memdump_type)); + do_gettimeofday(&curtime); + dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd); + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld", + "/data/misc/wifi/", fname, memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; + + /* print SOCRAM dump file path */ + DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, 
memdump_path)); + +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size); +#endif /* DHD_LOG_DUMP */ + + /* Write file */ + ret = write_file(memdump_path, file_mode, buf, size); + +#ifdef DHD_DUMP_MNGR + if (ret == BCME_OK) { + dhd_dump_file_manage_enqueue(dhd, memdump_path, fname); + } +#endif /* DHD_DUMP_MNGR */ + + return ret; +} +#endif /* DHD_DEBUG */ + +int dhd_os_wake_lock_timeout(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? + dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd->wakelock_rx_timeout_enable) + wake_lock_timeout(&dhd->wl_rxwake, + msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); + if (dhd->wakelock_ctrl_timeout_enable) + wake_lock_timeout(&dhd->wl_ctrlwake, + msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); +#endif // endif + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int net_os_wake_lock_timeout(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_timeout(&dhd->pub); + return ret; +} + +int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_rx_timeout_enable) + dhd->wakelock_rx_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + 
unsigned long flags; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_ctrl_timeout_enable) + dhd->wakelock_ctrl_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + if (wake_lock_active(&dhd->wl_ctrlwake)) + wake_unlock(&dhd->wl_ctrlwake); +#endif // endif + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val); + return ret; +} + +int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val); + return ret; +} + +#if defined(DHD_TRACE_WAKE_LOCK) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#include +#else +#include +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +/* Define 2^5 = 32 bucket size hash table */ +DEFINE_HASHTABLE(wklock_history, 5); +#else +/* Define 2^5 = 32 bucket size hash table */ +struct hlist_head wklock_history[32] = { [0 ... 
31] = HLIST_HEAD_INIT }; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + +int trace_wklock_onoff = 1; +typedef enum dhd_wklock_type { + DHD_WAKE_LOCK, + DHD_WAKE_UNLOCK, + DHD_WAIVE_LOCK, + DHD_RESTORE_LOCK +} dhd_wklock_t; + +struct wk_trace_record { + unsigned long addr; /* Address of the instruction */ + dhd_wklock_t lock_type; /* lock_type */ + unsigned long long counter; /* counter information */ + struct hlist_node wklock_node; /* hash node */ +}; + +static struct wk_trace_record *find_wklock_entry(unsigned long addr) +{ + struct wk_trace_record *wklock_info; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr) +#else + struct hlist_node *entry; + int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history))); + hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + if (wklock_info->addr == addr) { + return wklock_info; + } + } + return NULL; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) +#define HASH_ADD(hashtable, node, key) \ + do { \ + hash_add(hashtable, node, key); \ + } while (0); +#else +#define HASH_ADD(hashtable, node, key) \ + do { \ + int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \ + hlist_add_head(node, &hashtable[index]); \ + } while (0); +#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */ + +#define STORE_WKLOCK_RECORD(wklock_type) \ + do { \ + struct wk_trace_record *wklock_info = NULL; \ + unsigned long func_addr = (unsigned long)__builtin_return_address(0); \ + wklock_info = find_wklock_entry(func_addr); \ + if (wklock_info) { \ + if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ + } else { \ + wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \ + if (!wklock_info) {\ + printk("Can't allocate wk_trace_record \n"); \ + } else { \ + 
wklock_info->addr = func_addr; \ + wklock_info->lock_type = wklock_type; \ + if (wklock_type == DHD_WAIVE_LOCK || \ + wklock_type == DHD_RESTORE_LOCK) { \ + wklock_info->counter = dhd->wakelock_counter; \ + } else { \ + wklock_info->counter++; \ + } \ + HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \ + } \ + } \ + } while (0); + +static inline void dhd_wk_lock_rec_dump(void) +{ + int bkt; + struct wk_trace_record *wklock_info; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each(wklock_history, bkt, wklock_info, wklock_node) +#else + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + switch (wklock_info->lock_type) { + case DHD_WAKE_LOCK: + printk("wakelock lock : %pS lock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + case DHD_WAKE_UNLOCK: + printk("wakelock unlock : %pS, unlock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + case DHD_WAIVE_LOCK: + printk("wakelock waive : %pS before_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + case DHD_RESTORE_LOCK: + printk("wakelock restore : %pS, after_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); + break; + } + } +} + +static void dhd_wk_lock_trace_init(struct dhd_info *dhd) +{ + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + int i; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_init(wklock_history); +#else + for (i = 0; i < ARRAY_SIZE(wklock_history); i++) + INIT_HLIST_HEAD(&wklock_history[i]); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +} + +static 
void dhd_wk_lock_trace_deinit(struct dhd_info *dhd) +{ + int bkt; + struct wk_trace_record *wklock_info; + struct hlist_node *tmp; + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node) +#else + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry_safe(wklock_info, entry, tmp, + &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_del(&wklock_info->wklock_node); +#else + hlist_del_init(&wklock_info->wklock_node); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + kfree(wklock_info); + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +} + +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + unsigned long flags; + + printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"); + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd_wk_lock_rec_dump(); + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + +} +#else +#define STORE_WKLOCK_RECORD(wklock_type) +#endif /* ! 
DHD_TRACE_WAKE_LOCK */ + +int dhd_os_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(pub); +#endif // endif + } +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + dhd->wakelock_counter++; + ret = dhd->wakelock_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + + return ret; +} + +void dhd_event_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(pub); +#endif // endif + } +} + +void +dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ +} + +void +dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ +} + +int net_os_wake_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock(&dhd->pub); + return ret; +} + +int dhd_os_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + dhd_os_wake_lock_timeout(pub); + if (dhd && (dhd->dhd_state & 
DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + if (dhd->wakelock_counter > 0) { + dhd->wakelock_counter--; +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(pub); +#endif // endif + } + ret = dhd->wakelock_counter; + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +void dhd_event_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(pub); +#endif // endif + } +} + +void dhd_pm_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_pmwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_pmwake)) { + wake_unlock(&dhd->wl_pmwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void dhd_txfl_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_txflwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_txflwake)) { + wake_unlock(&dhd->wl_txflwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +int dhd_os_check_wakelock(dhd_pub_t *pub) +{ +#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ + KERNEL_VERSION(2, 6, 36))) + dhd_info_t *dhd; + + if (!pub) + return 0; + dhd = (dhd_info_t *)(pub->info); +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + /* Indicate to the SD Host to avoid going to suspend if internal locks are up */ + if (dhd && (wake_lock_active(&dhd->wl_wifi) || + 
(wake_lock_active(&dhd->wl_wdwake)))) + return 1; +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) + return 1; +#endif // endif + return 0; +} + +int +dhd_os_check_wakelock_all(dhd_pub_t *pub) +{ +#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ + KERNEL_VERSION(2, 6, 36))) +#if defined(CONFIG_HAS_WAKELOCK) + int l1, l2, l3, l4, l7, l8, l9; + int l5 = 0, l6 = 0; + int c, lock_active; +#endif /* CONFIG_HAS_WAKELOCK */ + dhd_info_t *dhd; + + if (!pub) { + return 0; + } + dhd = (dhd_info_t *)(pub->info); + if (!dhd) { + return 0; + } +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + c = dhd->wakelock_counter; + l1 = wake_lock_active(&dhd->wl_wifi); + l2 = wake_lock_active(&dhd->wl_wdwake); + l3 = wake_lock_active(&dhd->wl_rxwake); + l4 = wake_lock_active(&dhd->wl_ctrlwake); + l7 = wake_lock_active(&dhd->wl_evtwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + l5 = wake_lock_active(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + l6 = wake_lock_active(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ + l8 = wake_lock_active(&dhd->wl_pmwake); + l9 = wake_lock_active(&dhd->wl_txflwake); + lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9); + + /* Indicate to the Host to avoid going to suspend if internal locks are up */ + if (lock_active) { + DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " + "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n", + __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9)); + return 1; + } +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { + return 1; + } +#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ + return 0; +} + +int net_os_wake_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int 
ret = 0; + + if (dhd) + ret = dhd_os_wake_unlock(&dhd->pub); + return ret; +} + +int dhd_os_wd_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + /* if wakelock_wd_counter was never used : lock it at once */ + wake_lock(&dhd->wl_wdwake); +#endif // endif + } + dhd->wakelock_wd_counter++; + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wd_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter > 0) { + dhd->wakelock_wd_counter = 0; + if (!dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wdwake); +#endif // endif + } + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +void +dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void +dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_intrwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_intrwake)) { + wake_unlock(&dhd->wl_intrwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DHD_USE_SCAN_WAKELOCK +void +dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val)); + } +#endif /* 
CONFIG_HAS_WAKELOCK */ +} + +void +dhd_os_scan_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_scanwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_scanwake)) { + wake_unlock(&dhd->wl_scanwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* DHD_USE_SCAN_WAKELOCK */ + +/* waive wakelocks for operations such as IOVARs in suspend function, must be closed + * by a paired function call to dhd_wakelock_restore. returns current wakelock counter + */ +int dhd_os_wake_lock_waive(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (dhd->waive_wakelock == FALSE) { +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + /* record current lock status */ + dhd->wakelock_before_waive = dhd->wakelock_counter; + dhd->waive_wakelock = TRUE; + } + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wake_lock_restore(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (!dhd) + return 0; + if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0) + return 0; + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (!dhd->waive_wakelock) + goto exit; + + dhd->waive_wakelock = FALSE; + /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore, + * we need to make it up by calling wake_lock or pm_stay_awake. 
or if somebody releases + * the lock in between, do the same by calling wake_unlock or pm_relax + */ +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + + if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(&dhd->pub); +#endif // endif + } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(&dhd->pub); +#endif // endif + } + dhd->wakelock_before_waive = 0; +exit: + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + return ret; +} + +void dhd_os_wake_lock_init(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + /* wakelocks prevent a system from going into a low power state */ +#ifdef CONFIG_HAS_WAKELOCK + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); + wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); + wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); + wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake"); + wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake"); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake"); +#endif /* DHD_USE_SCAN_WAKELOCK */ 
+#endif /* CONFIG_HAS_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_init(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +} + +void dhd_os_wake_lock_destroy(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry + wake_lock_destroy(&dhd->wl_rxwake); + wake_lock_destroy(&dhd->wl_ctrlwake); + wake_lock_destroy(&dhd->wl_evtwake); + wake_lock_destroy(&dhd->wl_pmwake); + wake_lock_destroy(&dhd->wl_txflwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_destroy(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_destroy(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_deinit(dhd); +#endif /* DHD_TRACE_WAKE_LOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ +} + +bool dhd_os_check_if_up(dhd_pub_t *pub) +{ + if (!pub) + return FALSE; + return pub->up; +} + +/* function to collect firmware, chip id and chip version info */ +void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) +{ + int i; + + i = snprintf(info_string, sizeof(info_string), + " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version); + printf("%s\n", info_string); + + if (!dhdp) + return; + + i = snprintf(&info_string[i], sizeof(info_string) - i, + "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp), + dhd_conf_get_chiprev(dhdp)); +} + +int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) +{ + int ifidx; + int ret = 0; + dhd_info_t *dhd = NULL; + + if (!net || !DEV_PRIV(net)) { + DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n", + __FUNCTION__, net, DEV_PRIV(net))); + return -EINVAL; + } + + dhd = DHD_DEV_INFO(net); + if (!dhd) + return -EINVAL; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == 
DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len); + dhd_check_hang(net, &dhd->pub, ret); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return ret; +} + +bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret) +{ + struct net_device *net; + + net = dhd_idx2net(dhdp, ifidx); + if (!net) { + DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + return dhd_check_hang(net, dhdp, ret); +} + +/* Return instance */ +int dhd_get_instance(dhd_pub_t *dhdp) +{ + return dhdp->info->unit; +} + +#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP) +#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */ +int dhd_deepsleep(struct net_device *dev, int flag) +{ + char iovbuf[20]; + uint powervar = 0; + dhd_info_t *dhd; + dhd_pub_t *dhdp; + int cnt = 0; + int ret = 0; + + dhd = DHD_DEV_INFO(dev); + dhdp = &dhd->pub; + + switch (flag) { + case 1 : /* Deepsleep on */ + DHD_ERROR(("[WiFi] Deepsleep On\n")); + /* give some time to sysioc_work before deepsleep */ + OSL_SLEEP(200); +#ifdef PKT_FILTER_SUPPORT + /* disable pkt filter */ + dhd_enable_packet_filter(0, dhdp); +#endif /* PKT_FILTER_SUPPORT */ + /* Disable MPC */ + powervar = 0; + ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL, + 0, TRUE); + + /* Enable Deepsleep */ + powervar = 1; + ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar), + NULL, 0, TRUE); + break; + + case 0: /* Deepsleep Off */ + DHD_ERROR(("[WiFi] Deepsleep Off\n")); + + /* Disable Deepsleep */ + for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) { + powervar = 0; + ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, + sizeof(powervar), NULL, 0, TRUE); + + ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, + sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE); + if (ret < 0) { + 
DHD_ERROR(("the error of dhd deepsleep status" + " ret value :%d\n", ret)); + } else { + if (!(*(int *)iovbuf)) { + DHD_ERROR(("deepsleep mode is 0," + " count: %d\n", cnt)); + break; + } + } + } + + /* Enable MPC */ + powervar = 1; + ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL, + 0, TRUE); + break; + } + + return 0; +} +#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */ + +#ifdef PROP_TXSTATUS + +void dhd_wlfc_plat_init(void *dhd) +{ +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + return; +} + +void dhd_wlfc_plat_deinit(void *dhd) +{ +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + return; +} + +bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx) +{ +#ifdef SKIP_WLFC_ON_CONCURRENT + +#ifdef WL_CFG80211 + struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx); + if (net) + /* enable flow control in vsdb mode */ + return !(wl_cfg80211_is_concurrent_mode(net)); +#else + return TRUE; /* skip flow control */ +#endif /* WL_CFG80211 */ + +#else + return FALSE; +#endif /* SKIP_WLFC_ON_CONCURRENT */ + return FALSE; +} +#endif /* PROP_TXSTATUS */ + +#ifdef BCMDBGFS +#include + +typedef struct dhd_dbgfs { + struct dentry *debugfs_dir; + struct dentry *debugfs_mem; + dhd_pub_t *dhdp; + uint32 size; +} dhd_dbgfs_t; + +dhd_dbgfs_t g_dbgfs; + +extern uint32 dhd_readregl(void *bp, uint32 addr); +extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data); + +static int +dhd_dbg_state_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t +dhd_dbg_state_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + ssize_t rval; + uint32 tmp; + loff_t pos = *ppos; + size_t ret; + + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > 
g_dbgfs.size - pos) + count = g_dbgfs.size - pos; + + /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */ + tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3)); + + ret = copy_to_user(ubuf, &tmp, 4); + if (ret == count) + return -EFAULT; + + count -= ret; + *ppos = pos + count; + rval = count; + + return rval; +} + +static ssize_t +dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t ret; + uint32 buf; + + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; + + ret = copy_from_user(&buf, ubuf, sizeof(uint32)); + if (ret == count) + return -EFAULT; + + /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */ + dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf); + + return count; +} + +loff_t +dhd_debugfs_lseek(struct file *file, loff_t off, int whence) +{ + loff_t pos = -1; + + switch (whence) { + case 0: + pos = off; + break; + case 1: + pos = file->f_pos + off; + break; + case 2: + pos = g_dbgfs.size - off; + } + return (pos < 0 || pos > g_dbgfs.size) ? 
-EINVAL : (file->f_pos = pos); +} + +static const struct file_operations dhd_dbg_state_ops = { + .read = dhd_dbg_state_read, + .write = dhd_debugfs_write, + .open = dhd_dbg_state_open, + .llseek = dhd_debugfs_lseek +}; + +static void dhd_dbgfs_create(void) +{ + if (g_dbgfs.debugfs_dir) { + g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, + NULL, &dhd_dbg_state_ops); + } +} + +void dhd_dbgfs_init(dhd_pub_t *dhdp) +{ + g_dbgfs.dhdp = dhdp; + g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ + + g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); + if (IS_ERR(g_dbgfs.debugfs_dir)) { + g_dbgfs.debugfs_dir = NULL; + return; + } + + dhd_dbgfs_create(); + + return; +} + +void dhd_dbgfs_remove(void) +{ + debugfs_remove(g_dbgfs.debugfs_mem); + debugfs_remove(g_dbgfs.debugfs_dir); + + bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); +} +#endif /* BCMDBGFS */ + +#ifdef CUSTOM_SET_CPUCORE +void dhd_set_cpucore(dhd_pub_t *dhd, int set) +{ + int e_dpc = 0, e_rxf = 0, retry_set = 0; + + if (!(dhd->chan_isvht80)) { + DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80)); + return; + } + + if (DPC_CPUCORE) { + do { + if (set == TRUE) { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(DPC_CPUCORE)); + } else { + e_dpc = set_cpus_allowed_ptr(dhd->current_dpc, + cpumask_of(PRIMARY_CPUCORE)); + } + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc)); + return; + } + if (e_dpc < 0) + OSL_SLEEP(1); + } while (e_dpc < 0); + } + if (RXF_CPUCORE) { + do { + if (set == TRUE) { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + cpumask_of(RXF_CPUCORE)); + } else { + e_rxf = set_cpus_allowed_ptr(dhd->current_rxf, + cpumask_of(PRIMARY_CPUCORE)); + } + if (retry_set++ > MAX_RETRY_SET_CPUCORE) { + DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf)); + return; + } + if (e_rxf < 0) + OSL_SLEEP(1); + } while (e_rxf < 0); + } +#ifdef DHD_OF_SUPPORT 
+ interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE); +#endif /* DHD_OF_SUPPORT */ + DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set)); + + return; +} +#endif /* CUSTOM_SET_CPUCORE */ + +#ifdef DHD_MCAST_REGEN +/* Get interface specific ap_isolate configuration */ +int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->mcast_regen_bss_enable; +} + +/* Set interface specific mcast_regen configuration */ +int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ifp->mcast_regen_bss_enable = val; + + /* Disable rx_pkt_chain feature for interface, if mcast_regen feature + * is enabled + */ + dhd_update_rx_pkt_chainable_state(dhdp, idx); + return BCME_OK; +} +#endif /* DHD_MCAST_REGEN */ + +/* Get interface specific ap_isolate configuration */ +int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->ap_isolate; +} + +/* Set interface specific ap_isolate configuration */ +int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + ifp->ap_isolate = val; + + return 0; +} + +#ifdef DHD_FW_COREDUMP +#if defined(CONFIG_X86) +#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info" +#define MEMDUMPINFO_INST "/data/.memdump.info" +#endif /* CONFIG_X86 && OEM_ANDROID */ + +#define MEMDUMPINFO "/data/misc/wifi/.memdump.info" + +void dhd_get_memdump_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + uint32 mem_val = DUMP_MEMFILE_MAX; + int ret = 0; + char *filepath = MEMDUMPINFO; + + /* Read memdump info from the file */ + fp = filp_open(filepath, 
O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); +#if defined(CONFIG_X86) + /* Check if it is Live Brix Image */ + if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) { + goto done; + } + /* Try if it is Installed Brix Image */ + filepath = MEMDUMPINFO_INST; + DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath)); + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } +#else /* Non Brix Android platform */ + goto done; +#endif /* CONFIG_X86 && OEM_ANDROID */ + } + + /* Handle success case */ + ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + mem_val = bcm_atoi((char *)&mem_val); + + filp_close(fp, NULL); + +#ifdef DHD_INIT_DEFAULT_MEMDUMP + if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX) + mem_val = DUMP_MEMFILE_BUGON; +#endif /* DHD_INIT_DEFAULT_MEMDUMP */ + +done: + dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? 
mem_val : DUMP_MEMFILE; + + DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled)); +} + +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) +{ + unsigned long flags = 0; + dhd_dump_t *dump = NULL; + dhd_info_t *dhd_info = NULL; + dhd_info = (dhd_info_t *)dhdp->info; + dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t)); + if (dump == NULL) { + DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__)); + return; + } + dump->buf = buf; + dump->bufsize = size; +#ifdef BCMPCIE + if (dhdp->hscb_enable) { + dhd_get_hscb_info(dhdp->prot, (void*)(&dump->hscb_buf), + (uint32 *)(&dump->hscb_bufsize)); + } + else +#endif /* BCMPCIE */ + { + dump->hscb_bufsize = 0; + } + +#ifdef DHD_LOG_DUMP + dhd_print_buf_addr(dhdp, "memdump", buf, size); +#endif /* DHD_LOG_DUMP */ + + if (dhdp->memdump_enabled == DUMP_MEMONLY) { + BUG_ON(1); + } + +#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) + if ( +#if defined(DEBUG_DNGL_INIT_FAIL) + (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) || +#endif /* DEBUG_DNGL_INIT_FAIL */ +#ifdef DHD_ERPOM + (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) || +#endif /* DHD_ERPOM */ + FALSE) + { +#ifdef DHD_LOG_DUMP + log_dump_type_t *flush_type = NULL; +#endif // endif + dhd_info->scheduled_memdump = FALSE; + dhd_mem_dump((void *)dhdp->info, (void *)dump, 0); + /* for dongle init fail cases, 'dhd_mem_dump' does + * not call 'dhd_log_dump', so call it here. + */ +#ifdef DHD_LOG_DUMP + flush_type = MALLOCZ(dhdp->osh, + sizeof(log_dump_type_t)); + if (flush_type) { + *flush_type = DLD_BUF_TYPE_ALL; + DHD_ERROR(("%s: calling log dump.. 
\n", __FUNCTION__)); + dhd_log_dump(dhdp->info, flush_type, 0); + } +#endif /* DHD_LOG_DUMP */ + return; + } +#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */ + + dhd_info->scheduled_memdump = TRUE; + /* bus busy bit for mem dump will be cleared in mem dump + * work item context, after mem dump file is written + */ + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, + DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} + +static void +dhd_mem_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp = NULL; + dhd_dump_t *dump = event_info; + unsigned long flags = 0; + + DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type)); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, flags); + DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__)); + goto exit; + } + DHD_GENERAL_UNLOCK(dhdp, flags); + + if (!dump) { + DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__)); + goto exit; + } + + if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) { + DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success = FALSE; +#endif /* DHD_DEBUG_UART */ + } + + /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue + * context, no need to schedule another work queue for log dump. In case of + * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP), + * cfg layer is itself scheduling the log_dump work queue. + * that path is not disturbed. 
If 'dhd_mem_dump' is called directly then we will not + * collect debug_dump as it may be called from non-sleepable context. + */ +#ifdef DHD_LOG_DUMP + if (dhd->scheduled_memdump && + dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) { + log_dump_type_t *flush_type = MALLOCZ(dhdp->osh, + sizeof(log_dump_type_t)); + if (flush_type) { + *flush_type = DLD_BUF_TYPE_ALL; + DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__)); + dhd_log_dump(dhd, flush_type, 0); + } + } +#endif /* DHD_LOG_DUMP */ + + clear_debug_dump_time(dhdp->debug_dump_time_str); + + /* before calling bug on, wait for other logs to be dumped. + * we cannot wait in case dhd_mem_dump is called directly + * as it may not be in a sleepable context + */ + if (dhd->scheduled_memdump) { + uint bitmask = 0; + int timeleft = 0; +#ifdef DHD_SSSR_DUMP + bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP; +#endif // endif + if (bitmask != 0) { + DHD_ERROR(("%s: wait for SSSR dump..\n", __FUNCTION__)); + timeleft = dhd_os_busbusy_wait_bitmask(dhdp, + &dhdp->dhd_bus_busy_state, bitmask, 0); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s:Timed out on sssr dump,dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + } + } + } + + if (dump->hscb_buf && dump->hscb_bufsize) { + DHD_ERROR(("%s: write HSCB dump... 
\n", __FUNCTION__)); + if (write_dump_to_file(&dhd->pub, dump->hscb_buf, + dump->hscb_bufsize, "mem_dump_hscb")) { + DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__)); +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success = FALSE; +#endif /* DHD_DEBUG_UART */ + } + } + + DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type)); + if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && +#ifdef DHD_LOG_DUMP + dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && +#endif /* DHD_LOG_DUMP */ + dhd->pub.memdump_type != DUMP_TYPE_BY_USER && +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success == TRUE && +#endif /* DHD_DEBUG_UART */ +#ifdef DNGL_EVENT_SUPPORT + dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT && +#endif /* DNGL_EVENT_SUPPORT */ + dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) { + +#ifdef SHOW_LOGTRACE + /* Wait till logtrace context is flushed */ + dhd_flush_logtrace_process(dhd); +#endif /* SHOW_LOGTRACE */ + + DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__)); + BUG_ON(1); + } + DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type)); + +exit: + if (dump) + MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + dhd->scheduled_memdump = FALSE; + DHD_ERROR(("%s: EXIT \n", __FUNCTION__)); +} +#endif /* DHD_FW_COREDUMP */ + +#ifdef DHD_SSSR_DUMP + +static void +dhd_sssr_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp; + int i; + char before_sr_dump[128]; + char after_sr_dump[128]; + unsigned long flags = 0; + uint dig_buf_size = 0; + + DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_GENERAL_UNLOCK(dhdp, flags); + 
DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__)); + goto exit; + } + DHD_GENERAL_UNLOCK(dhdp, flags); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + /* Init file name */ + memset(before_sr_dump, 0, sizeof(before_sr_dump)); + memset(after_sr_dump, 0, sizeof(after_sr_dump)); + + snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s", + "sssr_core", i, "before_SR"); + snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s", + "sssr_core", i, "after_SR"); + + if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] && + (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i], + dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) { + DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n", + __FUNCTION__)); + } + } + if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i], + dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) { + DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n", + __FUNCTION__)); + } + } + } + + if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) { + dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size; + } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) { + dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size; + } + + if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before, + dig_buf_size, "sssr_dig_before_SR")) { + DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n", + __FUNCTION__)); + } + } + + if (dhdp->sssr_dig_buf_after) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after, + dig_buf_size, "sssr_dig_after_SR")) { + DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n", + __FUNCTION__)); + } + } + +exit: + 
DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); +} + +void +dhd_schedule_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode) +{ + unsigned long flags = 0; + + dhdp->sssr_dump_mode = dump_mode; + + /* bus busy bit for sssr dump will be cleared in sssr dump + * work item context, after sssr dump files are created + */ + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + + if (dhdp->info->no_wq_sssrdump) { + dhd_sssr_dump(dhdp->info, 0, 0); + return; + } + + DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL, + DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_LOG_DUMP +static void +dhd_log_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + log_dump_type_t *type = (log_dump_type_t *)event_info; + + if (!dhd || !type) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef WL_CFG80211 + /* flush the fw side logs */ + wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub), + FW_LOGSET_MASK_ALL); +#endif // endif + /* there are currently 3 possible contexts from which + * log dump can be scheduled - + * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command + * 3.HEALTH CHECK event + * The concise debug info buffer is a shared resource + * and in case a trap is one of the contexts then both the + * scheduled work queues need to run because trap data is + * essential for debugging. Hence a mutex lock is acquired + * before calling do_dhd_log_dump(). + */ + DHD_ERROR(("%s: calling log dump.. 
\n", __FUNCTION__)); + dhd_os_logdump_lock(&dhd->pub); + DHD_OS_WAKE_LOCK(&dhd->pub); + if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) { + DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__)); + } + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_os_logdump_unlock(&dhd->pub); +} + +void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type) +{ + DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__)); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + type, DHD_WQ_WORK_DHD_LOG_DUMP, + dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} + +static void +dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size) +{ + if ((dhdp->memdump_enabled == DUMP_MEMONLY) || + (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)) { +#if defined(CONFIG_ARM64) + DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", + name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size)); +#elif defined(__ARM_ARCH_7A__) + DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", + name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size)); +#endif /* __ARM_ARCH_7A__ */ + } +} + +static void +dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type) +{ + int i; + unsigned long wr_size = 0; + struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0]; + size_t log_size = 0; + char buf_name[DHD_PRINT_BUF_NAME_LEN]; + dhd_dbg_ring_t *ring = NULL; + + BCM_REFERENCE(ring); + + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + log_size = (unsigned long)dld_buf->max - + (unsigned long)dld_buf->buffer; + if (dld_buf->wraparound) { + wr_size = log_size; + } else { + wr_size = (unsigned long)dld_buf->present - + (unsigned long)dld_buf->front; + } + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size); + scnprintf(buf_name, sizeof(buf_name), 
"dlb_buf[%d] present", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size); + scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i); + dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size); + } + +#ifdef EWP_ECNTRS_LOGGING + /* periodic flushing of ecounters is NOT supported */ + if (*type == DLD_BUF_TYPE_ALL && + logdump_ecntr_enable && + dhdp->ecntr_dbg_ring) { + + ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring; + dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE); + dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf, + LOG_DUMP_ECNTRS_MAX_BUFSIZE); + } +#endif /* EWP_ECNTRS_LOGGING */ + +#ifdef BCMPCIE + if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) { + dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } +#endif /* BCMPCIE */ + +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) + /* if health check event was received */ + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data, + HEALTH_CHK_BUF_SIZE); + } +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ + + /* append the concise debug information */ + if (dhdp->concise_dbg_buf) { + dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf, + CONCISE_DUMP_BUFLEN); + } +} + +/* Must hold 'dhd_os_logdump_lock' before calling this function ! 
*/ +static int +do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type) +{ + int ret = 0, i = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + unsigned int wr_size = 0; + char dump_path[128]; + uint32 file_mode; + unsigned long flags = 0; + struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0]; + size_t log_size = 0; + size_t fspace_remain = 0; + struct kstat stat; + char time_str[128]; + char *ts = NULL; + uint32 remain_len = 0; + log_dump_section_hdr_t sec_hdr; + dhd_info_t *dhd_info = NULL; + + DHD_ERROR(("%s: ENTER \n", __FUNCTION__)); + + /* if dhdp is null, its extremely unlikely that log dump will be scheduled + * so not freeing 'type' here is ok, even if we want to free 'type' + * we cannot do so, since 'dhdp->osh' is unavailable + * as dhdp is null + */ + if (!dhdp || !type) { + if (dhdp) { + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + } + return BCME_ERROR; + } + + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { + DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + MFREE(dhdp->osh, type, sizeof(*type)); + DHD_ERROR(("%s: bus is down! can't collect log dump. 
\n", __FUNCTION__)); + return BCME_ERROR; + } + DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + + dhd_info = (dhd_info_t *)dhdp->info; + /* in case of trap get preserve logs from ETD */ +#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS) + if (dhdp->dongle_trap_occured && + dhdp->extended_trap_data) { + dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data, + &dhd_info->event_data); + } +#endif /* BCMPCIE */ + + /* flush the event work items to get any fw events/logs + * flush_work is a blocking call + */ +#ifdef EWP_EDL + if (dhd_info->pub.dongle_edl_support) { + /* wait till existing edl items are processed */ + dhd_flush_logtrace_process(dhd_info); + /* dhd_flush_logtrace_process will ensure the work items in the ring + * (EDL ring) from rd to wr are processed. But if wr had + * wrapped around, only the work items from rd to ring-end are processed. + * So to ensure that the work items at the + * beginning of ring are also processed in the wrap around case, call + * it twice + */ + for (i = 0; i < 2; i++) { + /* blocks till the edl items are processed */ + dhd_flush_logtrace_process(dhd_info); + } + } else { + dhd_flush_logtrace_process(dhd_info); + } +#else + dhd_flush_logtrace_process(dhd_info); +#endif /* EWP_EDL */ + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Init file name */ + memset(dump_path, 0, sizeof(dump_path)); + switch (dhdp->debug_dump_subcmd) { + case CMD_UNWANTED: + snprintf(dump_path, sizeof(dump_path), "%s", + DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE + DHD_DUMP_SUBSTR_UNWANTED); + break; + case CMD_DISCONNECTED: + snprintf(dump_path, sizeof(dump_path), "%s", + DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE + DHD_DUMP_SUBSTR_DISCONNECTED); + break; + default: + snprintf(dump_path, sizeof(dump_path), "%s", + DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE); + } + + if (!dhdp->logdump_periodic_flush) { + get_debug_dump_time(dhdp->debug_dump_time_str); + snprintf(dump_path, 
sizeof(dump_path), "%s_" "%s", + dump_path, dhdp->debug_dump_time_str); + } + + memset(time_str, 0, sizeof(time_str)); + ts = dhd_log_dump_get_timestamp(); + snprintf(time_str, sizeof(time_str), + "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts); + + DHD_ERROR(("DHD version: %s\n", dhd_version)); + DHD_ERROR(("F/W version: %s\n", fw_version)); + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); + + dhd_log_dump_buf_addr(dhdp, type); + + /* if this is the first time after dhd is loaded, + * or, if periodic flush is disabled, clear the log file + */ + if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0) + file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC; + else + file_mode = O_CREAT | O_RDWR | O_SYNC; + + fp = filp_open(dump_path, file_mode, 0664); + if (IS_ERR(fp)) { + /* If android installed image, try '/data' directory */ +#if defined(CONFIG_X86) + DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n", + __FUNCTION__)); + snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE); + if (!dhdp->logdump_periodic_flush) { + snprintf(dump_path + strlen(dump_path), + sizeof(dump_path) - strlen(dump_path), + "_%s", dhdp->debug_dump_time_str); + } + fp = filp_open(dump_path, file_mode, 0664); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + DHD_ERROR(("open file error, err = %d\n", ret)); + goto exit; + } + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); +#else + ret = PTR_ERR(fp); + DHD_ERROR(("open file error, err = %d\n", ret)); + goto exit; +#endif /* CONFIG_X86 && OEM_ANDROID */ + } + + ret = vfs_stat(dump_path, &stat); + if (ret < 0) { + DHD_ERROR(("file stat error, err = %d\n", ret)); + goto exit; + } + + /* if some one else has changed the file */ + if (dhdp->last_file_posn != 0 && + stat.size < dhdp->last_file_posn) { + dhdp->last_file_posn = 0; + } + + if (dhdp->logdump_periodic_flush) { + log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr); + /* calculate the amount of space required to 
dump all logs */ + for (i = 0; i < DLD_BUFFER_NUM; ++i) { + if (*type != DLD_BUF_TYPE_ALL && i != *type) + continue; + + if (g_dld_buf[i].wraparound) { + log_size += (unsigned long)g_dld_buf[i].max + - (unsigned long)g_dld_buf[i].buffer; + } else { + spin_lock_irqsave(&g_dld_buf[i].lock, flags); + log_size += (unsigned long)g_dld_buf[i].present - + (unsigned long)g_dld_buf[i].front; + spin_unlock_irqrestore(&g_dld_buf[i].lock, flags); + } + log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr); + + if (*type != DLD_BUF_TYPE_ALL && i == *type) + break; + } + + ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR); + if (ret < 0) { + DHD_ERROR(("file seek last posn error ! err = %d \n", ret)); + goto exit; + } + pos = fp->f_pos; + + /* if the max file size is reached, wrap around to beginning of the file + * we're treating the file as a large ring buffer + */ + fspace_remain = logdump_max_filesize - pos; + if (log_size > fspace_remain) { + fp->f_pos -= pos; + pos = fp->f_pos; + } + } + /* write the timestamp hdr to the file first */ + ret = vfs_write(fp, time_str, strlen(time_str), &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; + } + + /* prep the section header */ + memset(&sec_hdr, 0, sizeof(sec_hdr)); + sec_hdr.magic = LOG_DUMP_MAGIC; + sec_hdr.timestamp = local_clock(); + + for (i = 0; i < DLD_BUFFER_NUM; ++i) { + unsigned int buf_size = 0; + + if (*type != DLD_BUF_TYPE_ALL && i != *type) + continue; + + /* calculate the length of the log */ + dld_buf = &g_dld_buf[i]; + buf_size = (unsigned long)dld_buf->max - + (unsigned long)dld_buf->buffer; + if (dld_buf->wraparound) { + wr_size = buf_size; + } else { + /* need to hold the lock before accessing 'present' and 'remain' ptrs */ + spin_lock_irqsave(&dld_buf->lock, flags); + wr_size = (unsigned long)dld_buf->present - + (unsigned long)dld_buf->front; + spin_unlock_irqrestore(&dld_buf->lock, flags); + } + + /* write the section header first */ + sec_hdr.type = 
dld_hdrs[i].sec_type; + sec_hdr.length = wr_size; + vfs_write(fp, dld_hdrs[i].hdr_str, strlen(dld_hdrs[i].hdr_str), &pos); + vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); + /* write the log */ + ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; + } + + /* re-init dhd_log_dump_buf structure */ + spin_lock_irqsave(&dld_buf->lock, flags); + dld_buf->wraparound = 0; + dld_buf->present = dld_buf->front; + dld_buf->remain = buf_size; + bzero(dld_buf->buffer, buf_size); + spin_unlock_irqrestore(&dld_buf->lock, flags); + + if (*type != DLD_BUF_TYPE_ALL) + break; + } + +#ifdef EWP_ECNTRS_LOGGING + /* periodic flushing of ecounters is NOT supported */ + if (*type == DLD_BUF_TYPE_ALL && + logdump_ecntr_enable && + dhdp->ecntr_dbg_ring) { + dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring, + fp, (unsigned long *)&pos, &sec_hdr); + } +#endif /* EWP_ECNTRS_LOGGING */ + +#ifdef BCMPCIE + /* append extended trap data to the file in case of traps */ + if (dhdp->dongle_trap_occured && + dhdp->extended_trap_data) { + /* write the section header first */ + vfs_write(fp, EXT_TRAP_LOG_HDR, strlen(EXT_TRAP_LOG_HDR), &pos); + sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP; + sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN; + vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); + /* write the log */ + ret = vfs_write(fp, (char *)dhdp->extended_trap_data, + BCMPCIE_EXT_TRAP_DATA_MAXLEN, &pos); + if (ret < 0) { + DHD_ERROR(("write file error of ext trap info," + " err = %d\n", ret)); + goto exit; + } + } +#endif /* BCMPCIE */ + +#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) + /* if health check event was received, dump to file */ + if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) { + /* write the section header first */ + vfs_write(fp, HEALTH_CHK_LOG_HDR, strlen(HEALTH_CHK_LOG_HDR), &pos); + sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK; + sec_hdr.length = HEALTH_CHK_BUF_SIZE; + 
vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); + /* write the log */ + ret = vfs_write(fp, (char *)dhdp->health_chk_event_data, + HEALTH_CHK_BUF_SIZE, &pos); + if (ret < 0) { + DHD_ERROR(("write file error of health chk info," + " err = %d\n", ret)); + goto exit; + } + } +#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */ + +#ifdef DHD_DUMP_PCIE_RINGS + /* write the section header first */ + vfs_write(fp, FLOWRING_DUMP_HDR, strlen(FLOWRING_DUMP_HDR), &pos); + /* Write the ring summary */ + ret = vfs_write(fp, dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN - remain_len, &pos); + if (ret < 0) { + DHD_ERROR(("write file error of concise debug info," + " err = %d\n", ret)); + goto exit; + } + sec_hdr.type = LOG_DUMP_SECTION_FLOWRING; + sec_hdr.length = ((H2DRING_TXPOST_ITEMSIZE + * H2DRING_TXPOST_MAX_ITEM) + + (D2HRING_TXCMPLT_ITEMSIZE + * D2HRING_TXCMPLT_MAX_ITEM) + + (H2DRING_RXPOST_ITEMSIZE + * H2DRING_RXPOST_MAX_ITEM) + + (D2HRING_RXCMPLT_ITEMSIZE + * D2HRING_RXCMPLT_MAX_ITEM) + + (H2DRING_CTRL_SUB_ITEMSIZE + * H2DRING_CTRL_SUB_MAX_ITEM) + + (D2HRING_CTRL_CMPLT_ITEMSIZE + * D2HRING_CTRL_CMPLT_MAX_ITEM) + + (H2DRING_INFO_BUFPOST_ITEMSIZE + * H2DRING_DYNAMIC_INFO_MAX_ITEM) + + (D2HRING_INFO_BUFCMPLT_ITEMSIZE + * D2HRING_DYNAMIC_INFO_MAX_ITEM)); + vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); + /* write the log */ + ret = dhd_d2h_h2d_ring_dump(dhdp, fp, (unsigned long *)&pos); + if (ret < 0) { + DHD_ERROR(("%s: error dumping ring data!\n", + __FUNCTION__)); + goto exit; + } +#endif /* DHD_DUMP_PCIE_RINGS */ + + /* append the concise debug information to the file. 
+ * This is the information which is seen + * when a 'dhd dump' iovar is fired + */ + if (dhdp->concise_dbg_buf) { + remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + if (remain_len <= 0) { + DHD_ERROR(("%s: error getting concise debug info !\n", + __FUNCTION__)); + goto exit; + } else { + /* write the section header first */ + vfs_write(fp, DHD_DUMP_LOG_HDR, strlen(DHD_DUMP_LOG_HDR), &pos); + sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP; + sec_hdr.length = CONCISE_DUMP_BUFLEN - remain_len; + vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos); + /* write the log */ + ret = vfs_write(fp, dhdp->concise_dbg_buf, + CONCISE_DUMP_BUFLEN - remain_len, &pos); + if (ret < 0) { + DHD_ERROR(("write file error of concise debug info," + " err = %d\n", ret)); + goto exit; + } + } + } + + if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) { + ret = dhd_log_dump_cookie_to_file(dhdp, fp, (unsigned long *)&pos); + if (ret < 0) { + DHD_ERROR(("write file error of cooke info, err = %d\n", ret)); + goto exit; + } + } + + if (dhdp->logdump_periodic_flush) { + /* store the last position written to in the file for future use */ + dhdp->last_file_posn = pos; + } + +exit: + MFREE(dhdp->osh, type, sizeof(*type)); + if (!IS_ERR(fp) && fp != NULL) { + filp_close(fp, NULL); + DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n", + __FUNCTION__, dump_path)); + } + set_fs(old_fs); + DHD_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + +#ifdef DHD_DUMP_MNGR + if (ret >= 0) { + dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE); + } +#endif /* DHD_DUMP_MNGR */ + + return (ret < 0) ? 
BCME_ERROR : BCME_OK; +} +#endif /* DHD_LOG_DUMP */ + +#ifdef BCMASSERT_LOG +#define ASSERTINFO "/data/misc/wifi/.assert.info" +void dhd_get_assert_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + char *filepath = ASSERTINFO; + int mem_val = -1; + + /* + * Read assert info from the file + * 0: Trigger Kernel crash by panic() + * 1: Print out the logs and don't trigger Kernel panic. (default) + * 2: Trigger Kernel crash by BUG() + * File doesn't exist: Keep default value (1). + */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + } else { + int ret = compat_kernel_read(fp, 0, (char *)&mem_val, 4); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + } else { + mem_val = bcm_atoi((char *)&mem_val); + DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); + } + filp_close(fp, NULL); + } + /* By default. set to 0, Kernel Panic */ + g_assert_type = (mem_val >= 0) ? mem_val : 0; +} +#endif /* BCMASSERT_LOG */ + +/* + * This call is to get the memdump size so that, + * halutil can alloc that much buffer in user space. 
+ */ +int +dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size) +{ + int ret = BCME_OK; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + + if (dhdp->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + return BCME_ERROR; + } + ret = dhd_common_socram_dump(dhdp); + if (ret == BCME_OK) { + *dump_size = dhdp->soc_ram_length; + } + return ret; +} + +/* + * This is to get the actual memdup after getting the memdump size + */ +int +dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size) +{ + int ret = BCME_OK; + int orig_len = 0; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + if (buf == NULL) + return BCME_ERROR; + orig_len = *size; + if (dhdp->soc_ram) { + if (orig_len >= dhdp->soc_ram_length) { + memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length); + /* reset the storage of dump */ + memset(dhdp->soc_ram, 0, dhdp->soc_ram_length); + *size = dhdp->soc_ram_length; + } else { + ret = BCME_BUFTOOSHORT; + DHD_ERROR(("The length of the buffer is too short" + " to save the memory dump with %d\n", dhdp->soc_ram_length)); + } + } else { + DHD_ERROR(("socram_dump is not ready to get\n")); + ret = BCME_NOTREADY; + } + return ret; +} + +int +dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size) +{ + char *fw_str; + + if (size == 0) + return BCME_BADARG; + + fw_str = strstr(info_string, "Firmware: "); + if (fw_str == NULL) { + return BCME_ERROR; + } + + memset(*buf, 0, size); + if (dhd_ver) { + strncpy(*buf, dhd_version, size - 1); + } else { + strncpy(*buf, fw_str, size - 1); + } + return BCME_OK; +} + +bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac) +{ + return dhd_find_sta(dhdp, 
bssidx, mac) ? TRUE : FALSE; +} + +#ifdef DHD_L2_FILTER +arp_table_t* +dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(bssidx < DHD_MAX_IFS); + + ifp = dhd->iflist[bssidx]; + return ifp->phnd_arp_table; +} + +int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + return ifp->parp_enable; + else + return FALSE; +} + +/* Set interface specific proxy arp configuration */ +int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + if (!ifp) + return BCME_ERROR; + + /* At present all 3 variables are being + * handled at once + */ + ifp->parp_enable = val; + ifp->parp_discard = val; + ifp->parp_allnode = val; + + /* Flush ARP entries when disabled */ + if (val == FALSE) { + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL, + FALSE, dhdp->tickcnt); + } + return BCME_OK; +} + +bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + return ifp->parp_discard; +} + +bool +dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->parp_allnode; +} + +int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->dhcp_unicast; +} + +int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + 
ifp->dhcp_unicast = val; + return BCME_OK; +} + +int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->block_ping; +} + +int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->block_ping = val; + /* Disable rx_pkt_chain feature for interface if block_ping option is + * enabled + */ + dhd_update_rx_pkt_chainable_state(dhdp, idx); + return BCME_OK; +} + +int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->grat_arp; +} + +int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->grat_arp = val; + + return BCME_OK; +} + +int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->block_tdls; +} + +int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->block_tdls = val; + + return BCME_OK; +} +#endif /* DHD_L2_FILTER */ + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + char * RPS_CPU_SETBUF; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + if (ifidx == PRIMARY_INF) { + if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) { 
+ DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS; + } else { + DHD_INFO(("%s : set for BSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK; + } + } else if (ifidx == VIRTUAL_INF) { + DHD_INFO(("%s : set for P2P.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P; + } else { + DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + ifp = dhd->iflist[ifidx]; + if (ifp) { + if (enable) { + DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF)); + custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF)); + } else { + custom_rps_map_clear(ifp->net->_rx); + } + } else { + DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__)); + return -ENODEV; + } + return BCME_OK; +} + +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len) +{ + struct rps_map *old_map, *map; + cpumask_var_t mask; + int err, cpu, i; + static DEFINE_SPINLOCK(rps_map_lock); + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); + if (err) { + free_cpumask_var(mask); + DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__)); + return err; + } + + map = kzalloc(max_t(unsigned int, + RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), + GFP_KERNEL); + if (!map) { + free_cpumask_var(mask); + DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + i = 0; + for_each_cpu(cpu, mask) { + map->cpus[i++] = cpu; + } + + if (i) { + map->len = i; + } else { + kfree(map); + map = NULL; + free_cpumask_var(mask); + DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__)); + return -1; + } + + spin_lock(&rps_map_lock); + old_map = rcu_dereference_protected(queue->rps_map, + lockdep_is_held(&rps_map_lock)); + rcu_assign_pointer(queue->rps_map, map); + 
spin_unlock(&rps_map_lock); + + if (map) { + static_key_slow_inc(&rps_needed); + } + if (old_map) { + kfree_rcu(old_map, rcu); + static_key_slow_dec(&rps_needed); + } + free_cpumask_var(mask); + + DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len)); + return map->len; +} + +void custom_rps_map_clear(struct netdev_rx_queue *queue) +{ + struct rps_map *map; + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + map = rcu_dereference_protected(queue->rps_map, 1); + if (map) { + RCU_INIT_POINTER(queue->rps_map, NULL); + kfree_rcu(map, rcu); + DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__)); + } +} +#endif // endif + +#if defined(ARGOS_NOTIFY_CB) +int +argos_register_notifier_init(struct net_device *net) +{ + int ret = 0; + + DHD_INFO(("DHD: %s: \n", __FUNCTION__)); + argos_rps_ctrl_data.wlan_primary_netdev = net; + argos_rps_ctrl_data.argos_rps_cpus_enabled = 0; + + if (argos_wifi.notifier_call == NULL) { + argos_wifi.notifier_call = argos_status_notifier_wifi_cb; + ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL); + if (ret < 0) { + DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret)); + goto exit; + } + } + + if (argos_p2p.notifier_call == NULL) { + argos_p2p.notifier_call = argos_status_notifier_p2p_cb; + ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL); + if (ret < 0) { + DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret)); + sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL); + goto exit; + } + } + + return 0; + +exit: + if (argos_wifi.notifier_call) { + argos_wifi.notifier_call = NULL; + } + + if (argos_p2p.notifier_call) { + argos_p2p.notifier_call = NULL; + } + + return ret; +} + +int +argos_register_notifier_deinit(void) +{ + DHD_INFO(("DHD: %s: \n", __FUNCTION__)); + + if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) { + DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__)); + return -1; + } +#ifndef DHD_LB + 
custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx); +#endif /* !DHD_LB */ + + if (argos_p2p.notifier_call) { + sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL); + argos_p2p.notifier_call = NULL; + } + + if (argos_wifi.notifier_call) { + sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL); + argos_wifi.notifier_call = NULL; + } + + argos_rps_ctrl_data.wlan_primary_netdev = NULL; + argos_rps_ctrl_data.argos_rps_cpus_enabled = 0; + + return 0; +} + +int +argos_status_notifier_wifi_cb(struct notifier_block *notifier, + unsigned long speed, void *v) +{ + dhd_info_t *dhd; + dhd_pub_t *dhdp; +#if defined(ARGOS_NOTIFY_CB) + unsigned int pcie_irq = 0; +#endif /* ARGOS_NOTIFY_CB */ + DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed)); + + if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) { + goto exit; + } + + dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev); + if (dhd == NULL) { + goto exit; + } + + dhdp = &dhd->pub; + if (dhdp == NULL || !dhdp->up) { + goto exit; + } +#if defined(ARGOS_NOTIFY_CB) + if (speed > PCIE_IRQ_AFFINITY_THRESHOLD) { + if (dhdpcie_get_pcieirq(dhd->pub.bus, &pcie_irq)) { + DHD_ERROR(("%s, Failed to get PCIe IRQ\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n", + __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE)); + irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE)); + } + } else { + dhd_irq_set_affinity(dhdp); + } +#endif /* ARGOS_NOTIFY_CB */ + /* Check if reported TPut value is more than threshold value */ + if (speed > RPS_TPUT_THRESHOLD) { + if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) { + /* It does not need to configre rps_cpus + * if Load Balance is enabled + */ +#ifndef DHD_LB + int err = 0; + + if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) { + err = custom_rps_map_set( + argos_rps_ctrl_data.wlan_primary_netdev->_rx, + RPS_CPUS_MASK, strlen(RPS_CPUS_MASK)); + } else { + DHD_ERROR(("DHD: %s: RPS_Set fail," + " Core=%d Offline\n", __FUNCTION__, + 
RPS_CPUS_WLAN_CORE_ID)); + err = -1; + } + + if (err < 0) { + DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. " + "speed=%ld, error=%d\n", + __FUNCTION__, speed, err)); + } else { +#endif /* !DHD_LB */ +#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) + if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) { + DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n", + __FUNCTION__, TCPACK_SUP_HOLD)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD); + } +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ + argos_rps_ctrl_data.argos_rps_cpus_enabled = 1; +#ifndef DHD_LB + DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n", + __FUNCTION__, speed)); + } +#endif /* !DHD_LB */ + } + } else { + if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) { +#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) + if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) { + DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n", + __FUNCTION__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ +#ifndef DHD_LB + /* It does not need to configre rps_cpus + * if Load Balance is enabled + */ + custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx); + DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed)); + OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS); +#endif /* !DHD_LB */ + argos_rps_ctrl_data.argos_rps_cpus_enabled = 0; + } + } + +exit: + return NOTIFY_OK; +} + +int +argos_status_notifier_p2p_cb(struct notifier_block *notifier, + unsigned long speed, void *v) +{ + DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed)); + return argos_status_notifier_wifi_cb(notifier, speed, v); +} +#endif // endif + +#ifdef DHD_DEBUG_PAGEALLOC + +void +dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n", + __FUNCTION__, addr_corrupt, (uint32)len)); + + DHD_OS_WAKE_LOCK(dhdp); + prhex("Page Corruption:", addr_corrupt, len); + dhd_dump_to_kernelog(dhdp); 
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + /* Load the dongle side dump to host memory and then BUG_ON() */ + dhdp->memdump_enabled = DUMP_MEMONLY; + dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION; + dhd_bus_mem_dump(dhdp); +#endif /* BCMPCIE && DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); +} +EXPORT_SYMBOL(dhd_page_corrupt_cb); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED) +void +dhd_pktid_error_handler(dhd_pub_t *dhdp) +{ + DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(dhdp); + dhd_dump_to_kernelog(dhdp); +#ifdef DHD_FW_COREDUMP + /* Load the dongle side dump to host memory */ + if (dhdp->memdump_enabled == DUMP_DISABLED) { + dhdp->memdump_enabled = DUMP_MEMFILE; + } + dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE; + dhd_bus_mem_dump(dhdp); +#endif /* DHD_FW_COREDUMP */ + dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR; + dhd_os_check_hang(dhdp, 0, -EREMOTEIO); + DHD_OS_WAKE_UNLOCK(dhdp); +} +#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */ + +struct net_device * +dhd_linux_get_primary_netdev(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + + if (dhd->iflist[0] && dhd->iflist[0]->net) + return dhd->iflist[0]->net; + else + return NULL; +} + +#ifdef DHD_DHCP_DUMP +static void +dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx) +{ + struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN]; + struct iphdr *h = &b->ip_header; + uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len); + int dhcp_type = 0, len, opt_len; + + /* check IP header */ + if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) { + return; + } + + /* check UDP port for bootp (67, 68) */ + if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) && + b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) { + return; + } + + /* check header length */ + if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) { + 
return; + } + + len = ntohs(b->udp_header.len) - sizeof(struct udphdr); + opt_len = len + - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options)); + + /* parse bootp options */ + if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) { + ptr = &b->options[4]; + while (ptr < end && *ptr != 0xff) { + opt = ptr++; + if (*opt == 0) { + continue; + } + ptr += *ptr + 1; + if (ptr >= end) { + break; + } + /* 53 is dhcp type */ + if (*opt == 53) { + if (opt[1]) { + dhcp_type = opt[2]; + DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n", + ifname, dhcp_types[dhcp_type], + tx ? "TX" : "RX", dhcp_ops[b->op])); + break; + } + } + } + } +} +#endif /* DHD_DHCP_DUMP */ + +#ifdef DHD_ICMP_DUMP +static void +dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx) +{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct iphdr *iph = (struct iphdr *)pkt; + struct icmphdr *icmph; + + /* check IP header */ + if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) { + return; + } + + icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr)); + if (icmph->type == ICMP_ECHO) { + DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n", + ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence))); + } else if (icmph->type == ICMP_ECHOREPLY) { + DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n", + ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence))); + } else { + DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n", + ifname, tx ? 
"TX" : "RX", icmph->type, icmph->code)); + } +} +#endif /* DHD_ICMP_DUMP */ + +#ifdef SHOW_LOGTRACE +void +dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info) +{ + dhd_dbg_ring_status_t ring_status; + uint32 rlen = 0; +#if defined(DEBUGABILITY) + rlen = dhd_dbg_pull_single_from_ring(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf, + TRACE_LOG_BUF_MAX_SIZE, TRUE); +#elif defined(EWP_ECNTRS_LOGGING) + rlen = dhd_dbg_ring_pull_single(dhd_pub->ecntr_dbg_ring, trace_buf_info->buf, + TRACE_LOG_BUF_MAX_SIZE, TRUE); +#else + ASSERT(0); +#endif /* DEBUGABILITY */ + + trace_buf_info->size = rlen; + trace_buf_info->availability = NEXT_BUF_NOT_AVAIL; + if (rlen == 0) { + trace_buf_info->availability = BUF_NOT_AVAILABLE; + return; + } + dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status); + if (ring_status.written_bytes != ring_status.read_bytes) { + trace_buf_info->availability = NEXT_BUF_AVAIL; + } +} +#endif /* SHOW_LOGTRACE */ + +bool +dhd_fw_download_status(dhd_pub_t * dhd_pub) +{ + return dhd_pub->fw_download_done; +} + +int +dhd_create_to_notifier_skt(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + /* Kernel 3.7 onwards this API accepts only 3 arguments. 
*/
+ /* Kernel version 3.6 is a special case which accepts 4 arguments */
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ /* Kernel version 3.5 and below use this old API format */
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
+ dhd_process_daemon_msg, NULL, THIS_MODULE);
+#else
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
+ &dhd_netlink_cfg);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
+ if (!nl_to_event_sk)
+ {
+ printf("Error creating socket.\n");
+ return -1;
+ }
+ DHD_INFO(("nl_to socket created successfully...\n"));
+ return 0;
+}
+
+void
+dhd_destroy_to_notifier_skt(void)
+{
+ DHD_INFO(("Destroying nl_to socket\n"));
+ netlink_kernel_release(nl_to_event_sk);
+}
+
+static void
+dhd_recv_msg_from_daemon(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ bcm_to_info_t *cmd;
+
+ nlh = (struct nlmsghdr *)skb->data;
+ cmd = (bcm_to_info_t *)nlmsg_data(nlh);
+ if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
+ sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
+ DHD_INFO(("DHD Daemon Started\n"));
+ }
+}
+
+int
+dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb_out;
+
+ BCM_REFERENCE(skb);
+ if (sender_pid == 0) {
+ DHD_INFO(("Invalid PID 0\n"));
+ return -1;
+ }
+
+ if ((skb_out = nlmsg_new(size, 0)) == NULL) {
+ DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
+ return -1;
+ }
+ nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
+ if (nlh == NULL) {
+ /* NOTE(review): nlmsg_put() returns NULL when the skb lacks room for
+ * the netlink header; the original code dereferenced nlh
+ * unconditionally below (NULL pointer dereference on failure).
+ */
+ DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
+ nlmsg_free(skb_out);
+ return -1;
+ }
+ NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
+ memcpy(nlmsg_data(nlh), (char *)data, size);
+
+ if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
+ DHD_INFO(("Error sending message\n"));
+ }
+ return 0;
+}
+
+static ssize_t
+show_enable_ecounter(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+
+ onoff = enable_ecounter;
+
ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + dhd_pub_t *dhdp; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return count; + } + dhdp = &dhd->pub; + if (!FW_SUPPORTED(dhdp, ecounters)) { + DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__)); + return count; + } + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + + if (enable_ecounter == onoff) { + DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter)); + return count; + } + + enable_ecounter = onoff; + if (enable_ecounter) { + if (dhd_start_ecounters(dhdp) != BCME_OK) { + DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__)); + } else if (dhd_start_event_ecounters(dhdp) != BCME_OK) { + DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__)); + } + } else { + if (dhd_stop_ecounters(dhdp) != BCME_OK) { + DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__)); + } else if (dhd_stop_event_ecounters(dhdp) != BCME_OK) { + DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__)); + } + } + + return count; +} + +static void +dhd_process_daemon_msg(struct sk_buff *skb) +{ + bcm_to_info_t to_info; + + to_info.magic = BCM_TO_MAGIC; + to_info.reason = REASON_DAEMON_STARTED; + to_info.trap = NO_TRAP; + + dhd_recv_msg_from_daemon(skb); + dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info)); +} + +#ifdef DHD_LOG_DUMP +bool +dhd_log_dump_ecntr_enabled(void) +{ + return (bool)logdump_ecntr_enable; +} + +void +dhd_log_dump_init(dhd_pub_t *dhd) +{ + struct dhd_log_dump_buf *dld_buf, *dld_buf_special; + int i = 0; + uint8 *prealloc_buf = NULL, *bufptr = NULL; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF; +#endif /* 
CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + int ret; + dhd_dbg_ring_t *ring = NULL; + unsigned long flags = 0; + dhd_info_t *dhd_info = dhd->info; + void *cookie_buf = NULL; + + BCM_REFERENCE(ret); + BCM_REFERENCE(ring); + BCM_REFERENCE(flags); + + /* sanity check */ + if (logdump_prsrv_tailsize <= 0 || + logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) { + logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE; + } + /* now adjust the preserve log flush size based on the + * kernel printk log buffer size + */ +#ifdef CONFIG_LOG_BUF_SHIFT + DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;" + " limit prsrv tail size to = %uKB\n", + __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024, + logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024)); + + if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) { + logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE; + } +#else + DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n", + __FUNCTION__, logdump_prsrv_tailsize/1024); +#endif /* CONFIG_LOG_BUF_SHIFT */ + + mutex_init(&dhd_info->logdump_lock); + + /* initialize log dump buf structures */ + memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM); + + /* set the log dump buffer size based on the module_param */ + if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE || + logdump_max_bufsize <= 0) + dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE; + else + dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize; + + /* pre-alloc the memory for the log buffers & 'special' buffer */ + dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL]; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n", + __FUNCTION__, LOG_DUMP_TOTAL_BUFSIZE, LOG_DUMP_SPECIAL_MAX_BUFSIZE)); + prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE); + dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, 
prealloc_idx++, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); +#else + prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE); + dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + if (!prealloc_buf) { + DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n")); + goto fail; + } + if (!dld_buf_special->buffer) { + DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n")); + goto fail; + } + + bufptr = prealloc_buf; + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + dld_buf->dhd_pub = dhd; + spin_lock_init(&dld_buf->lock); + dld_buf->wraparound = 0; + if (i != DLD_BUF_TYPE_SPECIAL) { + dld_buf->buffer = bufptr; + dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i]; + bufptr = (uint8 *)dld_buf->max; + } else { + dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i]; + } + dld_buf->present = dld_buf->front = dld_buf->buffer; + dld_buf->remain = dld_buf_size[i]; + dld_buf->enable = 1; + } + +#ifdef EWP_ECNTRS_LOGGING + /* now use the rest of the pre-alloc'd memory for filter and ecounter log */ + dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t)); + if (!dhd->ecntr_dbg_ring) + goto fail; + + ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring; + ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID, + ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE, + bufptr); + if (ret != BCME_OK) { + DHD_ERROR(("%s: unable to init ecntr ring !\n", + __FUNCTION__)); + goto fail; + } + DHD_DBG_RING_LOCK(ring->lock, flags); + ring->state = RING_ACTIVE; + ring->threshold = 0; + DHD_DBG_RING_UNLOCK(ring->lock, flags); + + bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE; +#endif /* EWP_ECNTRS_LOGGING */ + + /* Concise buffer is used as intermediate buffer for following purposes + * a) pull ecounters records temporarily before + * writing it to file + * b) to store dhd dump data before putting it to file + * It should have a size equal to + * MAX(largest 
possible ecntr record, 'dhd dump' data size) + */ + dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN); + if (!dhd->concise_dbg_buf) { + DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n", + __FUNCTION__)); + goto fail; + } + + cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE); + if (!cookie_buf) { + DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n", + __FUNCTION__)); + goto fail; + } + ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE); + if (ret != BCME_OK) { + MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE); + goto fail; + } + return; + +fail: + + if (dhd->logdump_cookie) { + dhd_logdump_cookie_deinit(dhd); + MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE); + dhd->logdump_cookie = NULL; + } + + if (dhd->concise_dbg_buf) { + MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + } + +#ifdef EWP_ECNTRS_LOGGING + if (dhd->ecntr_dbg_ring) { + ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring; + dhd_dbg_ring_deinit(dhd, ring); + ring->ring_buf = NULL; + ring->ring_size = 0; + MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t)); + dhd->ecntr_dbg_ring = NULL; + } +#endif /* EWP_ECNTRS_LOGGING */ + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + if (prealloc_buf) { + DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + DHD_OS_PREFREE(dhd, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#else + if (prealloc_buf) { + MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + MFREE(dhd->osh, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + dld_buf->enable = 0; + dld_buf->buffer = NULL; + } + + mutex_destroy(&dhd_info->logdump_lock); +} + +void +dhd_log_dump_deinit(dhd_pub_t *dhd) +{ + struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special 
= NULL; + int i = 0; + dhd_info_t *dhd_info = dhd->info; + dhd_dbg_ring_t *ring = NULL; + + BCM_REFERENCE(ring); + + if (dhd->concise_dbg_buf) { + MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN); + dhd->concise_dbg_buf = NULL; + } + + if (dhd->logdump_cookie) { + dhd_logdump_cookie_deinit(dhd); + MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE); + dhd->logdump_cookie = NULL; + } + +#ifdef EWP_ECNTRS_LOGGING + if (dhd->ecntr_dbg_ring) { + ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring; + dhd_dbg_ring_deinit(dhd, ring); + ring->ring_buf = NULL; + ring->ring_size = 0; + MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t)); + dhd->ecntr_dbg_ring = NULL; + } +#endif /* EWP_ECNTRS_LOGGING */ + + /* 'general' buffer points to start of the pre-alloc'd memory */ + dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL]; + dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL]; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + if (dld_buf->buffer) { + DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + DHD_OS_PREFREE(dhd, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#else + if (dld_buf->buffer) { + MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE); + } + if (dld_buf_special->buffer) { + MFREE(dhd->osh, dld_buf_special->buffer, + dld_buf_size[DLD_BUF_TYPE_SPECIAL]); + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + dld_buf->enable = 0; + dld_buf->buffer = NULL; + } + + mutex_destroy(&dhd_info->logdump_lock); +} + +void +dhd_log_dump_write(int type, char *binary_data, + int binary_len, const char *fmt, ...) 
+{ + int len = 0; + char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, }; + va_list args; + unsigned long flags = 0; + struct dhd_log_dump_buf *dld_buf = NULL; + bool flush_log = FALSE; + + if (type < 0 || type >= DLD_BUFFER_NUM) { + DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n", + __FUNCTION__, type)); + return; + } + + dld_buf = &g_dld_buf[type]; + + if (dld_buf->enable != 1) { + return; + } + + va_start(args, fmt); + len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args); + /* Non ANSI C99 compliant returns -1, + * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + */ + va_end(args); + if (len < 0) { + return; + } + + if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) { + len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1; + tmp_buf[len] = '\0'; + } + + /* make a critical section to eliminate race conditions */ + spin_lock_irqsave(&dld_buf->lock, flags); + if (dld_buf->remain < len) { + dld_buf->wraparound = 1; + dld_buf->present = dld_buf->front; + dld_buf->remain = dld_buf_size[type]; + /* if wrap around happens, flush the ring buffer to the file */ + flush_log = TRUE; + } + + memcpy(dld_buf->present, tmp_buf, len); + dld_buf->remain -= len; + dld_buf->present += len; + spin_unlock_irqrestore(&dld_buf->lock, flags); + + /* double check invalid memory operation */ + ASSERT((unsigned long)dld_buf->present <= dld_buf->max); + + if (dld_buf->dhd_pub) { + dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub; + dhdp->logdump_periodic_flush = + logdump_periodic_flush; + if (logdump_periodic_flush && flush_log) { + log_dump_type_t *flush_type = MALLOCZ(dhdp->osh, + sizeof(log_dump_type_t)); + if (flush_type) { + *flush_type = type; + dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type); + } + } + } +} + +char* +dhd_log_dump_get_timestamp(void) +{ + static char buf[16]; + u64 ts_nsec; + unsigned long rem_nsec; + + ts_nsec = local_clock(); + rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC); + snprintf(buf, sizeof(buf), "%5lu.%06lu", + 
(unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC); + + return buf; +} +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +void +dhd_flush_rx_tx_wq(dhd_pub_t *dhdp) +{ + dhd_info_t * dhd; + + if (dhdp) { + dhd = dhdp->info; + if (dhd) { + flush_workqueue(dhd->tx_wq); + flush_workqueue(dhd->rx_wq); + } + } + + return; +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef DHD_LB_TXP +#define DHD_LB_TXBOUND 64 +/* + * Function that performs the TX processing on a given CPU + */ +bool +dhd_lb_tx_process(dhd_info_t *dhd) +{ + struct sk_buff *skb; + int cnt = 0; + struct net_device *net; + int ifidx; + bool resched = FALSE; + + DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__)); + if (dhd == NULL) { + DHD_ERROR((" Null pointer DHD \r\n")); + return resched; + } + + BCM_REFERENCE(net); + + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + + /* Base Loop to perform the actual Tx */ + do { + skb = skb_dequeue(&dhd->tx_pend_queue); + if (skb == NULL) { + DHD_TRACE(("Dequeued a Null Packet \r\n")); + break; + } + cnt++; + + net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + + DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb, + net, ifidx)); + + __dhd_sendpkt(&dhd->pub, ifidx, skb); + + if (cnt >= DHD_LB_TXBOUND) { + resched = TRUE; + break; + } + + } while (1); + + DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt)); + + return resched; +} + +void +dhd_lb_tx_handler(unsigned long data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + + if (dhd_lb_tx_process(dhd)) { + dhd_tasklet_schedule(&dhd->tx_tasklet); + } +} + +#endif /* DHD_LB_TXP */ + +/* ---------------------------------------------------------------------------- + * Infrastructure code for sysfs interface support for DHD + * + * What is sysfs interface? + * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt + * + * Why sysfs interface? 
+ * This is the Linux standard way of changing/configuring Run Time parameters
+ * for a driver. We can use this interface to control "linux" specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+/* NOTE(review): the two #include directives below lost their header names in
+ * patch mangling ("#include" with no argument cannot compile). The kobject /
+ * sysfs code that follows matches these standard kernel headers — confirm
+ * against upstream dhd_linux.c.
+ */
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ buf[ret] = '\n';
+ buf[ret+1] = 0;
+
+ dhd_wk_lock_stats_dump(&dhd->pub);
+ return ret+1;
+}
+
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ unsigned long flags;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ trace_wklock_onoff = onoff;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (trace_wklock_onoff) {
+ printk("ENABLE WAKLOCK TRACE\n");
+ } else {
+ printk("DISABLE WAKELOCK TRACE\n");
+ }
+
+ return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+#if defined(DHD_LB_TXP)
+static ssize_t
+show_lbtxp(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = atomic_read(&dhd->lb_txp_active);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
+}
+
+static ssize_t
+lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+ int i;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
+ }
+ atomic_set(&dhd->lb_txp_active, onoff);
+
+ /* Since the scheme is changed clear the counters */
+ for (i = 0; i < NR_CPUS; i++) {
+
DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); + } + + return count; +} + +#endif /* DHD_LB_TXP */ + +#ifdef DHD_LOG_DUMP +static ssize_t +show_logdump_periodic_flush(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long val; + + val = logdump_periodic_flush; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val); + return ret; +} + +static ssize_t +logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long val; + + val = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &val); + if (val != 0 && val != 1) { + return -EINVAL; + } + logdump_periodic_flush = val; + return count; +} +static ssize_t +show_logdump_ecntr(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long val; + + val = logdump_ecntr_enable; + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val); + return ret; +} + +static ssize_t +logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long val; + + val = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &val); + if (val != 0 && val != 1) { + return -EINVAL; + } + logdump_ecntr_enable = val; + return count; +} + +#endif /* DHD_LOG_DUMP */ +/* + * Generic Attribute Structure for DHD. + * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have + * to instantiate an object of type dhd_attr, populate it with + * the required show/store functions (ex:- dhd_attr_cpumask_primary) + * and add the object to default_attrs[] array, that gets registered + * to the kobject of dhd (named bcm-dhd). 
+ */ + +struct dhd_attr { + struct attribute attr; + ssize_t(*show)(struct dhd_info *, char *); + ssize_t(*store)(struct dhd_info *, const char *, size_t count); +}; + +#if defined(DHD_TRACE_WAKE_LOCK) +static struct dhd_attr dhd_attr_wklock = + __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff); +#endif /* defined(DHD_TRACE_WAKE_LOCK */ + +#if defined(DHD_LB_TXP) +static struct dhd_attr dhd_attr_lbtxp = + __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff); +#endif /* DHD_LB_TXP */ +#ifdef DHD_LOG_DUMP +static struct dhd_attr dhd_attr_logdump_periodic_flush = + __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush, + logdump_periodic_flush_onoff); +static struct dhd_attr dhd_attr_logdump_ecntr = + __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr, + logdump_ecntr_onoff); +#endif /* DHD_LOG_DUMP */ + +static struct dhd_attr dhd_attr_ecounters = + __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff); + +/* Attribute object that gets registered with "bcm-dhd" kobject tree */ +static struct attribute *default_attrs[] = { +#if defined(DHD_TRACE_WAKE_LOCK) + &dhd_attr_wklock.attr, +#endif // endif +#if defined(DHD_LB_TXP) + &dhd_attr_lbtxp.attr, +#endif /* DHD_LB_TXP */ +#ifdef DHD_LOG_DUMP + &dhd_attr_logdump_periodic_flush.attr, + &dhd_attr_logdump_ecntr.attr, +#endif // endif + &dhd_attr_ecounters.attr, + NULL +}; + +#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj) +#define to_attr(a) container_of(a, struct dhd_attr, attr) + +/* + * bcm-dhd kobject show function, the "attr" attribute specifices to which + * node under "bcm-dhd" the show function is called. 
+ */ +static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhd = to_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + int ret; + + if (d_attr->show) + ret = d_attr->show(dhd, buf); + else + ret = -EIO; + + return ret; +} + +/* + * bcm-dhd kobject show function, the "attr" attribute specifices to which + * node under "bcm-dhd" the store function is called. + */ +static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + dhd_info_t *dhd = to_dhd(kobj); + struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + int ret; + + if (d_attr->store) + ret = d_attr->store(dhd, buf, count); + else + ret = -EIO; + + return ret; + +} + +static struct sysfs_ops dhd_sysfs_ops = { + .show = dhd_show, + .store = dhd_store, +}; + +static struct kobj_type dhd_ktype = { + .sysfs_ops = &dhd_sysfs_ops, + .default_attrs = default_attrs, +}; + +/* Create a kobject and attach to sysfs interface */ +static int dhd_sysfs_init(dhd_info_t *dhd) +{ + int ret = -1; + + if (dhd == NULL) { + DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); + return ret; + } + + /* Initialize the kobject */ + ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd"); + if (ret) { + kobject_put(&dhd->dhd_kobj); + DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__)); + return ret; + } + + /* + * We are always responsible for sending the uevent that the kobject + * was added to the system. 
+ */ + kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD); + + return ret; +} + +/* Done with the kobject and detach the sysfs interface */ +static void dhd_sysfs_exit(dhd_info_t *dhd) +{ + if (dhd == NULL) { + DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__)); + return; + } + + /* Releae the kobject */ + if (dhd->dhd_kobj.state_initialized) + kobject_put(&dhd->dhd_kobj); +} + +#ifdef DHD_DEBUG_UART +bool +dhd_debug_uart_is_running(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd->duart_execute) { + return TRUE; + } + + return FALSE; +} + +static void +dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event) +{ + dhd_pub_t *dhdp = handle; + dhd_debug_uart_exec(dhdp, "rd"); +} + +static void +dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd) +{ + int ret; + + char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL}; + char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL}; + +#ifdef DHD_FW_COREDUMP + if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) +#endif // endif + { + if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN || +#ifdef DHD_FW_COREDUMP + dhdp->memdump_success == FALSE || +#endif // endif + FALSE) { + dhdp->info->duart_execute = TRUE; + DHD_ERROR(("DHD: %s - execute %s %s\n", + __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd)); + ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); + DHD_ERROR(("DHD: %s - %s %s ret = %d\n", + __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret)); + dhdp->info->duart_execute = FALSE; + +#ifdef DHD_LOG_DUMP + if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) +#endif // endif + { + BUG_ON(1); + } + } + } +} +#endif /* DHD_DEBUG_UART */ + +#if defined(DHD_BLOB_EXISTENCE_CHECK) +void +dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path) +{ + struct file *fp; + char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH; + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__, + filepath)); + dhdp->is_blob = FALSE; 
+ } else { + DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__, filepath)); + dhdp->is_blob = TRUE; +#if defined(CONCATE_BLOB) + strncat(fw_path, "_blob", strlen("_blob")); +#else + BCM_REFERENCE(fw_path); +#endif /* SKIP_CONCATE_BLOB */ + filp_close(fp, NULL); + } +} +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + +#if defined(PCIE_FULL_DONGLE) +/** test / loopback */ +void +dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event) +{ + dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info; + dhd_info_t *dhd_info = (dhd_info_t *)handle; + + if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) { + DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__)); + return; + } + if (dhd_info == NULL) { + DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__)); + return; + } + if (dmmap == NULL) { + DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__)); + return; + } + dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap); +} + +void +dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) +{ + dhd_info_t *dhd_info = dhdp->info; + + dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap, + DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW); +} +#endif /* PCIE_FULL_DONGLE */ +/* ---------------------------- End of sysfs implementation ------------------------------------- */ +#ifdef SET_PCIE_IRQ_CPU_CORE +void +dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set) +{ + unsigned int irq; + if (!dhdp) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + + if (!dhdp->bus) { + DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__)); + return; + } + + if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) { + return; + } + + set_irq_cpucore(irq, set); +} +#endif /* SET_PCIE_IRQ_CPU_CORE */ + +int +dhd_write_file(const char *filepath, char *buf, int buf_len) +{ + struct file *fp = NULL; + mm_segment_t old_fs; + int ret = 0; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* File is always 
created. */ + fp = filp_open(filepath, O_RDWR | O_CREAT, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n", + __FUNCTION__, filepath, PTR_ERR(fp))); + ret = BCME_ERROR; + } else { + if (fp->f_mode & FMODE_WRITE) { + ret = vfs_write(fp, buf, buf_len, &fp->f_pos); + if (ret < 0) { + DHD_ERROR(("%s: Couldn't write file '%s'\n", + __FUNCTION__, filepath)); + ret = BCME_ERROR; + } else { + ret = BCME_OK; + } + } + filp_close(fp, NULL); + } + + /* restore previous address limit */ + set_fs(old_fs); + + return ret; +} + +int +dhd_read_file(const char *filepath, char *buf, int buf_len) +{ + struct file *fp = NULL; + mm_segment_t old_fs; + int ret; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + set_fs(old_fs); + DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath)); + return BCME_ERROR; + } + + ret = compat_kernel_read(fp, 0, buf, buf_len); + filp_close(fp, NULL); + + /* restore previous address limit */ + set_fs(old_fs); + + /* Return the number of bytes read */ + if (ret > 0) { + /* Success to read */ + ret = 0; + } else { + DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n", + __FUNCTION__, filepath, ret)); + ret = BCME_ERROR; + } + + return ret; +} + +int +dhd_write_file_and_check(const char *filepath, char *buf, int buf_len) +{ + int ret; + + ret = dhd_write_file(filepath, buf, buf_len); + if (ret < 0) { + return ret; + } + + /* Read the file again and check if the file size is not zero */ + memset(buf, 0, buf_len); + ret = dhd_read_file(filepath, buf, buf_len); + + return ret; +} + +#ifdef FILTER_IE +int dhd_read_from_file(dhd_pub_t *dhd) +{ + int ret = 0, nread = 0; + void *fd; + uint8 *buf; + NULL_CHECK(dhd, "dhd is NULL", ret); + + buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE); + if (!buf) { + DHD_ERROR(("error: failed to alllocate buf.\n")); + return BCME_NOMEM; + } + + /* open file to read */ + fd = 
dhd_os_open_image1(dhd, FILTER_IE_PATH); + if (!fd) { + DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH)); + ret = BCME_EPERM; + goto exit; + } + nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd); + if (nread > 0) { + buf[nread] = '\0'; + if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) { + DHD_ERROR(("error: failed to parse filter ie\n")); + } + } else { + DHD_ERROR(("error: zero length file.failed to read\n")); + ret = BCME_ERROR; + } + dhd_os_close_image1(dhd, fd); +exit: + if (buf) { + MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE); + buf = NULL; + } + return ret; +} + +int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf) +{ + uint8* pstr = buf; + int element_count = 0; + + if (buf == NULL) { + return BCME_ERROR; + } + + while (*pstr != '\0') { + if (*pstr == '\n') { + element_count++; + } + pstr++; + } + /* + * New line character must not be present after last line. + * To count last line + */ + element_count++; + + return element_count; +} + +int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len) +{ + uint8 i, j, msb, lsb, oui_len = 0; + /* + * OUI can vary from 3 bytes to 5 bytes. + * While reading from file as ascii input it can + * take maximum size of 14 bytes and minumum size of + * 8 bytes including ":" + * Example 5byte OUI + * Example 3byte OUI + */ + + if ((inbuf == NULL) || (len < 8) || (len > 14)) { + DHD_ERROR(("error: failed to parse OUI \n")); + return BCME_ERROR; + } + + for (j = 0, i = 0; i < len; i += 3, ++j) { + if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) { + DHD_ERROR(("error: invalid OUI format \n")); + return BCME_ERROR; + } + msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0'; + lsb = inbuf[i + 1] > '9' ? 
bcm_toupper(inbuf[i + 1]) - + 'A' + 10 : inbuf[i + 1] - '0'; + oui[j] = (msb << 4) | lsb; + } + /* Size of oui.It can vary from 3/4/5 */ + oui_len = j; + + return oui_len; +} + +int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len) +{ + int i = 0; + + while (i < len) { + if (!bcm_isdigit(buf[i])) { + DHD_ERROR(("error: non digit value found in filter_ie \n")); + return BCME_ERROR; + } + i++; + } + if (bcm_atoi((char*)buf) > 255) { + DHD_ERROR(("error: element id cannot be greater than 255 \n")); + return BCME_ERROR; + } + + return BCME_OK; +} + +int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf) +{ + int element_count = 0, i = 0, oui_size = 0, ret = 0; + uint16 bufsize, buf_space_left, id = 0, len = 0; + uint16 filter_iovsize, all_tlvsize; + wl_filter_ie_tlv_t *p_ie_tlv = NULL; + wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL; + char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL; + uint8 data[20]; + + element_count = dhd_get_filter_ie_count(dhd, buf); + DHD_INFO(("total element count %d \n", element_count)); + /* Calculate the whole buffer size */ + filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ; + p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize); + + if (p_filter_iov == NULL) { + DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize)); + return BCME_ERROR; + } + + /* setup filter iovar header */ + p_filter_iov->version = WL_FILTER_IE_VERSION; + p_filter_iov->len = filter_iovsize; + p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ; + p_filter_iov->pktflag = FC_PROBE_REQ; + p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION; + /* setup TLVs */ + bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */ + p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0]; + buf_space_left = bufsize; + + while ((i < element_count) && (buf != NULL)) { + len = 0; + /* token contains one line of input data */ + token = bcmstrtok((char**)&buf, 
"\n", NULL); + if (token == NULL) { + break; + } + if ((ele_token = bcmstrstr(token, ",")) == NULL) { + /* only element id is present */ + if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) { + DHD_ERROR(("error: Invalid element id \n")); + ret = BCME_ERROR; + goto exit; + } + id = bcm_atoi((char*)token); + data[len++] = WL_FILTER_IE_SET; + } else { + /* oui is present */ + ele_token = bcmstrtok(&token, ",", NULL); + if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token, + strlen(ele_token)) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid element id \n")); + ret = BCME_ERROR; + goto exit; + } + id = bcm_atoi((char*)ele_token); + data[len++] = WL_FILTER_IE_SET; + if ((oui_token = bcmstrstr(token, ",")) == NULL) { + oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token)); + if (oui_size == BCME_ERROR) { + DHD_ERROR(("error: Invalid OUI \n")); + ret = BCME_ERROR; + goto exit; + } + len += oui_size; + } else { + /* type is present */ + oui_token = bcmstrtok(&token, ",", NULL); + if ((oui_token == NULL) || ((oui_size = + dhd_parse_oui(dhd, oui_token, + &(data[len]), strlen(oui_token))) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid OUI \n")); + ret = BCME_ERROR; + goto exit; + } + len += oui_size; + if ((type = bcmstrstr(token, ",")) == NULL) { + if (dhd_check_valid_ie(dhd, token, + strlen(token)) == BCME_ERROR) { + DHD_ERROR(("error: Invalid type \n")); + ret = BCME_ERROR; + goto exit; + } + data[len++] = bcm_atoi((char*)token); + } else { + /* subtype is present */ + type = bcmstrtok(&token, ",", NULL); + if ((type == NULL) || (dhd_check_valid_ie(dhd, type, + strlen(type)) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid type \n")); + ret = BCME_ERROR; + goto exit; + } + data[len++] = bcm_atoi((char*)type); + /* subtype is last element */ + if ((token == NULL) || (*token == '\0') || + (dhd_check_valid_ie(dhd, token, + strlen(token)) == BCME_ERROR)) { + DHD_ERROR(("error: Invalid subtype \n")); + ret = BCME_ERROR; + goto exit; + } + 
data[len++] = bcm_atoi((char*)token); + } + } + } + ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv, + &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ," + "status=%d\n", __FUNCTION__, ret)); + goto exit; + } + i++; + } + if (i == 0) { + /* file is empty or first line is blank */ + DHD_ERROR(("error: filter_ie file is empty or first line is blank \n")); + ret = BCME_ERROR; + goto exit; + } + /* update the iov header, set len to include all TLVs + header */ + all_tlvsize = (bufsize - buf_space_left); + p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE); + ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov, + p_filter_iov->len, NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("error: IOVAR failed, status=%d\n", ret)); + } +exit: + /* clean up */ + if (p_filter_iov) { + MFREE(dhd->osh, p_filter_iov, filter_iovsize); + p_filter_iov = NULL; + } + return ret; +} +#endif /* FILTER_IE */ +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_get_wakecount(dhd_pub_t *dhdp) +{ +#ifdef BCMDBUS + return NULL; +#else + return dhd_bus_get_wakecount(dhdp); +#endif /* BCMDBUS */ +} +#endif /* DHD_WAKE_STATUS */ + +int +dhd_get_random_bytes(uint8 *buf, uint len) +{ +#ifdef BCMPCIE + get_random_bytes_arch(buf, len); +#endif /* BCMPCIE */ + return BCME_OK; +} + +#ifdef DHD_ERPOM +static void +dhd_error_recovery(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp; + int ret = 0; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { + DHD_ERROR(("%s: init not completed, cannot initiate recovery\n", + __FUNCTION__)); + return; + } + + ret = dhd_bus_perform_flr_with_quiesce(dhdp); + if (ret != BCME_DNGL_DEVRESET) { + DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d," + "toggle REG_ON\n", __FUNCTION__, ret)); + /* toggle REG_ON */ + 
dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN); + return; + } +} + +void +dhd_schedule_reset(dhd_pub_t *dhdp) +{ + if (dhdp->enable_erpom) { + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL, + DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH); + } +} +#endif /* DHD_ERPOM */ + +void +get_debug_dump_time(char *str) +{ + struct timeval curtime; + unsigned long local_time; + struct rtc_time tm; + + if (!strlen(str)) { + do_gettimeofday(&curtime); + local_time = (u32)(curtime.tv_sec - + (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE)); + rtc_time_to_tm(local_time, &tm); + + snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS, + tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, + tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC)); + } +} + +void +clear_debug_dump_time(char *str) +{ + memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN); +} + +#define KIRQ_PRINT_BUF_LEN 256 + +void +dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) + unsigned long flags = 0; + struct irq_desc *desc; + int i; /* cpu iterator */ + struct bcmstrbuf strbuf; + char tmp_buf[KIRQ_PRINT_BUF_LEN]; + + desc = irq_to_desc(irq_num); + if (!desc) { + DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__)); + return; + } + bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN); + raw_spin_lock_irqsave(&desc->lock, flags); + bcm_bprintf(&strbuf, "dhd irq %u:", irq_num); + for_each_online_cpu(i) + bcm_bprintf(&strbuf, "%10u ", + desc->kstat_irqs ? 
*per_cpu_ptr(desc->kstat_irqs, i) : 0); + if (desc->irq_data.chip) { + if (desc->irq_data.chip->name) + bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name); + else + bcm_bprintf(&strbuf, " %8s", "-"); + } else { + bcm_bprintf(&strbuf, " %8s", "None"); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) + if (desc->irq_data.domain) + bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq); +#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL + bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); +#endif // endif +#endif /* LINUX VERSION > 3.1.0 */ + + if (desc->name) + bcm_bprintf(&strbuf, "-%-8s", desc->name); + + DHD_ERROR(("%s\n", strbuf.origbuf)); + raw_spin_unlock_irqrestore(&desc->lock, flags); +#endif /* LINUX VERSION > 2.6.28 */ +} + +void +dhd_show_kirqstats(dhd_pub_t *dhd) +{ + unsigned int irq = -1; +#ifdef BCMPCIE + dhdpcie_get_pcieirq(dhd->bus, &irq); +#endif /* BCMPCIE */ +#ifdef BCMSDIO + irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num; +#endif /* BCMSDIO */ + if (irq != -1) { +#ifdef BCMPCIE + DHD_ERROR(("DUMP data kernel irq stats : \n")); +#endif /* BCMPCIE */ +#ifdef BCMSDIO + DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n")); +#endif /* BCMSDIO */ + dhd_print_kirqstats(dhd, irq); + } +#ifdef BCMPCIE_OOB_HOST_WAKE + irq = dhdpcie_get_oob_irq_num(dhd->bus); + if (irq) { + DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n")); + dhd_print_kirqstats(dhd, irq); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ +} + +void +dhd_print_tasklet_status(dhd_pub_t *dhd) +{ + dhd_info_t *dhdinfo; + + if (!dhd) { + DHD_ERROR(("%s : DHD is null\n", __FUNCTION__)); + return; + } + + dhdinfo = dhd->info; + + if (!dhdinfo) { + DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__)); + return; + } + + DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state)); +} + +/* + * DHD RING + */ +#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__)) +#define DHD_RING_TRACE_INTERNAL(fmt, ...) 
DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__)) + +#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x +#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x + +#define DHD_RING_MAGIC 0x20170910 +#define DHD_RING_IDX_INVALID 0xffffffff + +typedef struct { + uint32 elem_size; + uint32 elem_cnt; + uint32 write_idx; /* next write index, -1 : not started */ + uint32 read_idx; /* next read index, -1 : not start */ + + /* protected elements during serialization */ + int lock_idx; /* start index of locked, element will not be overried */ + int lock_count; /* number of locked, from lock idx */ + + /* saved data elements */ + void *elem; +} dhd_fixed_ring_info_t; + +typedef struct { + uint32 magic; + uint32 type; + struct mutex ring_sync; /* pointer to mutex */ + union { + dhd_fixed_ring_info_t fixed; + }; +} dhd_ring_info_t; + +uint32 +dhd_ring_get_hdr_size(void) +{ + return sizeof(dhd_ring_info_t); +} + +void * +dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt) +{ + dhd_ring_info_t *ret_ring; + + if (!buf) { + DHD_RING_ERR(("NO RING BUFFER\n")); + return NULL; + } + if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) { + DHD_RING_ERR(("RING SIZE IS TOO SMALL\n")); + return NULL; + } + + ret_ring = (dhd_ring_info_t *)buf; + ret_ring->type = DHD_RING_TYPE_FIXED; + mutex_init(&ret_ring->ring_sync); + ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID; + ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t); + ret_ring->fixed.elem_size = elem_size; + ret_ring->fixed.elem_cnt = elem_cnt; + ret_ring->magic = DHD_RING_MAGIC; + return ret_ring; +} + +void +dhd_ring_deinit(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + dhd_fixed_ring_info_t *fixed; + if (!ring) { + return; + } + + if (ring->magic != DHD_RING_MAGIC) { + return; + } + + mutex_destroy(&ring->ring_sync); + fixed = &ring->fixed; + memset(fixed->elem, 0, fixed->elem_size * 
fixed->elem_cnt); + fixed->elem_size = fixed->elem_cnt = 0; + ring->type = 0; + ring->magic = 0; + return; +} + +/* get counts between two indexes of ring buffer (internal only) */ +static inline int +__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end) +{ + if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) { + return 0; + } + + return (ring->elem_cnt + end - start) % ring->elem_cnt + 1; +} + +static inline int +__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring) +{ + return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx); +} + +static inline void * +__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + return NULL; + } + return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx); +} + +static inline void +__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring) +{ + uint32 next_idx; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return; + } + + next_idx = (ring->read_idx + 1) % ring->elem_cnt; + if (ring->read_idx == ring->write_idx) { + /* Become empty */ + ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID; + return; + } + + ring->read_idx = next_idx; + return; +} + +static inline void * +__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + return NULL; + } + return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx); +} + +static inline void * +__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring) +{ + uint32 tmp_idx; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + ring->read_idx = ring->write_idx = 0; + return (uint8 *)ring->elem; + } + + /* check next index is not locked */ + tmp_idx = (ring->write_idx + 1) % ring->elem_cnt; + if (ring->lock_idx == tmp_idx) { + return NULL; + } + + ring->write_idx = tmp_idx; + if (ring->write_idx == ring->read_idx) { + /* record is full, drop oldest one */ + ring->read_idx = (ring->read_idx + 1) 
% ring->elem_cnt; + + } + return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx); +} + +static inline uint32 +__dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t *ring, void *ptr, char *sig) +{ + uint32 diff; + uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID; + + if (ptr < ring->elem) { + DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem)); + return ret_idx; + } + diff = (uint32)((uint8 *)ptr - (uint8 *)ring->elem); + if (diff % ring->elem_size != 0) { + DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem)); + return ret_idx; + } + ret_idx = diff / ring->elem_size; + if (ret_idx >= ring->elem_cnt) { + DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring->elem_cnt, ret_idx)); + } + return ret_idx; +} + +static inline void * +__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev) +{ + uint32 cur_idx; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + + cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "NEXT"); + if (cur_idx >= ring->elem_cnt) { + return NULL; + } + + if (cur_idx == ring->write_idx) { + /* no more new record */ + return NULL; + } + + cur_idx = (cur_idx + 1) % ring->elem_cnt; + return (uint8 *)ring->elem + ring->elem_size * cur_idx; +} + +static inline void * +__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev) +{ + uint32 cur_idx; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "PREV"); + if (cur_idx >= ring->elem_cnt) { + return NULL; + } + if (cur_idx == ring->read_idx) { + /* no more new record */ + return NULL; + } + + cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt; + return (uint8 *)ring->elem + ring->elem_size * cur_idx; +} + +static inline void +__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr) +{ + uint32 first_idx; + uint32 last_idx; + uint32 ring_filled_cnt; + 
uint32 tmp_cnt; + + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return; + } + + if (first_ptr) { + first_idx = __dhd_fixed_ring_ptr2idx(ring, first_ptr, "LCK FIRST"); + if (first_idx >= ring->elem_cnt) { + return; + } + } else { + first_idx = ring->read_idx; + } + + if (last_ptr) { + last_idx = __dhd_fixed_ring_ptr2idx(ring, last_ptr, "LCK LAST"); + if (last_idx >= ring->elem_cnt) { + return; + } + } else { + last_idx = ring->write_idx; + } + + ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx); + tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx); + if (tmp_cnt > ring_filled_cnt) { + DHD_RING_ERR(("LOCK FIRST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n", + ring->write_idx, ring->read_idx, first_idx)); + return; + } + + tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx); + if (tmp_cnt > ring_filled_cnt) { + DHD_RING_ERR(("LOCK LAST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n", + ring->write_idx, ring->read_idx, last_idx)); + return; + } + + ring->lock_idx = first_idx; + ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx); + return; +} + +static inline void +__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return; + } + + ring->lock_idx = DHD_RING_IDX_INVALID; + ring->lock_count = 0; + return; +} +static inline void * +__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return NULL; + } + if (ring->lock_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("NO LOCK POINT\n")); + return NULL; + } + return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx; +} + +static inline void * +__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring) +{ + int lock_last_idx; + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + 
return NULL; + } + if (ring->lock_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("NO LOCK POINT\n")); + return NULL; + } + + lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt; + return (uint8 *)ring->elem + ring->elem_size * lock_last_idx; +} + +static inline int +__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return BCME_ERROR; + } + if (ring->lock_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("NO LOCK POINT\n")); + return BCME_ERROR; + } + return ring->lock_count; +} + +static inline void +__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring) +{ + if (ring->read_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("EMPTY RING\n")); + return; + } + if (ring->lock_idx == DHD_RING_IDX_INVALID) { + DHD_RING_ERR(("NO LOCK POINT\n")); + return; + } + + ring->lock_count--; + if (ring->lock_count <= 0) { + ring->lock_idx = DHD_RING_IDX_INVALID; + } else { + ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt; + } + return; +} + +/* Get first element : oldest element */ +void * +dhd_ring_get_first(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_get_first(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +/* Free first element : oldest element */ +void +dhd_ring_free_first(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_free_first(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return; +} + +/* Get latest element */ 
+void * +dhd_ring_get_last(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_get_last(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +/* Get next point can be written + * will overwrite which doesn't read + * will return NULL if next pointer is locked + */ +void * +dhd_ring_get_empty(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_get_empty(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +void * +dhd_ring_get_next(void *_ring, void *cur) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_get_next(&ring->fixed, cur); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +void * +dhd_ring_get_prev(void *_ring, void *cur) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +int +dhd_ring_get_cur_size(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + int cnt = 0; + + if (!ring || 
ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return cnt; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return cnt; +} + +/* protect element between lock_ptr and write_idx */ +void +dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr); + } + mutex_unlock(&ring->ring_sync); + return; +} + +/* free all lock */ +void +dhd_ring_lock_free(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_lock_free(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return; +} + +void * +dhd_ring_lock_get_first(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_lock_get_first(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +void * +dhd_ring_lock_get_last(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + void *ret = NULL; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return NULL; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = 
__dhd_fixed_ring_lock_get_last(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +int +dhd_ring_lock_get_count(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + int ret = BCME_ERROR; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return ret; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + ret = __dhd_fixed_ring_lock_get_count(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return ret; +} + +/* free first locked element */ +void +dhd_ring_lock_free_first(void *_ring) +{ + dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring; + + if (!ring || ring->magic != DHD_RING_MAGIC) { + DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__)); + return; + } + + mutex_lock(&ring->ring_sync); + if (ring->type == DHD_RING_TYPE_FIXED) { + __dhd_fixed_ring_lock_free_first(&ring->fixed); + } + mutex_unlock(&ring->ring_sync); + return; +} + +#ifdef DHD_DUMP_MNGR +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) +#define DHD_VFS_INODE(dir) (dir->d_inode) +#else +#define DHD_VFS_INODE(dir) d_inode(dir) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) +#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b) +#else +#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */ + +static int +dhd_file_delete(char *path) +{ + struct path file_path; + int err; + struct dentry *dir; + + err = kern_path(path, 0, &file_path); + + if (err < 0) { + return err; + } + if (FALSE || +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + !d_is_file(file_path.dentry) || +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0)) + d_really_is_negative(file_path.dentry) +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */ +) + { + err = -EINVAL; + } else { 
+ dir = dget_parent(file_path.dentry); + + if (!IS_ERR(dir)) { + err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL); + dput(dir); + } else { + err = PTR_ERR(dir); + } + } + + path_put(&file_path); + + if (err < 0) { + DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err)); + } + + return err; +} + +static int +dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname) +{ + int i; + int fm_idx = -1; + + for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) { + if (strlen(fm_ptr->elems[i].type_name) == 0) { + fm_idx = i; + break; + } + if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) { + fm_idx = i; + break; + } + } + + if (fm_idx == -1) { + return fm_idx; + } + + if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) { + strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE); + fm_ptr->elems[fm_idx].file_idx = 0; + } + + return fm_idx; +} + +/* + * dhd_dump_file_manage_enqueue - enqueue dump file path + * and delete odest file if file count is max. +*/ +void +dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname) +{ + int fm_idx; + int fp_idx; + dhd_dump_file_manage_t *fm_ptr; + DFM_elem_t *elem; + + if (!dhd || !dhd->dump_file_manage) { + DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n", + __FUNCTION__, dhd, (dhd ? 
dhd->dump_file_manage : NULL))); + return; + } + + fm_ptr = dhd->dump_file_manage; + + /* find file_manage idx */ + DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path)); + if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) { + DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n", + __FUNCTION__, fname)); + return; + } + + elem = &fm_ptr->elems[fm_idx]; + fp_idx = elem->file_idx; + DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n", + __FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx])); + + /* delete oldest file */ + if (strlen(elem->file_path[fp_idx]) != 0) { + if (dhd_file_delete(elem->file_path[fp_idx]) < 0) { + DHD_ERROR(("%s(): Failed to delete file: %s\n", + __FUNCTION__, elem->file_path[fp_idx])); + } else { + DHD_ERROR(("%s(): Successed to delete file: %s\n", + __FUNCTION__, elem->file_path[fp_idx])); + } + } + + /* save dump file path */ + strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE); + + /* change file index to next file index */ + elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX; +} +#endif /* DHD_DUMP_MNGR */ + +#ifdef DHD_MAP_LOGGING +/* Will be called from SMMU fault handler */ +void +dhd_debug_info_dump(void) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub; + uint32 irq = (uint32)-1; + + DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__)); + dhdp->smmu_fault_occurred = TRUE; + + /* Disable PCIe IRQ */ + dhdpcie_get_pcieirq(dhdp->bus, &irq); + if (irq != (uint32)-1) { + disable_irq_nosync(irq); + } + + DHD_OS_WAKE_LOCK(dhdp); + dhd_prot_debug_info_print(dhdp); + osl_dma_map_dump(); +#ifdef DHD_FW_COREDUMP + /* Load the dongle side dump to host memory */ + dhdp->memdump_enabled = DUMP_MEMONLY; + dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT; + dhd_bus_mem_dump(dhdp); +#endif /* DHD_FW_COREDUMP */ + DHD_OS_WAKE_UNLOCK(dhdp); +} +EXPORT_SYMBOL(dhd_debug_info_dump); +#endif /* DHD_MAP_LOGGING */ + +#ifdef DHD_WIFI_SHUTDOWN +void wifi_plat_dev_drv_shutdown(struct platform_device 
*pdev) +{ + dhd_pub_t *dhd_pub = NULL; + dhd_info_t *dhd_info = NULL; + dhd_if_t *dhd_if = NULL; + + DHD_ERROR(("%s enter\n", __FUNCTION__)); + dhd_pub = g_dhd_pub; + + if (dhd_os_check_if_up(dhd_pub)) { + dhd_info = (dhd_info_t *)dhd_pub->info; + dhd_if = dhd_info->iflist[0]; + ASSERT(dhd_if); + ASSERT(dhd_if->net); + if (dhd_if && dhd_if->net) { + dhd_stop(dhd_if->net); + } + } +} +#endif /* DHD_WIFI_SHUTDOWN */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +int +compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count) +{ + return (int)kernel_read(file, addr, (size_t)count, &offset); +} +#else +int +compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count) +{ + return kernel_read(file, offset, addr, count); +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */ + +void *dhd_get_pub(struct net_device *dev) +{ + dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev); + if (dhdinfo) + return (void *)&dhdinfo->pub; + else { + printf("%s: null dhdinfo\n", __FUNCTION__); + return NULL; + } +} + +void *dhd_get_conf(struct net_device *dev) +{ + dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev); + if (dhdinfo) + return (void *)dhdinfo->pub.conf; + else { + printf("%s: null dhdinfo\n", __FUNCTION__); + return NULL; + } +} + +bool dhd_os_wd_timer_enabled(void *bus) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (!dhd) { + DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__)); + return FALSE; + } + return dhd->wd_timer_valid; +} diff --git a/bcmdhd.100.10.315.x/dhd_linux.h b/bcmdhd.100.10.315.x/dhd_linux.h new file mode 100644 index 0000000..3c9b4ab --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux.h @@ -0,0 +1,169 @@ +/* + * DHD Linux header file (dhd_linux exports for cfg80211 and other components) + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux.h 767863 2018-06-15 10:19:19Z $ + */ + +/* wifi platform functions for power, interrupt and pre-alloc, either + * from Android-like platform device data, or Broadcom wifi platform + * device data. 
+ * + */ +#ifndef __DHD_LINUX_H__ +#define __DHD_LINUX_H__ + +#include +#include +#include +#include +#include +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +#endif /* defined(WL_WIRELESS_EXT) */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ + +/* dongle status */ +enum wifi_adapter_status { + WIFI_STATUS_POWER_ON = 0, + WIFI_STATUS_ATTACH, + WIFI_STATUS_FW_READY, + WIFI_STATUS_DETTACH +}; +#define wifi_chk_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status)) +#define wifi_get_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status)) +#define wifi_set_adapter_status(adapter, stat) (set_bit(stat, &(adapter)->status)) +#define wifi_clr_adapter_status(adapter, stat) (clear_bit(stat, &(adapter)->status)) +#define wifi_chg_adapter_status(adapter, stat) (change_bit(stat, &(adapter)->status)) + +#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */ +#define DHD_FW_READY_TIMEOUT 5000 /* msec : allowed time to finished fw download */ + +typedef struct wifi_adapter_info { + const char *name; + uint irq_num; + uint intr_flags; + const char *fw_path; + const char *nv_path; + const char *clm_path; + const char *conf_path; + void *wifi_plat_data; /* wifi ctrl func, for backward compatibility */ + uint bus_type; + uint bus_num; + uint slot_num; + wait_queue_head_t status_event; + unsigned long status; +#if defined(BT_OVER_SDIO) + const char *btfw_path; +#endif /* defined (BT_OVER_SDIO) */ +#ifdef BUS_POWER_RESTORE +#if defined(BCMSDIO) + struct sdio_func *sdio_func; +#endif /* BCMSDIO */ +#if defined(BCMPCIE) + struct pci_dev *pci_dev; + struct pci_saved_state *pci_saved_state; +#endif /* BCMPCIE */ +#endif +} wifi_adapter_info_t; + +#define WLAN_PLAT_NODFS_FLAG 0x01 +#define WLAN_PLAT_AP_FLAG 0x02 +struct wifi_platform_data { +#ifdef BUS_POWER_RESTORE + int 
(*set_power)(int val, wifi_adapter_info_t *adapter); +#else + int (*set_power)(int val); +#endif + int (*set_reset)(int val); + int (*set_carddetect)(int val); + void *(*mem_prealloc)(int section, unsigned long size); + int (*get_mac_addr)(unsigned char *buf); +#ifdef BCMSDIO + int (*get_wake_irq)(void); +#endif // endif +#if defined(CUSTOM_COUNTRY_CODE) + void *(*get_country_code)(char *ccode, u32 flags); +#else /* defined (CUSTOM_COUNTRY_CODE) */ + void *(*get_country_code)(char *ccode); +#endif +}; + +typedef struct bcmdhd_wifi_platdata { + uint num_adapters; + wifi_adapter_info_t *adapters; +} bcmdhd_wifi_platdata_t; + +/** Per STA params. A list of dhd_sta objects are managed in dhd_if */ +typedef struct dhd_sta { + cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */ + uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */ + void * ifp; /* associated dhd_if */ + struct ether_addr ea; /* stations ethernet mac address */ + struct list_head list; /* link into dhd_if::sta_list */ + int idx; /* index of self in dhd_pub::sta_pool[] */ + int ifidx; /* index of interface in dhd */ +} dhd_sta_t; +typedef dhd_sta_t dhd_sta_pool_t; + +int dhd_wifi_platform_register_drv(void); +void dhd_wifi_platform_unregister_drv(void); +wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type, + uint32 bus_num, uint32 slot_num, unsigned long status); +wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, + uint32 slot_num); +int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec); +int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present); +int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr); +int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf); +#ifdef CUSTOM_COUNTRY_CODE +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, + u32 flags); +#else +void 
*wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode); +#endif /* CUSTOM_COUNTRY_CODE */ +void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size); +void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter); + +int dhd_get_fw_mode(struct dhd_info *dhdinfo); +bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo); + +#if defined(BT_OVER_SDIO) +int dhd_net_bus_get(struct net_device *dev); +int dhd_net_bus_put(struct net_device *dev); +#endif /* BT_OVER_SDIO */ +#if defined(WLADPS) +#define ADPS_ENABLE 1 +#define ADPS_DISABLE 0 + +int dhd_enable_adps(dhd_pub_t *dhd, uint8 on); +#endif // endif + +int compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count); + +#endif /* __DHD_LINUX_H__ */ diff --git a/bcmdhd.100.10.315.x/dhd_linux_platdev.c b/bcmdhd.100.10.315.x/dhd_linux_platdev.c new file mode 100644 index 0000000..af392fc --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_platdev.c @@ -0,0 +1,1021 @@ +/* + * Linux platform device for DHD WLAN adapter + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux_platdev.c 765775 2018-06-05 10:10:56Z $ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_WIFI_CONTROL_FUNC) +#include +#endif // endif +#ifdef CONFIG_DTS +#include +#include +#endif /* CONFIG_DTS */ + +#if defined(CUSTOMER_HW) +extern int dhd_wlan_init_plat_data(void); +extern void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter); +#endif /* CUSTOMER_HW */ + +#define WIFI_PLAT_NAME "bcmdhd_wlan" +#define WIFI_PLAT_NAME2 "bcm4329_wlan" +#define WIFI_PLAT_EXT "bcmdhd_wifi_platform" + +#ifdef DHD_WIFI_SHUTDOWN +extern void wifi_plat_dev_drv_shutdown(struct platform_device *pdev); +#endif // endif + +#ifdef CONFIG_DTS +struct regulator *wifi_regulator = NULL; +#endif /* CONFIG_DTS */ + +bool cfg_multichip = FALSE; +bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL; +static int wifi_plat_dev_probe_ret = 0; +static bool is_power_on = FALSE; +#if !defined(CONFIG_DTS) +#if defined(DHD_OF_SUPPORT) +static bool dts_enabled = TRUE; +extern struct resource dhd_wlan_resources; +extern struct wifi_platform_data dhd_wlan_control; +#else +static bool dts_enabled = FALSE; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif // endif +struct resource dhd_wlan_resources = {0}; +struct wifi_platform_data dhd_wlan_control = {0}; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif +#endif /* DHD_OF_SUPPORT */ +#endif /* !defined(CONFIG_DTS) */ + +static int dhd_wifi_platform_load(void); + +extern void* wl_cfg80211_get_dhdp(struct 
net_device *dev); + +extern int dhd_wlan_init(void); +extern int dhd_wlan_deinit(void); + +#ifdef ENABLE_4335BT_WAR +extern int bcm_bt_lock(int cookie); +extern void bcm_bt_unlock(int cookie); +static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */ +#endif /* ENABLE_4335BT_WAR */ + +wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type, + uint32 bus_num, uint32 slot_num, unsigned long status) +{ + int i; + + if (dhd_wifi_platdata == NULL) + return NULL; + + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i]; + if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) && + (adapter->bus_num == -1 || adapter->bus_num == bus_num) && + (adapter->slot_num == -1 || adapter->slot_num == slot_num) +#if defined(ENABLE_INSMOD_NO_FW_LOAD) + && (wifi_chk_adapter_status(adapter, status)) +#endif + ) { + DHD_ERROR(("attach adapter info '%s'\n", adapter->name)); + return adapter; + } + } + return NULL; +} + +wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num) +{ + int i; + + if (dhd_wifi_platdata == NULL) + return NULL; + + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i]; + if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) && + (adapter->bus_num == -1 || adapter->bus_num == bus_num) && + (adapter->slot_num == -1 || adapter->slot_num == slot_num)) { + DHD_TRACE(("found adapter info '%s'\n", adapter->name)); + return adapter; + } + } + return NULL; +} + +void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size) +{ + void *alloc_ptr = NULL; + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + if (plat_data->mem_prealloc) { + alloc_ptr = plat_data->mem_prealloc(section, size); + if (alloc_ptr) { + 
DHD_INFO(("success alloc section %d\n", section)); + if (size != 0L) + bzero(alloc_ptr, size); + return alloc_ptr; + } + } else + return NULL; + + DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section)); + return NULL; +} + +void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter) +{ + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + return plat_data->mem_prealloc; +} + +int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr) +{ + if (adapter == NULL) + return -1; + if (irq_flags_ptr) + *irq_flags_ptr = adapter->intr_flags; + return adapter->irq_num; +} + +int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec) +{ + int err = 0; +#ifndef CONFIG_DTS + struct wifi_platform_data *plat_data; +#endif +#ifdef BT_OVER_SDIO + if (is_power_on == on) { + return -EINVAL; + } +#endif /* BT_OVER_SDIO */ + if (on) { + wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } else { + wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } +#ifdef CONFIG_DTS + if (on) { + printf("======== PULL WL_REG_ON HIGH! ========\n"); + err = regulator_enable(wifi_regulator); + is_power_on = TRUE; + } + else { + printf("======== PULL WL_REG_ON LOW! 
========\n"); + err = regulator_disable(wifi_regulator); + is_power_on = FALSE; + } + if (err < 0) { + DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__)); + goto fail; + } +#else + if (!adapter || !adapter->wifi_plat_data) { + err = -EINVAL; + goto fail; + } + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s = %d\n", __FUNCTION__, on)); + if (plat_data->set_power) { +#ifdef ENABLE_4335BT_WAR + if (on) { + printk("WiFi: trying to acquire BT lock\n"); + if (bcm_bt_lock(lock_cookie_wifi) != 0) + printk("** WiFi: timeout in acquiring bt lock**\n"); + printk("%s: btlock acquired\n", __FUNCTION__); + } + else { + /* For a exceptional case, release btlock */ + bcm_bt_unlock(lock_cookie_wifi); + } +#endif /* ENABLE_4335BT_WAR */ + +#ifdef BUS_POWER_RESTORE + err = plat_data->set_power(on, adapter); +#else + err = plat_data->set_power(on); +#endif + } + + if (msec && !err) + OSL_SLEEP(msec); + + if (on && !err) + is_power_on = TRUE; + else + is_power_on = FALSE; + +#endif /* CONFIG_DTS */ + + return err; +fail: + if (on) { + wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } else { + wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON); + } + return err; +} + +int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present) +{ + int err = 0; + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present)); + if (plat_data->set_carddetect) { + err = plat_data->set_carddetect(device_present); + } + return err; + +} + +int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf) +{ + struct wifi_platform_data *plat_data; + + DHD_ERROR(("%s\n", __FUNCTION__)); + if (!buf || !adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + if (plat_data->get_mac_addr) { + return plat_data->get_mac_addr(buf); + } + return 
-EOPNOTSUPP; +} + +void * +#ifdef CUSTOM_COUNTRY_CODE +wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags) +#else +wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode) +#endif /* CUSTOM_COUNTRY_CODE */ +{ + /* get_country_code was added after 2.6.39 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct wifi_platform_data *plat_data; + + if (!ccode || !adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + + DHD_TRACE(("%s\n", __FUNCTION__)); + if (plat_data->get_country_code) { +#ifdef CUSTOM_COUNTRY_CODE + return plat_data->get_country_code(ccode, flags); +#else + return plat_data->get_country_code(ccode); +#endif /* CUSTOM_COUNTRY_CODE */ + } +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ + + return NULL; +} + +#ifndef CUSTOMER_HW +static int wifi_plat_dev_drv_probe(struct platform_device *pdev) +{ + struct resource *resource; + wifi_adapter_info_t *adapter; +#if defined(CONFIG_DTS) && defined(CUSTOMER_OOB) + int irq, gpio; +#endif /* CONFIG_DTS */ + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; + adapter->wifi_plat_data = (void *)&dhd_wlan_control; +// adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data); + + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq"); + if (resource == NULL) + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq"); + if (resource) { + adapter->irq_num = resource->start; + adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; +#ifdef DHD_ISR_NO_SUSPEND + adapter->intr_flags |= IRQF_NO_SUSPEND; +#endif // endif + } + +#ifdef CONFIG_DTS + wifi_regulator = regulator_get(&pdev->dev, "wlreg_on"); + if 
(wifi_regulator == NULL) { + DHD_ERROR(("%s regulator is null\n", __FUNCTION__)); + return -1; + } + +#if defined(CUSTOMER_OOB) + /* This is to get the irq for the OOB */ + gpio = of_get_gpio(pdev->dev.of_node, 0); + + if (gpio < 0) { + DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__)); + return -1; + } + irq = gpio_to_irq(gpio); + if (irq < 0) { + DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__)); + return -1; + } + adapter->irq_num = irq; + + /* need to change the flags according to our requirement */ + adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | + IORESOURCE_IRQ_SHAREABLE; +#endif +#endif /* CONFIG_DTS */ + + wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); + return wifi_plat_dev_probe_ret; +} + +static int wifi_plat_dev_drv_remove(struct platform_device *pdev) +{ + wifi_adapter_info_t *adapter; + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; + if (is_power_on) { +#ifdef BCMPCIE + wifi_platform_bus_enumerate(adapter, FALSE); + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); +#else + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); +#endif /* BCMPCIE */ + } + +#ifdef CONFIG_DTS + regulator_put(wifi_regulator); +#endif /* CONFIG_DTS */ + return 0; +} + +static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + bcmsdh_oob_intr_set(0); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +static int wifi_plat_dev_drv_resume(struct platform_device *pdev) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 
39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + if (dhd_os_check_if_up(wl_cfg80211_get_dhdp())) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +#ifdef CONFIG_DTS +static const struct of_device_id wifi_device_dt_match[] = { + { .compatible = "android,bcmdhd_wlan", }, + {}, +}; +#endif /* CONFIG_DTS */ + +static struct platform_driver wifi_platform_dev_driver = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, +#ifdef DHD_WIFI_SHUTDOWN + .shutdown = wifi_plat_dev_drv_shutdown, +#endif // endif + .driver = { + .name = WIFI_PLAT_NAME, +#ifdef CONFIG_DTS + .of_match_table = wifi_device_dt_match, +#endif /* CONFIG_DTS */ + } +}; + +static struct platform_driver wifi_platform_dev_driver_legacy = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, +#ifdef DHD_WIFI_SHUTDOWN + .shutdown = wifi_plat_dev_drv_shutdown, +#endif // endif + .driver = { + .name = WIFI_PLAT_NAME2, + } +}; + +static int wifi_platdev_match(struct device *dev, void *data) +{ + char *name = (char*)data; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + const struct platform_device *pdev = to_platform_device(dev); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + if (strcmp(pdev->name, name) == 0) { + DHD_ERROR(("found wifi platform device %s\n", name)); + return TRUE; + } + + return FALSE; +} +#endif + +static int wifi_ctrlfunc_register_drv(void) +{ + wifi_adapter_info_t *adapter; + +#ifndef CUSTOMER_HW + int err = 0; + struct device *dev1, *dev2; + dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match); + dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, 
wifi_platdev_match); +#endif + +#ifdef BOARD_HIKEY_MODULAR + dhd_wlan_init(); +#endif /* BOARD_HIKEY_MODULAR */ + +#if !defined(CONFIG_DTS) && !defined(CUSTOMER_HW) + if (!dts_enabled) { + if (dev1 == NULL && dev2 == NULL) { + DHD_ERROR(("no wifi platform data, skip\n")); + return -ENXIO; + } + } +#endif /* !defined(CONFIG_DTS) */ + + /* multi-chip support not enabled, build one adapter information for + * DHD (either SDIO, USB or PCIe) + */ + adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL); + if (adapter == NULL) { + DHD_ERROR(("%s:adapter alloc failed", __FUNCTION__)); + return -ENOMEM; + } + adapter->name = "DHD generic adapter"; + adapter->bus_type = -1; + adapter->bus_num = -1; + adapter->slot_num = -1; + adapter->irq_num = -1; + is_power_on = FALSE; + wifi_plat_dev_probe_ret = 0; + dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL); + dhd_wifi_platdata->num_adapters = 1; + dhd_wifi_platdata->adapters = adapter; + init_waitqueue_head(&adapter->status_event); + +#ifndef CUSTOMER_HW + if (dev1) { + err = platform_driver_register(&wifi_platform_dev_driver); + if (err) { + DHD_ERROR(("%s: failed to register wifi ctrl func driver\n", + __FUNCTION__)); + return err; + } + } + if (dev2) { + err = platform_driver_register(&wifi_platform_dev_driver_legacy); + if (err) { + DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n", + __FUNCTION__)); + return err; + } + } +#endif + +#if !defined(CONFIG_DTS) + if (dts_enabled) { + struct resource *resource; + adapter->wifi_plat_data = (void *)&dhd_wlan_control; + resource = &dhd_wlan_resources; +#ifdef CUSTOMER_HW + wifi_plat_dev_probe_ret = dhd_wlan_init_plat_data(); + if (wifi_plat_dev_probe_ret) + return wifi_plat_dev_probe_ret; +#endif + adapter->irq_num = resource->start; + adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; +#ifdef DHD_ISR_NO_SUSPEND + adapter->intr_flags |= IRQF_NO_SUSPEND; +#endif // endif + wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); + } 
+#endif /* !defined(CONFIG_DTS) */ + +#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW) + wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver); +#endif /* CONFIG_DTS */ + + /* return probe function's return value if registration succeeded */ + return wifi_plat_dev_probe_ret; +} + +void wifi_ctrlfunc_unregister_drv(void) +{ +#ifndef CONFIG_DTS + wifi_adapter_info_t *adapter; +#endif + +#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW) + DHD_ERROR(("unregister wifi platform drivers\n")); + platform_driver_unregister(&wifi_platform_dev_driver); +#else +#ifndef CUSTOMER_HW + struct device *dev1, *dev2; + dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match); + dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match); + if (!dts_enabled) + if (dev1 == NULL && dev2 == NULL) + return; +#endif + DHD_ERROR(("unregister wifi platform drivers\n")); +#ifndef CUSTOMER_HW + if (dev1) + platform_driver_unregister(&wifi_platform_dev_driver); + if (dev2) + platform_driver_unregister(&wifi_platform_dev_driver_legacy); +#endif + if (dts_enabled) { + adapter = &dhd_wifi_platdata->adapters[0]; + if (is_power_on) { + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + } + wifi_platform_bus_enumerate(adapter, FALSE); + } +#ifdef BOARD_HIKEY_MODULAR + dhd_wlan_deinit(); +#endif /* BOARD_HIKEY_MODULAR */ +#endif /* !defined(CONFIG_DTS) */ + +#if defined(CUSTOMER_HW) + dhd_wlan_deinit_plat_data(adapter); +#endif + + kfree(dhd_wifi_platdata->adapters); + dhd_wifi_platdata->adapters = NULL; + dhd_wifi_platdata->num_adapters = 0; + kfree(dhd_wifi_platdata); + dhd_wifi_platdata = NULL; +} + +#ifndef CUSTOMER_HW +static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev) +{ + dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data); + + return dhd_wifi_platform_load(); +} + +static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev) +{ + int i; + 
wifi_adapter_info_t *adapter; + ASSERT(dhd_wifi_platdata != NULL); + + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } + return 0; +} + +static struct platform_driver dhd_wifi_platform_dev_driver = { + .probe = bcmdhd_wifi_plat_dev_drv_probe, + .remove = bcmdhd_wifi_plat_dev_drv_remove, + .driver = { + .name = WIFI_PLAT_EXT, + } +}; +#endif + +int dhd_wifi_platform_register_drv(void) +{ + int err = 0; +#ifndef CUSTOMER_HW + struct device *dev; + + /* register Broadcom wifi platform data driver if multi-chip is enabled, + * otherwise use Android style wifi platform data (aka wifi control function) + * if it exists + * + * to support multi-chip DHD, Broadcom wifi platform data device must + * be added in kernel early boot (e.g. board config file). + */ + if (cfg_multichip) { + dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match); + if (dev == NULL) { + DHD_ERROR(("bcmdhd wifi platform data device not found!!\n")); + return -ENXIO; + } + err = platform_driver_register(&dhd_wifi_platform_dev_driver); + } else +#endif + { + err = wifi_ctrlfunc_register_drv(); + + /* no wifi ctrl func either, load bus directly and ignore this error */ + if (err) { + if (err == -ENXIO) { + /* wifi ctrl function does not exist */ + err = dhd_wifi_platform_load(); + } else { + /* unregister driver due to initialization failure */ + wifi_ctrlfunc_unregister_drv(); + } + } + } + + return err; +} + +#ifdef BCMPCIE +static int dhd_wifi_platform_load_pcie(void) +{ + int err = 0; + int i; + wifi_adapter_info_t *adapter; + + BCM_REFERENCE(i); + BCM_REFERENCE(adapter); + + if (dhd_wifi_platdata == NULL) { + err = dhd_bus_register(); + } else { + if (dhd_download_fw_on_driverload) { + /* power up all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + int 
retry = POWERUP_MAX_RETRY; + adapter = &dhd_wifi_platdata->adapters[i]; + + DHD_ERROR(("Power-up adapter '%s'\n", adapter->name)); + DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n", + adapter->irq_num, adapter->intr_flags, adapter->fw_path, + adapter->nv_path)); + DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n", + adapter->bus_type, adapter->bus_num, adapter->slot_num)); + + do { + err = wifi_platform_set_power(adapter, + TRUE, WIFI_TURNON_DELAY); + if (err) { + DHD_ERROR(("failed to power up %s," + " %d retry left\n", + adapter->name, retry)); + /* WL_REG_ON state unknown, Power off forcely */ + wifi_platform_set_power(adapter, + FALSE, WIFI_TURNOFF_DELAY); + continue; + } else { + err = wifi_platform_bus_enumerate(adapter, TRUE); + if (err) { + DHD_ERROR(("failed to enumerate bus %s, " + "%d retry left\n", + adapter->name, retry)); + wifi_platform_set_power(adapter, FALSE, + WIFI_TURNOFF_DELAY); + } else { + break; + } + } + } while (retry--); + + if (retry < 0) { + DHD_ERROR(("failed to power up %s, max retry reached**\n", + adapter->name)); + return -ENODEV; + } + } + } + + err = dhd_bus_register(); + + if (err) { + DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__)); + if (dhd_download_fw_on_driverload) { + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_bus_enumerate(adapter, FALSE); + wifi_platform_set_power(adapter, + FALSE, WIFI_TURNOFF_DELAY); + } + } + } + } + + return err; +} +#else +static int dhd_wifi_platform_load_pcie(void) +{ + return 0; +} +#endif /* BCMPCIE */ + +void dhd_wifi_platform_unregister_drv(void) +{ +#ifndef CUSTOMER_HW + if (cfg_multichip) + platform_driver_unregister(&dhd_wifi_platform_dev_driver); + else +#endif + wifi_ctrlfunc_unregister_drv(); +} + +extern int dhd_watchdog_prio; +extern int dhd_dpc_prio; +extern uint dhd_deferred_tx; +#if defined(BCMLXSDMMC) || defined(BCMDBUS) +extern struct semaphore 
dhd_registration_sem; +#endif // endif + +#ifdef BCMSDIO +static int dhd_wifi_platform_load_sdio(void) +{ + int i; + int err = 0; + wifi_adapter_info_t *adapter; + + BCM_REFERENCE(i); + BCM_REFERENCE(adapter); + + /* Sanity check on the module parameters + * - Both watchdog and DPC as tasklets are ok + * - If both watchdog and DPC are threads, TX must be deferred + */ + if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) && + !(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx)) + return -EINVAL; + +#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) + sema_init(&dhd_registration_sem, 0); +#endif // endif + + if (dhd_wifi_platdata == NULL) { + DHD_ERROR(("DHD wifi platform data is required for Android build\n")); + DHD_ERROR(("DHD registering bus directly\n")); + /* x86 bring-up PC needs no power-up operations */ + err = dhd_bus_register(); + return err; + } + +#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) + /* power up all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + bool chip_up = FALSE; + int retry = POWERUP_MAX_RETRY; + struct semaphore dhd_chipup_sem; + + adapter = &dhd_wifi_platdata->adapters[i]; + + DHD_ERROR(("Power-up adapter '%s'\n", adapter->name)); + DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n", + adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path)); + DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n", + adapter->bus_type, adapter->bus_num, adapter->slot_num)); + + do { +#ifndef CUSTOMER_HW_AMLOGIC + sema_init(&dhd_chipup_sem, 0); + err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem); + if (err) { + DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n", + __FUNCTION__, err)); + return err; + } +#endif + err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY); + if (err) { + DHD_ERROR(("%s: wifi pwr on error ! 
\n", __FUNCTION__)); + dhd_bus_unreg_sdio_notify(); + /* WL_REG_ON state unknown, Power off forcely */ + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + continue; + } else { + wifi_platform_bus_enumerate(adapter, TRUE); + } +#ifdef CUSTOMER_HW_AMLOGIC + sema_init(&dhd_chipup_sem, 0); + err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem); + if (err) { + DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n", + __FUNCTION__, err)); + return err; + } +#endif + + if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) { + dhd_bus_unreg_sdio_notify(); + chip_up = TRUE; + break; + } + + DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry)); + dhd_bus_unreg_sdio_notify(); + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } while (retry--); + + if (!chip_up) { + DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name)); + return -ENODEV; + } + + } + + err = dhd_bus_register(); + + if (err) { + DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__)); + goto fail; + } + + /* + * Wait till MMC sdio_register_driver callback called and made driver attach. 
+ * It's needed to make sync up exit from dhd insmod and + * Kernel MMC sdio device callback registration + */ + err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)); + if (err) { + DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__)); + dhd_bus_unregister(); + goto fail; + } + + return err; + +fail: + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } +#endif // endif + + return err; +} +#else /* BCMSDIO */ +static int dhd_wifi_platform_load_sdio(void) +{ + return 0; +} +#endif /* BCMSDIO */ + +#ifdef BCMDBUS +static int dhd_wifi_platform_load_usb(void) +{ + wifi_adapter_info_t *adapter; + s32 timeout = -1; + int i; + int err = 0; + enum wifi_adapter_status wait_status; + + err = dhd_bus_register(); + if (err) { + DHD_ERROR(("%s: usb_register failed\n", __FUNCTION__)); + goto exit; + } + + /* power up all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + DHD_ERROR(("Power-up adapter '%s'\n", adapter->name)); + DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n", + adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path)); + DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n", + adapter->bus_type, adapter->bus_num, adapter->slot_num)); + err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY); + if (err) { + DHD_ERROR(("failed to wifi_platform_set_power on %s\n", adapter->name)); + goto fail; + } + if (dhd_download_fw_on_driverload) + wait_status = WIFI_STATUS_ATTACH; + else + wait_status = WIFI_STATUS_DETTACH; + timeout = wait_event_interruptible_timeout(adapter->status_event, + wifi_get_adapter_status(adapter, wait_status), + msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)); + if (timeout <= 0) { + err = -1; + 
DHD_ERROR(("%s: usb_register_driver timeout\n", __FUNCTION__)); + goto fail; + } + } + +exit: + return err; + +fail: + dhd_bus_unregister(); + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + } + + return err; +} +#else /* BCMDBUS */ +static int dhd_wifi_platform_load_usb(void) +{ + return 0; +} +#endif /* BCMDBUS */ + +static int dhd_wifi_platform_load() +{ + int err = 0; + printf("%s: Enter\n", __FUNCTION__); + + wl_android_init(); + + if ((err = dhd_wifi_platform_load_usb())) + goto end; + else if ((err = dhd_wifi_platform_load_sdio())) + goto end; + else + err = dhd_wifi_platform_load_pcie(); + +end: + if (err) + wl_android_exit(); +#if !defined(MULTIPLE_SUPPLICANT) + else + wl_android_post_init(); +#endif + + return err; +} diff --git a/bcmdhd.100.10.315.x/dhd_linux_sched.c b/bcmdhd.100.10.315.x/dhd_linux_sched.c new file mode 100644 index 0000000..d1268c7 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_sched.c @@ -0,0 +1,51 @@ +/* + * Expose some of the kernel scheduler routines + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $ + */ +#include +#include +#include +#include +#include + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param) +{ + int rc = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = sched_setscheduler(p, policy, param); +#endif /* LinuxVer */ + return rc; +} + +int get_scheduler_policy(struct task_struct *p) +{ + int rc = SCHED_NORMAL; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = p->policy; +#endif /* LinuxVer */ + return rc; +} diff --git a/bcmdhd.100.10.315.x/dhd_linux_wq.c b/bcmdhd.100.10.315.x/dhd_linux_wq.c new file mode 100644 index 0000000..54b13bd --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_wq.c @@ -0,0 +1,404 @@ +/* + * Broadcom Dongle Host Driver (DHD), Generic work queue framework + * Generic interface to handle dhd deferred work events + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct dhd_deferred_event { + u8 event; /* holds the event */ + void *event_data; /* holds event specific data */ + event_handler_t event_handler; + unsigned long pad; /* for memory alignment to power of 2 */ +} dhd_deferred_event_t; + +#define DEFRD_EVT_SIZE (sizeof(dhd_deferred_event_t)) + +/* + * work events may occur simultaneously. + * can hold upto 64 low priority events and 16 high priority events + */ +#define DHD_PRIO_WORK_FIFO_SIZE (16 * DEFRD_EVT_SIZE) +#define DHD_WORK_FIFO_SIZE (64 * DEFRD_EVT_SIZE) + +#define DHD_FIFO_HAS_FREE_SPACE(fifo) \ + ((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE)) +#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \ + ((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE)) + +struct dhd_deferred_wq { + struct work_struct deferred_work; /* should be the first member */ + + struct kfifo *prio_fifo; + struct kfifo *work_fifo; + u8 *prio_fifo_buf; + u8 *work_fifo_buf; + spinlock_t work_lock; + void *dhd_info; /* review: does it require */ + u32 event_skip_mask; +}; + +static inline struct kfifo* +dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock) +{ + struct kfifo *fifo; + gfp_t flags = CAN_SLEEP()? 
GFP_KERNEL : GFP_ATOMIC; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) + fifo = kfifo_init(buf, size, flags, lock); +#else + fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags); + if (!fifo) { + return NULL; + } + kfifo_init(fifo, buf, size); +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ + return fifo; +} + +static inline void +dhd_kfifo_free(struct kfifo *fifo) +{ + kfifo_free(fifo); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31)) + /* FC11 releases the fifo memory */ + kfree(fifo); +#endif // endif +} + +/* deferred work functions */ +static void dhd_deferred_work_handler(struct work_struct *data); + +void* +dhd_deferred_work_init(void *dhd_info) +{ + struct dhd_deferred_wq *work = NULL; + u8* buf; + unsigned long fifo_size = 0; + gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; + + if (!dhd_info) { + DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__)); + goto return_null; + } + + work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), + flags); + if (!work) { + DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__)); + goto return_null; + } + + INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler); + + /* initialize event fifo */ + spin_lock_init(&work->work_lock); + + /* allocate buffer to hold prio events */ + fifo_size = DHD_PRIO_WORK_FIFO_SIZE; + fifo_size = is_power_of_2(fifo_size) ? fifo_size : + roundup_pow_of_two(fifo_size); + buf = (u8*)kzalloc(fifo_size, flags); + if (!buf) { + DHD_ERROR(("%s: prio work fifo allocation failed\n", + __FUNCTION__)); + goto return_null; + } + + /* Initialize prio event fifo */ + work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); + if (!work->prio_fifo) { + kfree(buf); + goto return_null; + } + + /* allocate buffer to hold work events */ + fifo_size = DHD_WORK_FIFO_SIZE; + fifo_size = is_power_of_2(fifo_size) ? 
fifo_size : + roundup_pow_of_two(fifo_size); + buf = (u8*)kzalloc(fifo_size, flags); + if (!buf) { + DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__)); + goto return_null; + } + + /* Initialize event fifo */ + work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); + if (!work->work_fifo) { + kfree(buf); + goto return_null; + } + + work->dhd_info = dhd_info; + work->event_skip_mask = 0; + DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__)); + return work; + +return_null: + if (work) { + dhd_deferred_work_deinit(work); + } + + return NULL; +} + +void +dhd_deferred_work_deinit(void *work) +{ + struct dhd_deferred_wq *deferred_work = work; + + if (!deferred_work) { + DHD_ERROR(("%s: deferred work has been freed already\n", + __FUNCTION__)); + return; + } + + /* cancel the deferred work handling */ + cancel_work_sync((struct work_struct *)deferred_work); + + /* + * free work event fifo. + * kfifo_free frees locally allocated fifo buffer + */ + if (deferred_work->prio_fifo) { + dhd_kfifo_free(deferred_work->prio_fifo); + } + + if (deferred_work->work_fifo) { + dhd_kfifo_free(deferred_work->work_fifo); + } + + kfree(deferred_work); +} + +/* select kfifo according to priority */ +static inline struct kfifo * +dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq, + u8 priority) +{ + if (priority == DHD_WQ_WORK_PRIORITY_HIGH) { + return deferred_wq->prio_fifo; + } else if (priority == DHD_WQ_WORK_PRIORITY_LOW) { + return deferred_wq->work_fifo; + } else { + return NULL; + } +} + +/* + * Prepares event to be queued + * Schedules the event + */ +int +dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, + event_handler_t event_handler, u8 priority) +{ + struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq; + struct kfifo *fifo; + dhd_deferred_event_t deferred_event; + int bytes_copied = 0; + + if (!deferred_wq) { + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); + ASSERT(0); + return 
DHD_WQ_STS_UNINITIALIZED; + } + + if (!event || (event >= DHD_MAX_WQ_EVENTS)) { + DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__, + event)); + return DHD_WQ_STS_UNKNOWN_EVENT; + } + + if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) { + DHD_ERROR(("%s: unknown priority, priority=%d\n", + __FUNCTION__, priority)); + return DHD_WQ_STS_UNKNOWN_PRIORITY; + } + + if ((deferred_wq->event_skip_mask & (1 << event))) { + DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n", + __FUNCTION__, deferred_wq->event_skip_mask)); + return DHD_WQ_STS_EVENT_SKIPPED; + } + + /* + * default element size is 1, which can be changed + * using kfifo_esize(). Older kernel(FC11) doesn't support + * changing element size. For compatibility changing + * element size is not prefered + */ + ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); + ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); + + deferred_event.event = event; + deferred_event.event_data = event_data; + deferred_event.event_handler = event_handler; + + fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority); + if (DHD_FIFO_HAS_FREE_SPACE(fifo)) { + bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event, + DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } + if (bytes_copied != DEFRD_EVT_SIZE) { + DHD_ERROR(("%s: failed to schedule deferred work, " + "priority=%d, bytes_copied=%d\n", __FUNCTION__, + priority, bytes_copied)); + return DHD_WQ_STS_SCHED_FAILED; + } + schedule_work((struct work_struct *)deferred_wq); + return DHD_WQ_STS_OK; +} + +static bool +dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, + dhd_deferred_event_t *event) +{ + int bytes_copied = 0; + + if (!deferred_wq) { + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); + return DHD_WQ_STS_UNINITIALIZED; + } + + /* + * default element size is 1 byte, which can be changed + * using kfifo_esize(). Older kernel(FC11) doesn't support + * changing element size. 
For compatibility changing + * element size is not prefered + */ + ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); + ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); + + /* handle priority work */ + if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) { + bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo, + event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } + + /* handle normal work if priority work doesn't have enough data */ + if ((bytes_copied != DEFRD_EVT_SIZE) && + DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) { + bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo, + event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } + + return (bytes_copied == DEFRD_EVT_SIZE); +} + +static inline void +dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event) +{ + if (!work_event) { + DHD_ERROR(("%s: work_event is null\n", __FUNCTION__)); + return; + } + + DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__, + work_event->event)); + DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__, + work_event->event_data)); + DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__, + work_event->event_handler)); +} + +/* + * Called when work is scheduled + */ +static void +dhd_deferred_work_handler(struct work_struct *work) +{ + struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; + dhd_deferred_event_t work_event; + + if (!deferred_work) { + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); + return; + } + + do { + if (!dhd_get_scheduled_work(deferred_work, &work_event)) { + DHD_TRACE(("%s: no event to handle\n", __FUNCTION__)); + break; + } + + if (work_event.event >= DHD_MAX_WQ_EVENTS) { + DHD_ERROR(("%s: unknown event\n", __FUNCTION__)); + dhd_deferred_dump_work_event(&work_event); + ASSERT(work_event.event < DHD_MAX_WQ_EVENTS); + continue; + } + + if (work_event.event_handler) { + work_event.event_handler(deferred_work->dhd_info, + work_event.event_data, work_event.event); + } else { + 
DHD_ERROR(("%s: event handler is null\n", + __FUNCTION__)); + dhd_deferred_dump_work_event(&work_event); + ASSERT(work_event.event_handler != NULL); + } + } while (1); + + return; +} + +void +dhd_deferred_work_set_skip(void *work, u8 event, bool set) +{ + struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work; + + if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) { + DHD_ERROR(("%s: Invalid!!\n", __FUNCTION__)); + return; + } + + if (set) { + /* Set */ + deferred_wq->event_skip_mask |= (1 << event); + } else { + /* Clear */ + deferred_wq->event_skip_mask &= ~(1 << event); + } +} diff --git a/bcmdhd.100.10.315.x/dhd_linux_wq.h b/bcmdhd.100.10.315.x/dhd_linux_wq.h new file mode 100644 index 0000000..d373f04 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_linux_wq.h @@ -0,0 +1,91 @@ +/* + * Broadcom Dongle Host Driver (DHD), Generic work queue framework + * Generic interface to handle dhd deferred work events + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux_wq.h 769906 2018-06-28 11:52:31Z $ + */ +#ifndef _dhd_linux_wq_h_ +#define _dhd_linux_wq_h_ +/* + * Work event definitions + */ +enum _wq_event { + DHD_WQ_WORK_IF_ADD = 1, + DHD_WQ_WORK_IF_DEL, + DHD_WQ_WORK_SET_MAC, + DHD_WQ_WORK_SET_MCAST_LIST, + DHD_WQ_WORK_IPV6_NDO, + DHD_WQ_WORK_HANG_MSG, + DHD_WQ_WORK_SSSR_DUMP, + DHD_WQ_WORK_DHD_LOG_DUMP, + DHD_WQ_WORK_PKTLOG_DUMP, + DHD_WQ_WORK_INFORM_DHD_MON, + DHD_WQ_WORK_EVENT_LOGTRACE, + DHD_WQ_WORK_DMA_LB_MEM_REL, + DHD_WQ_WORK_NATOE_EVENT, + DHD_WQ_WORK_NATOE_IOCTL, + DHD_WQ_WORK_MACDBG, + DHD_WQ_WORK_DEBUG_UART_DUMP, + DHD_WQ_WORK_GET_BIGDATA_AP, + DHD_WQ_WORK_SOC_RAM_DUMP, +#ifdef DHD_ERPOM + DHD_WQ_WORK_ERROR_RECOVERY, +#endif /* DHD_ERPOM */ + DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH, +#ifdef DHD_UPDATE_INTF_MAC + DHD_WQ_WORK_IF_UPDATE, +#endif /* DHD_UPDATE_INTF_MAC */ + DHD_MAX_WQ_EVENTS +}; + +/* + * Work event priority + */ +enum wq_priority { + DHD_WQ_WORK_PRIORITY_LOW = 1, + DHD_WQ_WORK_PRIORITY_HIGH, + DHD_WQ_MAX_PRIORITY +}; + +/* + * Error definitions + */ +#define DHD_WQ_STS_OK 0 +#define DHD_WQ_STS_FAILED -1 /* General failure */ +#define DHD_WQ_STS_UNINITIALIZED -2 +#define DHD_WQ_STS_SCHED_FAILED -3 +#define DHD_WQ_STS_UNKNOWN_EVENT -4 +#define DHD_WQ_STS_UNKNOWN_PRIORITY -5 +#define DHD_WQ_STS_EVENT_SKIPPED -6 + +typedef void (*event_handler_t)(void *handle, void *event_data, u8 event); + +void *dhd_deferred_work_init(void *dhd); +void dhd_deferred_work_deinit(void *workq); +int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, + event_handler_t evt_handler, u8 priority); +void dhd_deferred_work_set_skip(void *work, u8 event, bool set); +#endif /* _dhd_linux_wq_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_mschdbg.c 
b/bcmdhd.100.10.315.x/dhd_mschdbg.c new file mode 100644 index 0000000..31645c3 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_mschdbg.c @@ -0,0 +1,778 @@ +/* + * DHD debugability support + * + * <> + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_mschdbg.c 639872 2016-05-25 05:39:30Z $ + */ +#ifdef SHOW_LOGTRACE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static const char *head_log = ""; +#define MSCH_EVENT_HEAD(space) \ + do { \ + MSCH_EVENT(("%s_E: ", head_log)); \ + if (space > 0) { \ + int ii; \ + for (ii = 0; ii < space; ii += 4) MSCH_EVENT((" ")); \ + } \ + } while (0) +#define MSCH_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) + +static uint64 solt_start_time[4], req_start_time[4], profiler_start_time[4]; +static uint32 solt_chanspec[4] = {0, }, req_start[4] = {0, }; +static bool lastMessages = FALSE; + +#define US_PRE_SEC 1000000 +#define DATA_UNIT_FOR_LOG_CNT 4 + +static void dhd_mschdbg_us_to_sec(uint32 time_h, uint32 time_l, uint32 *sec, uint32 *remain) +{ + uint64 cur_time = ((uint64)(ntoh32(time_h)) << 32) | ntoh32(time_l); + uint64 r, u = 0; + + r = cur_time; + while (time_h != 0) { + u += (uint64)((0xffffffff / US_PRE_SEC)) * time_h; + r = cur_time - u * US_PRE_SEC; + time_h = (uint32)(r >> 32); + } + + *sec = (uint32)(u + ((uint32)(r) / US_PRE_SEC)); + *remain = (uint32)(r) % US_PRE_SEC; +} + +static char *dhd_mschdbg_display_time(uint32 time_h, uint32 time_l) +{ + static char display_time[32]; + uint32 s, ss; + + if (time_h == 0xffffffff && time_l == 0xffffffff) { + snprintf(display_time, 31, "-1"); + } else { + dhd_mschdbg_us_to_sec(time_h, time_l, &s, &ss); + snprintf(display_time, 31, "%d.%06d", s, ss); + } + return display_time; +} + +static void +dhd_mschdbg_chanspec_list(int sp, char *data, uint16 ptr, uint16 chanspec_cnt) +{ + int i, cnt = (int)ntoh16(chanspec_cnt); + uint16 *chanspec_list = (uint16 *)(data + ntoh16(ptr)); + char buf[CHANSPEC_STR_LEN]; + chanspec_t c; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((":")); + for (i = 0; i < cnt; i++) { + c = (chanspec_t)ntoh16(chanspec_list[i]); + MSCH_EVENT((" %s", wf_chspec_ntoa(c, buf))); + } + MSCH_EVENT(("\n")); 
+} + +static void +dhd_mschdbg_elem_list(int sp, char *title, char *data, uint16 ptr, uint16 list_cnt) +{ + int i, cnt = (int)ntoh16(list_cnt); + uint32 *list = (uint32 *)(data + ntoh16(ptr)); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("%s_list: ", title)); + for (i = 0; i < cnt; i++) { + MSCH_EVENT(("0x%08x->", ntoh32(list[i]))); + } + MSCH_EVENT(("null\n")); +} + +static void +dhd_mschdbg_req_param_profiler_event_data(int sp, int ver, char *data, uint16 ptr) +{ + int sn = sp + 4; + msch_req_param_profiler_event_data_t *p = + (msch_req_param_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 type, flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_type: ")); + + type = p->req_type; + if (type < 4) { + char *req_type[] = {"fixed", "start-flexible", "duration-flexible", + "both-flexible"}; + MSCH_EVENT(("%s", req_type[type])); + } + else + MSCH_EVENT(("unknown(%d)", type)); + + flags = ntoh16(p->flags); + if (flags & WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS) + MSCH_EVENT((", CHAN_CONTIGUOUS")); + if (flags & WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS) + MSCH_EVENT((", MERGE_CONT_SLOTS")); + if (flags & WL_MSCH_REQ_FLAGS_PREMTABLE) + MSCH_EVENT((", PREMTABLE")); + if (flags & WL_MSCH_REQ_FLAGS_PREMT_CURTS) + MSCH_EVENT((", PREMT_CURTS")); + if (flags & WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE) + MSCH_EVENT((", PREMT_IMMEDIATE")); + MSCH_EVENT((", priority: %d\n", p->priority)); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("start-time: %s, duration: %d(us), interval: %d(us)\n", + dhd_mschdbg_display_time(p->start_time_h, p->start_time_l), + ntoh32(p->duration), ntoh32(p->interval))); + + if (type == WL_MSCH_RT_DUR_FLEX) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("dur_flex: %d(us)\n", ntoh32(p->flex.dur_flex))); + } else if (type == WL_MSCH_RT_BOTH_FLEX) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("min_dur: %d(us), max_away_dur: %d(us)\n", + ntoh32(p->flex.bf.min_dur), ntoh32(p->flex.bf.max_away_dur))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("hi_prio_time: %s, 
hi_prio_interval: %d(us)\n", + dhd_mschdbg_display_time(p->flex.bf.hi_prio_time_h, + p->flex.bf.hi_prio_time_l), + ntoh32(p->flex.bf.hi_prio_interval))); + } +} + +static void +dhd_mschdbg_timeslot_profiler_event_data(int sp, int ver, char *title, char *data, + uint16 ptr, bool empty) +{ + int s, sn = sp + 4; + msch_timeslot_profiler_event_data_t *p = + (msch_timeslot_profiler_event_data_t *)(data + ntoh16(ptr)); + char *state[] = {"NONE", "CHN_SW", "ONCHAN_FIRE", "OFF_CHN_PREP", + "OFF_CHN_DONE", "TS_COMPLETE"}; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("<%s timeslot>: ", title)); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x\n", ntoh32(p->p_timeslot))); + + s = (int)(ntoh32(p->state)); + if (s > 5) s = 0; + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("id: %d, state[%d]: %s, chan_ctxt: [0x%08x]\n", + ntoh32(p->timeslot_id), ntoh32(p->state), state[s], ntoh32(p->p_chan_ctxt))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("fire_time: %s", + dhd_mschdbg_display_time(p->fire_time_h, p->fire_time_l))); + + MSCH_EVENT((", pre_start_time: %s", + dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l))); + + MSCH_EVENT((", end_time: %s", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l))); + + MSCH_EVENT((", sch_dur: %s\n", + dhd_mschdbg_display_time(p->sch_dur_h, p->sch_dur_l))); +} + +static void +dhd_mschdbg_req_timing_profiler_event_data(int sp, int ver, char *title, char *data, + uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_timing_profiler_event_data_t *p = + (msch_req_timing_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 type; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("<%s req_timing>: ", title)); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_timing), ntoh32(p->p_prev), ntoh32(p->p_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("flags:")); + type = ntoh16(p->flags); + if ((type & 0x7f) == 0) + MSCH_EVENT((" NONE")); + 
else { + if (type & WL_MSCH_RC_FLAGS_ONCHAN_FIRE) + MSCH_EVENT((" ONCHAN_FIRE")); + if (type & WL_MSCH_RC_FLAGS_START_FIRE_DONE) + MSCH_EVENT((" START_FIRE")); + if (type & WL_MSCH_RC_FLAGS_END_FIRE_DONE) + MSCH_EVENT((" END_FIRE")); + if (type & WL_MSCH_RC_FLAGS_ONFIRE_DONE) + MSCH_EVENT((" ONFIRE_DONE")); + if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_START) + MSCH_EVENT((" SPLIT_SLOT_START")); + if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_END) + MSCH_EVENT((" SPLIT_SLOT_END")); + if (type & WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE) + MSCH_EVENT((" PRE_ONFIRE_DONE")); + } + MSCH_EVENT(("\n")); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("pre_start_time: %s", + dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l))); + + MSCH_EVENT((", start_time: %s", + dhd_mschdbg_display_time(p->start_time_h, p->start_time_l))); + + MSCH_EVENT((", end_time: %s\n", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l))); + + if (p->p_timeslot && (p->timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("<%s timeslot>: 0x%08x\n", title, ntoh32(p->p_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sn, ver, title, data, p->timeslot_ptr, + (p->timeslot_ptr == 0)); +} + +static void +dhd_mschdbg_chan_ctxt_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_chan_ctxt_profiler_event_data_t *p = + (msch_chan_ctxt_profiler_event_data_t *)(data + ntoh16(ptr)); + chanspec_t c; + char buf[CHANSPEC_STR_LEN]; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_chan_ctxt), ntoh32(p->p_prev), ntoh32(p->p_next))); + + c = (chanspec_t)ntoh16(p->chanspec); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("channel: %s, bf_sch_pending: %s, bf_skipped: %d\n", + wf_chspec_ntoa(c, buf), p->bf_sch_pending? 
"TRUE" : "FALSE", + ntoh32(p->bf_skipped_count))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("bf_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->bf_link_prev), ntoh32(p->bf_link_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("onchan_time: %s", + dhd_mschdbg_display_time(p->onchan_time_h, p->onchan_time_l))); + MSCH_EVENT((", actual_onchan_dur: %s", + dhd_mschdbg_display_time(p->actual_onchan_dur_h, p->actual_onchan_dur_l))); + MSCH_EVENT((", pend_onchan_dur: %s\n", + dhd_mschdbg_display_time(p->pend_onchan_dur_h, p->pend_onchan_dur_l))); + + dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr, + p->req_entity_list_cnt); + dhd_mschdbg_elem_list(sn, "bf_entity", data, p->bf_entity_list_ptr, + p->bf_entity_list_cnt); +} + +static void +dhd_mschdbg_req_entity_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_entity_profiler_event_data_t *p = + (msch_req_entity_profiler_event_data_t *)(data + ntoh16(ptr)); + char buf[CHANSPEC_STR_LEN]; + chanspec_t c; + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_entity), ntoh32(p->req_hdl_link_prev), + ntoh32(p->req_hdl_link_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_hdl: [0x%08x]\n", ntoh32(p->p_req_hdl))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("chan_ctxt_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->chan_ctxt_link_prev), ntoh32(p->chan_ctxt_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("rt_specific_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->rt_specific_link_prev), ntoh32(p->rt_specific_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("start_fixed_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->start_fixed_link_prev), ntoh32(p->start_fixed_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("both_flex_list: prev 0x%08x, next 0x%08x\n", + ntoh32(p->both_flex_list_prev), 
ntoh32(p->both_flex_list_next))); + + c = (chanspec_t)ntoh16(p->chanspec); + MSCH_EVENT_HEAD(sn); + if (ver >= 2) { + MSCH_EVENT(("channel: %s, onchan Id %d, current chan Id %d, priority %d", + wf_chspec_ntoa(c, buf), ntoh16(p->onchan_chn_idx), ntoh16(p->cur_chn_idx), + ntoh16(p->priority))); + flags = ntoh32(p->flags); + if (flags & WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE) + MSCH_EVENT((" : MULTI_INSTANCE\n")); + else + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("actual_start_time: %s, ", + dhd_mschdbg_display_time(p->actual_start_time_h, p->actual_start_time_l))); + MSCH_EVENT(("curts_fire_time: %s, ", + dhd_mschdbg_display_time(p->curts_fire_time_h, p->curts_fire_time_l))); + } else { + MSCH_EVENT(("channel: %s, priority %d, ", wf_chspec_ntoa(c, buf), + ntoh16(p->priority))); + } + MSCH_EVENT(("bf_last_serv_time: %s\n", + dhd_mschdbg_display_time(p->bf_last_serv_time_h, p->bf_last_serv_time_l))); + + dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "current", data, p->cur_slot_ptr, + (p->cur_slot_ptr == 0)); + dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "pending", data, p->pend_slot_ptr, + (p->pend_slot_ptr == 0)); + + if (p->p_chan_ctxt && (p->chan_ctxt_ptr == 0)) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT((": 0x%08x\n", ntoh32(p->p_chan_ctxt))); + } + else + dhd_mschdbg_chan_ctxt_profiler_event_data(sn, ver, data, p->chan_ctxt_ptr, + (p->chan_ctxt_ptr == 0)); +} + +static void +dhd_mschdbg_req_handle_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_handle_profiler_event_data_t *p = + (msch_req_handle_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_handle), ntoh32(p->p_prev), ntoh32(p->p_next))); + + dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr, + p->req_entity_list_cnt); + 
MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("cb_func: [0x%08x], cb_func: [0x%08x]", + ntoh32(p->cb_func), ntoh32(p->cb_ctxt))); + if (ver < 2) { + MSCH_EVENT((", chan_cnt: %d", ntoh16(p->chan_cnt))); + } + flags = ntoh32(p->flags); + if (flags & WL_MSCH_REQ_HDL_FLAGS_NEW_REQ) + MSCH_EVENT((", NEW_REQ")); + MSCH_EVENT(("\n")); + + dhd_mschdbg_req_param_profiler_event_data(sn, ver, data, p->req_param_ptr); + + if (ver >= 2) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_time: %s\n", + dhd_mschdbg_display_time(p->req_time_h, p->req_time_l))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("chan_cnt: %d, chan idx %d, last chan idx %d\n", + ntoh16(p->chan_cnt), ntoh16(p->chan_idx), ntoh16(p->last_chan_idx))); + if (p->chanspec_list && p->chanspec_cnt) { + dhd_mschdbg_chanspec_list(sn, data, p->chanspec_list, p->chanspec_cnt); + } + } +} + +static void +dhd_mschdbg_profiler_profiler_event_data(int sp, int ver, char *data, uint16 ptr) +{ + msch_profiler_profiler_event_data_t *p = + (msch_profiler_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("free list: req_hdl 0x%08x, req_entity 0x%08x," + " chan_ctxt 0x%08x, chanspec 0x%08x\n", + ntoh32(p->free_req_hdl_list), ntoh32(p->free_req_entity_list), + ntoh32(p->free_chan_ctxt_list), ntoh32(p->free_chanspec_list))); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("alloc count: chanspec %d, req_entity %d, req_hdl %d, " + "chan_ctxt %d, timeslot %d\n", + ntoh16(p->msch_chanspec_alloc_cnt), ntoh16(p->msch_req_entity_alloc_cnt), + ntoh16(p->msch_req_hdl_alloc_cnt), ntoh16(p->msch_chan_ctxt_alloc_cnt), + ntoh16(p->msch_timeslot_alloc_cnt))); + + dhd_mschdbg_elem_list(sp, "req_hdl", data, p->msch_req_hdl_list_ptr, + p->msch_req_hdl_list_cnt); + dhd_mschdbg_elem_list(sp, "chan_ctxt", data, p->msch_chan_ctxt_list_ptr, + p->msch_chan_ctxt_list_cnt); + dhd_mschdbg_elem_list(sp, "req_timing", data, p->msch_req_timing_list_ptr, + p->msch_req_timing_list_cnt); + dhd_mschdbg_elem_list(sp, "start_fixed", data, 
p->msch_start_fixed_list_ptr, + p->msch_start_fixed_list_cnt); + dhd_mschdbg_elem_list(sp, "both_flex_req_entity", data, + p->msch_both_flex_req_entity_list_ptr, + p->msch_both_flex_req_entity_list_cnt); + dhd_mschdbg_elem_list(sp, "start_flex", data, p->msch_start_flex_list_ptr, + p->msch_start_flex_list_cnt); + dhd_mschdbg_elem_list(sp, "both_flex", data, p->msch_both_flex_list_ptr, + p->msch_both_flex_list_cnt); + + if (p->p_cur_msch_timeslot && (p->cur_msch_timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": 0x%08x\n", + ntoh32(p->p_cur_msch_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "cur_msch", data, + p->cur_msch_timeslot_ptr, (p->cur_msch_timeslot_ptr == 0)); + + if (p->p_next_timeslot && (p->next_timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": 0x%08x\n", + ntoh32(p->p_next_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "next", data, + p->next_timeslot_ptr, (p->next_timeslot_ptr == 0)); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("ts_id: %d, ", ntoh32(p->ts_id))); + flags = ntoh32(p->flags); + if (flags & WL_MSCH_STATE_IN_TIEMR_CTXT) + MSCH_EVENT(("IN_TIEMR_CTXT, ")); + if (flags & WL_MSCH_STATE_SCHD_PENDING) + MSCH_EVENT(("SCHD_PENDING, ")); + MSCH_EVENT(("slotskip_flags: %d, cur_armed_timeslot: 0x%08x\n", + (ver >= 2)? 
ntoh32(p->slotskip_flag) : 0, ntoh32(p->cur_armed_timeslot))); + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("flex_list_cnt: %d, service_interval: %d, " + "max_lo_prio_interval: %d\n", + ntoh16(p->flex_list_cnt), ntoh32(p->service_interval), + ntoh32(p->max_lo_prio_interval))); +} + +static void dhd_mschdbg_dump_data(dhd_pub_t *dhdp, void *raw_event_ptr, int type, + char *data, int len) +{ + uint64 t = 0, tt = 0; + uint32 s = 0, ss = 0; + int wlc_index, ver; + + ver = (type & WL_MSCH_PROFILER_VER_MASK) >> WL_MSCH_PROFILER_VER_SHIFT; + wlc_index = (type & WL_MSCH_PROFILER_WLINDEX_MASK) >> WL_MSCH_PROFILER_WLINDEX_SHIFT; + if (wlc_index >= 4) + return; + + type &= WL_MSCH_PROFILER_TYPE_MASK; + if (type <= WL_MSCH_PROFILER_PROFILE_END) { + msch_profiler_event_data_t *pevent = (msch_profiler_event_data_t *)data; + tt = ((uint64)(ntoh32(pevent->time_hi)) << 32) | ntoh32(pevent->time_lo); + dhd_mschdbg_us_to_sec(pevent->time_hi, pevent->time_lo, &s, &ss); + } + + if (lastMessages && (type != WL_MSCH_PROFILER_MESSAGE) && + (type != WL_MSCH_PROFILER_EVENT_LOG)) { + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + lastMessages = FALSE; + } + + switch (type) { + case WL_MSCH_PROFILER_START: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d START\n", s, ss)); + break; + + case WL_MSCH_PROFILER_EXIT: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d EXIT\n", s, ss)); + break; + + case WL_MSCH_PROFILER_REQ: + { + msch_req_profiler_event_data_t *p = (msch_req_profiler_event_data_t *)data; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("===============================\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] REGISTER:\n", s, ss, wlc_index)); + dhd_mschdbg_req_param_profiler_event_data(4, ver, data, p->req_param_ptr); + dhd_mschdbg_chanspec_list(4, data, p->chanspec_ptr, p->chanspec_cnt); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("===============================\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + } + break; + + case 
WL_MSCH_PROFILER_CALLBACK: + { + msch_callback_profiler_event_data_t *p = + (msch_callback_profiler_event_data_t *)data; + char buf[CHANSPEC_STR_LEN]; + chanspec_t chanspec; + uint16 cbtype; + + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] CALLBACK: ", s, ss, wlc_index)); + chanspec = (chanspec_t)ntoh16(p->chanspec); + MSCH_EVENT(("req_hdl[0x%08x], channel %s --", + ntoh32(p->p_req_hdl), wf_chspec_ntoa(chanspec, buf))); + + cbtype = ntoh16(p->type); + if (cbtype & WL_MSCH_CT_ON_CHAN) + MSCH_EVENT((" ON_CHAN")); + if (cbtype & WL_MSCH_CT_OFF_CHAN) + MSCH_EVENT((" OFF_CHAN")); + if (cbtype & WL_MSCH_CT_REQ_START) + MSCH_EVENT((" REQ_START")); + if (cbtype & WL_MSCH_CT_REQ_END) + MSCH_EVENT((" REQ_END")); + if (cbtype & WL_MSCH_CT_SLOT_START) + MSCH_EVENT((" SLOT_START")); + if (cbtype & WL_MSCH_CT_SLOT_SKIP) + MSCH_EVENT((" SLOT_SKIP")); + if (cbtype & WL_MSCH_CT_SLOT_END) + MSCH_EVENT((" SLOT_END")); + if (cbtype & WL_MSCH_CT_OFF_CHAN_DONE) + MSCH_EVENT((" OFF_CHAN_DONE")); + if (cbtype & WL_MSCH_CT_PARTIAL) + MSCH_EVENT((" PARTIAL")); + if (cbtype & WL_MSCH_CT_PRE_ONCHAN) + MSCH_EVENT((" PRE_ONCHAN")); + if (cbtype & WL_MSCH_CT_PRE_REQ_START) + MSCH_EVENT((" PRE_REQ_START")); + + if (cbtype & WL_MSCH_CT_REQ_START) { + req_start[wlc_index] = 1; + req_start_time[wlc_index] = tt; + } else if (cbtype & WL_MSCH_CT_REQ_END) { + if (req_start[wlc_index]) { + MSCH_EVENT((" : REQ duration %d", + (uint32)(tt - req_start_time[wlc_index]))); + req_start[wlc_index] = 0; + } + } + + if (cbtype & WL_MSCH_CT_SLOT_START) { + solt_chanspec[wlc_index] = p->chanspec; + solt_start_time[wlc_index] = tt; + } else if (cbtype & WL_MSCH_CT_SLOT_END) { + if (p->chanspec == solt_chanspec[wlc_index]) { + MSCH_EVENT((" : SLOT duration %d", + (uint32)(tt - solt_start_time[wlc_index]))); + solt_chanspec[wlc_index] = 0; + } + } + MSCH_EVENT(("\n")); + + if (cbtype & (WL_MSCH_CT_ON_CHAN | WL_MSCH_CT_SLOT_SKIP)) { + MSCH_EVENT_HEAD(4); + if (cbtype & WL_MSCH_CT_ON_CHAN) { + MSCH_EVENT(("ID %d 
onchan idx %d cur_chan_seq_start %s ", + ntoh32(p->timeslot_id), ntoh32(p->onchan_idx), + dhd_mschdbg_display_time(p->cur_chan_seq_start_time_h, + p->cur_chan_seq_start_time_l))); + } + t = ((uint64)(ntoh32(p->start_time_h)) << 32) | + ntoh32(p->start_time_l); + MSCH_EVENT(("start %s ", + dhd_mschdbg_display_time(p->start_time_h, + p->start_time_l))); + tt = ((uint64)(ntoh32(p->end_time_h)) << 32) | ntoh32(p->end_time_l); + MSCH_EVENT(("end %s duration %d\n", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l), + (p->end_time_h == 0xffffffff && p->end_time_l == 0xffffffff)? + -1 : (int)(tt - t))); + } + + } + break; + + case WL_MSCH_PROFILER_EVENT_LOG: + { + while (len >= (int)WL_MSCH_EVENT_LOG_HEAD_SIZE) { + msch_event_log_profiler_event_data_t *p = + (msch_event_log_profiler_event_data_t *)data; + /* TODO: How to parse MSCH if extended event tag is present ??? */ + prcd_event_log_hdr_t hdr; + int size = WL_MSCH_EVENT_LOG_HEAD_SIZE + p->hdr.count * sizeof(uint32); + if (len < size || size > sizeof(msch_event_log_profiler_event_data_t)) { + break; + } + data += size; + len -= size; + dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; + hdr.count = p->hdr.count + 1; + /* exclude LSB 2 bits which indicate binary/non-binary data */ + hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2; + dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0); + } + lastMessages = TRUE; + break; + } + + case WL_MSCH_PROFILER_MESSAGE: + { + msch_message_profiler_event_data_t *p = (msch_message_profiler_event_data_t *)data; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: %s", s, ss, wlc_index, p->message)); + lastMessages = TRUE; + break; + } + + case WL_MSCH_PROFILER_PROFILE_START: + profiler_start_time[wlc_index] = tt; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("-------------------------------\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] 
PROFILE DATA:\n", s, ss, wlc_index)); + dhd_mschdbg_profiler_profiler_event_data(4, ver, data, 0); + break; + + case WL_MSCH_PROFILER_PROFILE_END: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] PROFILE END: take time %d\n", s, ss, + wlc_index, (uint32)(tt - profiler_start_time[wlc_index]))); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("-------------------------------\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + break; + + case WL_MSCH_PROFILER_REQ_HANDLE: + dhd_mschdbg_req_handle_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_REQ_ENTITY: + dhd_mschdbg_req_entity_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_CHAN_CTXT: + dhd_mschdbg_chan_ctxt_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_REQ_TIMING: + dhd_mschdbg_req_timing_profiler_event_data(4, ver, "msch", data, 0, FALSE); + break; + + default: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("[wl%d] ERROR: unsupported EVENT reason code:%d; ", + wlc_index, type)); + break; + } +} + +void +wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, void *data, int len) +{ + head_log = "MSCH"; + dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, (char *)data, len); +} + +void +wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, prcd_event_log_hdr_t *plog_hdr, + uint32 *log_ptr) +{ + uint32 log_pyld_len; + head_log = "CONSOLE"; + + if (plog_hdr->count == 0) { + return; + } + log_pyld_len = (plog_hdr->count - 1) * DATA_UNIT_FOR_LOG_CNT; + + if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) { + msch_event_log_profiler_event_data_t *p = + (msch_event_log_profiler_event_data_t *)log_ptr; + /* TODO: How to parse MSCH if extended event tag is present ??? 
*/ + prcd_event_log_hdr_t hdr; + uint32 s, ss; + + if (log_pyld_len < OFFSETOF(msch_event_log_profiler_event_data_t, data) || + log_pyld_len > sizeof(msch_event_log_profiler_event_data_t)) { + return; + } + + dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; + hdr.count = p->hdr.count + 1; + /* exclude LSB 2 bits which indicate binary/non-binary data */ + hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2; + dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0); + } else { + msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr; + int type = ntoh16(p->type); + int len = ntoh16(p->size); + + if (log_pyld_len < OFFSETOF(msch_collect_tlv_t, value) + len) { + return; + } + + dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, p->value, len); + } +} +#endif /* SHOW_LOGTRACE */ diff --git a/bcmdhd.100.10.315.x/dhd_mschdbg.h b/bcmdhd.100.10.315.x/dhd_mschdbg.h new file mode 100644 index 0000000..1e711da --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_mschdbg.h @@ -0,0 +1,39 @@ +/* + * DHD debugability header file + * + * <> + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_mschdbg.h 571265 2015-07-14 20:50:18Z $ + */ + +#ifndef _dhd_mschdbg_h_ +#define _dhd_mschdbg_h_ + +#ifdef SHOW_LOGTRACE +extern void wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, + void *data, int len); +extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, + prcd_event_log_hdr_t *plog_hdr, uint32 *log_ptr); +#endif /* SHOW_LOGTRACE */ + +#endif /* _dhd_mschdbg_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_msgbuf.c b/bcmdhd.100.10.315.x/dhd_msgbuf.c new file mode 100644 index 0000000..408cf89 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_msgbuf.c @@ -0,0 +1,9934 @@ +/** + * @file definition of host message ring functionality + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_msgbuf.c 769906 2018-06-28 11:52:31Z $ + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include + +#if defined(DHD_LB) +#include +#include +#define DHD_LB_WORKQ_SZ (8192) +#define DHD_LB_WORKQ_SYNC (16) +#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2) +#endif /* DHD_LB */ + +#include +#include +#include +#include +#include + +extern char dhd_version[]; +extern char fw_version[]; + +/** + * Host configures a soft doorbell for d2h rings, by specifying a 32bit host + * address where a value must be written. Host may also interrupt coalescing + * on this soft doorbell. + * Use Case: Hosts with network processors, may register with the dongle the + * network processor's thread wakeup register and a value corresponding to the + * core/thread context. Dongle will issue a write transaction + * to the PCIE RC which will need to be routed to the mapped register space, by + * the host. 
+ */ +/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */ + +/* Dependency Check */ +#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF" +#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */ + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ + +#define DEFAULT_RX_BUFFERS_TO_POST 256 +#define RXBUFPOST_THRESHOLD 32 +#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */ + +#define DHD_STOP_QUEUE_THRESHOLD 200 +#define DHD_START_QUEUE_THRESHOLD 100 + +#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */ +#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) + +/* flags for ioctl pending status */ +#define MSGBUF_IOCTL_ACK_PENDING (1<<0) +#define MSGBUF_IOCTL_RESP_PENDING (1<<1) + +#define DHD_IOCTL_REQ_PKTBUFSZ 2048 +#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE) + +#define DMA_ALIGN_LEN 4 + +#define DMA_D2H_SCRATCH_BUF_LEN 8 +#define DMA_XFER_LEN_LIMIT 0x400000 + +#ifdef BCM_HOST_BUF +#ifndef DMA_HOST_BUFFER_LEN +#define DMA_HOST_BUFFER_LEN 0x200000 +#endif // endif +#endif /* BCM_HOST_BUF */ + +#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 + +#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1 +#define DHD_FLOWRING_MAX_EVENTBUF_POST 32 +#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 +#define DHD_H2D_INFORING_MAX_BUF_POST 32 +#define DHD_MAX_TSBUF_POST 8 + +#define DHD_PROT_FUNCS 43 + +/* Length of buffer in host for bus throughput measurement */ +#define DHD_BUS_TPUT_BUF_LEN 2048 + +#define TXP_FLUSH_NITEMS + +/* optimization to write "n" tx items at a time to ring */ +#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 + +#define RING_NAME_MAX_LENGTH 24 +#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024 +/* Giving room before ioctl_trans_id rollsover. 
*/ +#define BUFFER_BEFORE_ROLLOVER 300 + +/* 512K memory + 32K registers */ +#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024) + +struct msgbuf_ring; /* ring context for common and flow rings */ + +/** + * PCIE D2H DMA Complete Sync Modes + * + * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into + * Host system memory. A WAR using one of 3 approaches is needed: + * 1. Dongle places a modulo-253 seqnum in last word of each D2H message + * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum + * writes in the last word of each work item. Each work item has a seqnum + * number = sequence num % 253. + * + * 3. Read Barrier: Dongle does a host memory read access prior to posting an + * interrupt, ensuring that D2H data transfer indeed completed. + * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing + * ring contents before the indices. + * + * Host does not sync for DMA to complete with option #3 or #4, and a noop sync + * callback (see dhd_prot_d2h_sync_none) may be bound. + * + * Dongle advertizes host side sync mechanism requirements. + */ + +#define PCIE_D2H_SYNC_WAIT_TRIES (512UL) +#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL) +#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */ + +/** + * Custom callback attached based upon D2H DMA Sync mode advertized by dongle. + * + * On success: return cmn_msg_hdr_t::msg_type + * On failure: return 0 (invalid msg_type) + */ +typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, + volatile cmn_msg_hdr_t *msg, int msglen); + +/* + * +---------------------------------------------------------------------------- + * + * RingIds and FlowId are not equivalent as ringids include D2H rings whereas + * flowids do not. 
 *
 * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
 * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
 *
 * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
 * BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings.
 *
 * H2D Control Submit   RingId = 0        FlowId = 0 reserved never allocated
 * H2D RxPost Submit    RingId = 1        FlowId = 1 reserved never allocated
 *
 * D2H Control Complete RingId = 2
 * D2H Transmit Complete RingId = 3
 * D2H Receive Complete RingId = 4
 *
 * H2D TxPost FLOWRING  RingId = 5         FlowId = 2     (1st flowring)
 * H2D TxPost FLOWRING  RingId = 6         FlowId = 3     (2nd flowring)
 * H2D TxPost FLOWRING  RingId = 5 + (N-1) FlowId = (N+1) (Nth flowring)
 *
 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
 *
 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_COMMON_MSGRINGS = 5, and the
 * FlowId values would be in the range [2..133] and the corresponding
 * RingId values would be in the range [5..136].
 *
 * The flowId allocator may choose to allocate Flowids:
 *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
 *   X# of uc flowids in consecutive ranges (per station Id), where X is the
 *   packet's access category (e.g. 4 uc flowids per station).
 *
 * CAUTION:
 * When DMA indices array feature is used, RingId=5, corresponding to the 0th
 * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
 * since the FlowId truly represents the index in the H2D DMA indices array.
 *
 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
 * will represent the index in the D2H DMA indices array.
+ * + * +---------------------------------------------------------------------------- + */ + +/* First TxPost Flowring Id */ +#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS + +/* Determine whether a ringid belongs to a TxPost flowring */ +#define DHD_IS_FLOWRING(ringid, max_flow_rings) \ + ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \ + (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS)) + +/* Convert a H2D TxPost FlowId to a MsgBuf RingId */ +#define DHD_FLOWID_TO_RINGID(flowid) \ + (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)) + +/* Convert a MsgBuf RingId to a H2D TxPost FlowId */ +#define DHD_RINGID_TO_FLOWID(ringid) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS)) + +/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array + * This may be used for the H2D DMA WR index array or H2D DMA RD index array or + * any array of H2D rings. + */ +#define DHD_H2D_RING_OFFSET(ringid) \ + (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid)) + +/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array + * This may be used for IFRM. + */ +#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \ + ((ringid) - BCMPCIE_COMMON_MSGRINGS) + +/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array + * This may be used for the D2H DMA WR index array or D2H DMA RD index array or + * any array of D2H rings. + * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring + * max_h2d_rings: total number of h2d rings + */ +#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \ + ((ringid) > (max_h2d_rings) ? 
\ + ((ringid) - max_h2d_rings) : \ + ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)) + +/* Convert a D2H DMA Indices Offset to a RingId */ +#define DHD_D2H_RINGID(offset) \ + ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS) + +#define DHD_DMAH_NULL ((void*)NULL) + +/* + * Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able + * buffer does not occupy the entire cacheline, and another object is placed + * following the DMA-able buffer, data corruption may occur if the DMA-able + * buffer is used to DMAing into (e.g. D2H direction), when HW cache coherency + * is not available. + */ +#if defined(L1_CACHE_BYTES) +#define DHD_DMA_PAD (L1_CACHE_BYTES) +#else +#define DHD_DMA_PAD (128) +#endif // endif + +/* Used in loopback tests */ +typedef struct dhd_dmaxfer { + dhd_dma_buf_t srcmem; + dhd_dma_buf_t dstmem; + uint32 srcdelay; + uint32 destdelay; + uint32 len; + bool in_progress; + uint64 start_usec; + uint32 d11_lpbk; + int status; +} dhd_dmaxfer_t; + +/** + * msgbuf_ring : This object manages the host side ring that includes a DMA-able + * buffer, the WR and RD indices, ring parameters such as max number of items + * an length of each items, and other miscellaneous runtime state. + * A msgbuf_ring may be used to represent a H2D or D2H common ring or a + * H2D TxPost ring as specified in the PCIE FullDongle Spec. + * Ring parameters are conveyed to the dongle, which maintains its own peer end + * ring state. Depending on whether the DMA Indices feature is supported, the + * host will update the WR/RD index in the DMA indices array in host memory or + * directly in dongle memory. 
/**
 * msgbuf_ring_t: host-side state for one H2D or D2H message ring.
 * Holds the DMA-able ring buffer plus the host's cached RD/WR indices and
 * per-ring bookkeeping. One instance exists per common ring and per TxPost
 * flowring (flowrings come from a preallocated pool).
 */
typedef struct msgbuf_ring {
	bool inited;            /* TRUE once the ring has been initialized */
	uint16 idx;             /* ring id */
	uint16 rd;              /* read index */
	uint16 curr_rd;         /* read index for debug */
	uint16 wr;              /* write index */
	uint16 max_items;       /* maximum number of items in ring */
	uint16 item_len;        /* length of each item in the ring */
	sh_addr_t base_addr;    /* LITTLE ENDIAN formatted: base address */
	dhd_dma_buf_t dma_buf;  /* DMA-able buffer: pa, va, len, dmah, secdma */
	uint32 seqnum;          /* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
	void *start_addr;       /* first not-yet-announced item (batched TxPost) */
	/* # of messages on ring not yet announced to dongle */
	uint16 pend_items_count;
#endif /* TXP_FLUSH_NITEMS */

	uint8 ring_type;        /* ring category (common/flow/info/...) */
	uint8 n_completion_ids; /* number of valid entries below */
	bool create_pending;    /* dynamic ring create request outstanding */
	uint16 create_req_id;   /* request id of the pending create */
	uint8 current_phase;    /* phase bit expected in next work item */
	/* completion ring ids associated with this ring (field name sic) */
	uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
	uchar name[RING_NAME_MAX_LENGTH]; /* human-readable ring name */
	uint32 ring_mem_allocated;        /* nonzero once ring memory exists */
	void *ring_lock;                  /* serializes ring index updates */
} msgbuf_ring_t;

/* Virtual address of the first item in a ring's DMA buffer */
#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
/* Virtual address of the LAST item slot (not one-past-the-end) */
#define DHD_RING_END_VA(ring) \
	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
	(((ring)->max_items - 1) * (ring)->item_len))

/* This can be overwritten by module parameter defined in dhd_linux.c
 * or by dhd iovar h2d_max_txpost.
 */
int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
*/ +typedef struct dhd_prot { + osl_t *osh; /* OSL handle */ + uint16 rxbufpost; + uint16 max_rxbufpost; + uint16 max_eventbufpost; + uint16 max_ioctlrespbufpost; + uint16 max_tsbufpost; + uint16 max_infobufpost; + uint16 infobufpost; + uint16 cur_event_bufs_posted; + uint16 cur_ioctlresp_bufs_posted; + uint16 cur_ts_bufs_posted; + + /* Flow control mechanism based on active transmits pending */ + osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */ + uint16 h2d_max_txpost; + uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */ + + /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */ + msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */ + msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */ + msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */ + msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */ + msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */ + msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */ + msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */ + msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */ + + msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */ + dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */ + uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */ + + uint32 rx_dataoffset; + + dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */ + dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */ + + /* ioctl related resources */ + uint8 ioctl_state; + int16 ioctl_status; /* status returned from dongle */ + uint16 ioctl_resplen; + dhd_ioctl_recieved_status_t ioctl_received; + uint curr_ioctl_cmd; + dhd_dma_buf_t retbuf; /* For holding ioctl response */ + dhd_dma_buf_t ioctbuf; /* For holding ioctl request */ + + dhd_dma_buf_t d2h_dma_scratch_buf; /* For 
holding d2h scratch */ + + /* DMA-able arrays for holding WR and RD indices */ + uint32 rw_index_sz; /* Size of a RD or WR index in dongle */ + dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */ + dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */ + dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */ + dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */ + dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */ + + dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */ + + dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */ + uint32 flowring_num; + + d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ + ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ + ulong d2h_sync_wait_tot; /* total wait loops */ + + dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */ + + uint16 ioctl_seq_no; + uint16 data_seq_no; + uint16 ioctl_trans_id; + void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */ + void *pktid_rx_map; /* pktid map for rx path */ + void *pktid_tx_map; /* pktid map for tx path */ + bool metadata_dbg; + void *pktid_map_handle_ioctl; + + /* Applications/utilities can read tx and rx metadata using IOVARs */ + uint16 rx_metadata_offset; + uint16 tx_metadata_offset; + +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + /* Host's soft doorbell configuration */ + bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS]; +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ + + /* Work Queues to be used by the producer and the consumer, and threshold + * when the WRITE index must be synced to consumer's workq + */ +#if defined(DHD_LB_TXC) + uint32 tx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t tx_compl_prod, tx_compl_cons; +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + uint32 rx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t rx_compl_prod, rx_compl_cons; +#endif /* DHD_LB_RXC */ + + dhd_dma_buf_t 
fw_trap_buf; /* firmware trap buffer */ + + uint32 host_ipc_version; /* Host sypported IPC rev */ + uint32 device_ipc_version; /* FW supported IPC rev */ + uint32 active_ipc_version; /* Host advertised IPC rev */ + dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */ + bool hostts_req_buf_inuse; + bool rx_ts_log_enabled; + bool tx_ts_log_enabled; + bool no_retry; + bool no_aggr; + bool fixed_rate; + dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */ +} dhd_prot_t; + +#ifdef DHD_DUMP_PCIE_RINGS +static +int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn); +#endif /* DHD_DUMP_PCIE_RINGS */ + +extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp); +extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap); +/* Convert a dmaaddr_t to a base_addr with htol operations */ +static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa); + +/* APIs for managing a DMA-able buffer */ +static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); + +/* msgbuf ring management */ +static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, + const char *name, uint16 max_items, uint16 len_item, uint16 ringid); +static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf); + +/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */ +static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd); + +/* Fetch and Release a flowring msgbuf_ring from flowring pool */ +static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, + 
uint16 flowid); +/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */ + +/* Producer: Allocate space in a msgbuf ring */ +static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 *alloced, bool exactly_nitems); +static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, + uint16 *alloced, bool exactly_nitems); + +/* Consumer: Determine the location where the next message may be consumed */ +static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 *available_len); + +/* Producer (WR index update) or Consumer (RD index update) indication */ +static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring, + void *p, uint16 len); +static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz); + +/* Set/Get a RD or WR index in the array of indices */ +/* See also: dhd_prot_dma_indx_init() */ +void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, + uint16 ringid); +static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid); + +/* Locate a packet given a pktid */ +static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, + bool free_pktid); +/* Locate a packet given a PktId and free it. 
*/ +static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send); + +static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf); +static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, + void *buf, int ifidx); + +/* Post buffers for Rx, control ioctl response and events */ +static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post); +static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid); +static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid); +static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub); + +static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt); + +/* D2H Message handling */ +static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len); + +/* D2H Message handlers */ +static void dhd_prot_noop(dhd_pub_t *dhd, void *msg); +static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg); + +/* Loopback test with dongle */ +static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma); +static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, + uint destdelay, dhd_dmaxfer_t *dma); +static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg); + +/* 
Flowring management communication with dongle */ +static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg); +static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg); + +/* Monitor Mode */ +#ifdef WL_MONITOR +extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx); +extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx); +#endif /* WL_MONITOR */ + +/* Configure a soft doorbell per D2H ring */ +static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd); +static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg); +static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf); +static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf); +static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf); +static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf); +static void dhd_prot_detach_info_rings(dhd_pub_t *dhd); +#ifdef EWP_EDL +static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd); +#endif // endif +static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf); +static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf); + +typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg); + +/** callback functions for messages generated by the dongle */ +#define MSG_TYPE_INVALID 0 + +static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { + dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */ + dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */ + dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */ + NULL, + dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */ + NULL, + 
dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */ + NULL, + dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */ + NULL, + dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */ + NULL, + dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */ + NULL, + dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */ + NULL, + dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ + NULL, + NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */ + NULL, + dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_RESUME */ + dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */ + dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */ + NULL, /* MSG_TYPE_INFO_BUF_POST */ + dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CREATE */ + NULL, /* MSG_TYPE_D2H_RING_CREATE */ + dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ + dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG */ + NULL, /* MSG_TYPE_D2H_RING_CONFIG */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */ + dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */ + NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */ + dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */ + NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */ + NULL, /* MSG_TYPE_HOSTTIMSTAMP */ + dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */ + dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */ + NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */ + dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */ +}; + +#ifdef DHD_RX_CHAINING + +#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ + (dhd_wet_chainable(dhd) && \ + dhd_rx_pkt_chainable((dhd), (ifidx)) && \ + !ETHER_ISNULLDEST(((struct 
ether_header *)(evh))->ether_dhost) && \ + !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ + ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ + ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ + (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6)))) + +static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain); +static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); +static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd); + +#define DHD_PKT_CTF_MAX_CHAIN_LEN 64 + +#endif /* DHD_RX_CHAINING */ + +#define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL) + +static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd); + +bool +dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info) +{ + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info; + uint16 rd, wr; + bool ret; + + if (dhd->dma_d2h_ring_upd_support) { + wr = flow_ring->wr; + } else { + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + } + if (dhd->dma_h2d_ring_upd_support) { + rd = flow_ring->rd; + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + } + ret = (wr == rd) ? TRUE : FALSE; + return ret; +} + +void +dhd_prot_dump_ring_ptrs(void *prot_info) +{ + msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info; + DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__, + ring->curr_rd, ring->rd, ring->wr)); +} + +uint16 +dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd) +{ + return (uint16)h2d_max_txpost; +} +void +dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost) +{ + h2d_max_txpost = max_txpost; +} +/** + * D2H DMA to completion callback handlers. 
Based on the mode advertised by the + * dongle through the PCIE shared region, the appropriate callback will be + * registered in the proto layer to be invoked prior to precessing any message + * from a D2H DMA ring. If the dongle uses a read barrier or another mode that + * does not require host participation, then a noop callback handler will be + * bound that simply returns the msg_type. + */ +static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, + uint32 tries, volatile uchar *msg, int msglen); +static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd); +static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, + uint16 ring_type, uint32 id); +static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, + uint8 type, uint32 id); +static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd); + +/** + * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has + * not completed, a livelock condition occurs. Host will avert this livelock by + * dropping this message and moving to the next. This dropped message can lead + * to a packet leak, or even something disastrous in the case the dropped + * message happens to be a control response. + * Here we will log this condition. One may choose to reboot the dongle. 
+ * + */ +static void +dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries, + volatile uchar *msg, int msglen) +{ + uint32 ring_seqnum = ring->seqnum; + + DHD_ERROR(( + "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>" + " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n", + dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries, + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot, + ring->dma_buf.va, msg, ring->curr_rd)); + + dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL); + + dhd->livelock_occured = TRUE; + dhd_bus_dump_console_buffer(dhd->bus); + dhd_prot_debug_info_print(dhd); + +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ + + dhd_schedule_reset(dhd); + +} + +/** + * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM + * mode. Sequence number is always in the last word of a message. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */ + dhd_prot_t *prot = dhd->prot; + uint32 msg_seqnum; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. 
+ * + * Case 1 - Apart from Host CPU some other bus master is + * accessing the DDR port, probably page close to the ring + * so, PCIE does not get a change to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there are no + * unwanted extra delay introdcued in normal conditions. + */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + msg_seqnum = *marker; + if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */ + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ + OSL_DELAY(delay * step); /* Add stepper delay */ + + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */ + + dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries, + (volatile uchar *) msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM + * mode. The xorcsum is placed in the last word of a message. Dongle will also + * place a seqnum in the epoch field of the cmn_msg_hdr. 
+ */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 prot_checksum = 0; /* computed checksum */ + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from Host CPU some other bus master is + * accessing the DDR port, probably page close to the ring + * so, PCIE does not get a change to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there are no + * unwanted extra delay introdcued in normal conditions. + */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + /* First verify if the seqnumber has been update, + * if yes, then only check xorcsum. + * Once seqnum and xorcsum is proper that means + * complete message has arrived. 
+ */ + if (msg->epoch == ring_seqnum) { + prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, + num_words); + if (prot_checksum == 0U) { /* checksum is OK */ + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ + OSL_DELAY(delay * step); /* Add stepper delay */ + + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */ + + DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum)); + dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries, + (volatile uchar *) msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_none - Dongle ensure that the DMA will complete and host + * need to try to sync. This noop sync handler will be bound when the dongle + * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + return msg->msg_type; +} + +#ifdef EWP_EDL +/** + * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t + * header values at both the beginning and end of the payload. + * The cmn_msg_hdr_t is placed at the start and end of the payload + * in each work item in the EDL ring. + * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field + * and the length of the payload in the 'request_id' field. 
+ * Structure of each work item in the EDL ring: + * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t | + * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is + * too costly on the dongle side and might take up too many ARM cycles, + * hence the xorcsum sync method is not being used for EDL ring. + */ +static int +BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg) +{ + uint32 tries; + int msglen = 0, len = 0; + uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + volatile cmn_msg_hdr_t *trailer = NULL; + volatile uint8 *buf = NULL; + bool valid_msg = FALSE; + + BCM_REFERENCE(delay); + /* + * For retries we have to make some sort of stepper algorithm. + * We see that every time when the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from Host CPU some other bus master is + * accessing the DDR port, probably page close to the ring + * so, PCIE does not get a change to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec delay given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * In this case the breathing time of 50usec given by + * the Host CPU is not sufficient. + * Solution: Increase the delay in a stepper fashion. + * This is done to ensure that there are no + * unwanted extra delay introdcued in normal conditions. + */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + /* First verify if the seqnumber has been updated, + * if yes, only then validate the header and trailer. + * Once seqnum, header and trailer have been validated, it means + * that the complete message has arrived. 
+ */ + valid_msg = FALSE; + if (msg->epoch == ring_seqnum && + msg->msg_type == MSG_TYPE_INFO_PYLD && + msg->request_id > 0 && + msg->request_id <= ring->item_len) { + /* proceed to check trailer only if header is valid */ + buf = (volatile uint8 *)msg; + msglen = sizeof(cmn_msg_hdr_t) + msg->request_id; + buf += msglen; + if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) { + trailer = (volatile cmn_msg_hdr_t *)buf; + valid_msg = (trailer->epoch == ring_seqnum) && + (trailer->msg_type == msg->msg_type) && + (trailer->request_id == msg->request_id); + if (!valid_msg) { + DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u" + " expected, seqnum=%u; reqid=%u. Retrying... \n", + __FUNCTION__, trailer->epoch, trailer->request_id, + msg->epoch, msg->request_id)); + } + } else { + DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n", + __FUNCTION__, msg->request_id)); + } + + if (valid_msg) { + /* data is OK */ + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + } else { + DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u." + " msg_type=0x%x, request_id=%u." 
+ " Retrying...\n", + __FUNCTION__, ring_seqnum, msg->epoch, + msg->msg_type, msg->request_id)); + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ + OSL_DELAY(delay * step); /* Add stepper delay */ + + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */ + + DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__)); + DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u" + " msgtype=0x%x; expected-msgtype=0x%x" + " length=%u; expected-max-length=%u", __FUNCTION__, + msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD, + msg->request_id, ring->item_len)); + dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL); + if (trailer && msglen > 0 && + (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) { + DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u" + " msgtype=0x%x; expected-msgtype=0x%x" + " length=%u; expected-length=%u", __FUNCTION__, + trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD, + trailer->request_id, msg->request_id)); + dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer, + sizeof(*trailer), DHD_ERROR_VAL); + } + + if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) + len = msglen + sizeof(cmn_msg_hdr_t); + else + len = ring->item_len; + + dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries, + (volatile uchar *) msg, len); + + ring->seqnum++; /* skip this message */ + return BCME_ERROR; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__, + msg->epoch, msg->request_id)); + + prot->d2h_sync_wait_tot += tries; + return BCME_OK; +} +#endif /* EWP_EDL */ + +INLINE void +dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t 
reason)
{
	/* Publish the ioctl completion reason and wake the thread blocked in
	 * dhd_os_ioctl_resp_wait(). Two write barriers: one so all prior memory
	 * operations are visible before the reason is stored, one so the stored
	 * reason is visible before the waiter is woken.
	 */
	/* To synchronize with the previous memory operations call wmb() */
	OSL_SMP_WMB();
	dhd->prot->ioctl_received = reason;
	/* Call another wmb() to make sure before waking up the other event value gets updated */
	OSL_SMP_WMB();
	dhd_os_ioctl_resp_wake(dhd);
}

/**
 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
 * dongle advertises.
 * Resets the livelock wait statistics, re-arms the epoch/phase of all three
 * D2H completion rings, and binds the d2h_sync_cb that matches the sync mode
 * bits the dongle set in dhd->d2h_sync_mode (SEQNUM, XORCSUM, or none).
 */
static void
dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	prot->d2h_sync_wait_max = 0UL;
	prot->d2h_sync_wait_tot = 0UL;

	/* All D2H completion rings restart from the initial epoch and phase */
	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

	/* Bind the sync callback the dongle advertised; checked in priority
	 * order, SEQNUM wins over XORCSUM if both bits are set.
	 */
	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
	} else {
		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
	}
}

/**
 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
 * Re-arms the epoch/phase of the two H2D submission rings (rx-post and
 * control) to their initial values.
 */
static void
dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
	prot->h2dring_rxp_subn.current_phase = 0;

	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
	prot->h2dring_ctrl_subn.current_phase = 0;
}

/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */

/*
 * +---------------------------------------------------------------------------+
 * PCIE DMA-able
buffer. Sets up a dhd_dma_buf_t object, which includes the
 * virtual and physical address, the buffer length and the DMA handler.
 * A secdma handler is also included in the dhd_dma_buf object.
 * +---------------------------------------------------------------------------+
 */

/* Split a host physical address into the little-endian low/high 32bit words
 * of a sh_addr_t, the layout the dongle reads from shared memory.
 */
static INLINE void
dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
{
	base_addr->low_addr = htol32(PHYSADDRLO(pa));
	base_addr->high_addr = htol32(PHYSADDRHI(pa));
}

/**
 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
 * Asserts the buffer has a non-zero physical address, is DMA-aligned and has
 * a non-zero length, then verifies that (pa_low + len) does not wrap 32bit
 * arithmetic, since the dongle computes offsets with 32bit pointers only.
 * Returns BCME_OK when the buffer is usable, BCME_ERROR on 32bit carry-over.
 */
static int
dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
	ASSERT(dma_buf);
	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
	ASSERT(dma_buf->len != 0);

	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
	end = (pa_lowaddr + dma_buf->len); /* end address */

	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
			__FUNCTION__, pa_lowaddr, dma_buf->len));
		return BCME_ERROR;
	}

	return BCME_OK;
}

/**
 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
 * The requested length is padded up to a DHD_DMA_PAD multiple; dma_buf->len
 * records the unpadded caller-requested length.
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
	uint32 dma_pad = 0;
	osl_t *osh = dhd->osh;
	uint16 dma_align = DMA_ALIGN_LEN;
	uint32 rem = 0;

	ASSERT(dma_buf != NULL);
	ASSERT(dma_buf->va == NULL);
	ASSERT(dma_buf->len == 0);

	/* Pad the buffer length to align to cacheline size. */
	rem = (buf_len % DHD_DMA_PAD);
	dma_pad = rem ?
(DHD_DMA_PAD - rem) : 0;

	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);

	if (dma_buf->va == NULL) {
		DHD_ERROR(("%s: buf_len %d, no memory available\n",
			__FUNCTION__, buf_len));
		return BCME_NOMEM;
	}

	dma_buf->len = buf_len; /* not including padded len */

	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
		dhd_dma_buf_free(dhd, dma_buf);
		return BCME_ERROR;
	}

	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */

	return BCME_OK;
}

/**
 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
 * Zero-fills the whole (unpadded) buffer and flushes the CPU cache so the
 * zeroed contents are visible to the device. No-op on a NULL/unallocated buf.
 */
static void
dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	if ((dma_buf == NULL) || (dma_buf->va == NULL))
		return;

	(void)dhd_dma_buf_audit(dhd, dma_buf);

	/* Zero out the entire buffer and cache flush */
	memset((void*)dma_buf->va, 0, dma_buf->len);
	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
}

/**
 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
 * dhd_dma_buf_alloc().
 * Frees using the recorded _alloced size (which includes any padding) and
 * clears the descriptor, so a later free of the same descriptor is harmless.
 */
void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	osl_t *osh = dhd->osh;

	ASSERT(dma_buf);

	if (dma_buf->va == NULL)
		return; /* Allow for free invocation, when alloc failed */

	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
	(void)dhd_dma_buf_audit(dhd, dma_buf);

	/* dma buffer may have been padded at allocation */
	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
		dma_buf->pa, dma_buf->dmah);

	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
}

/**
 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
 */
void
dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
{
	/* Populate a caller-provided dhd_dma_buf_t descriptor from individual
	 * fields (buffer was allocated elsewhere), then audit the result.
	 */
	dhd_dma_buf_t *dma_buf;
	ASSERT(dhd_dma_buf);
	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
	dma_buf->va = va;
	dma_buf->len = len;
	dma_buf->pa = pa;
	dma_buf->dmah = dmah;
	dma_buf->secdma = secdma;

	/* Audit user defined configuration */
	(void)dhd_dma_buf_audit(dhd, dma_buf);
}

/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */

/*
 * +---------------------------------------------------------------------------+
 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
 * Main purpose is to save memory on the dongle, has other purposes as well.
 * The packet id map, also includes storage for some packet parameters that
 * may be saved. A native packet pointer along with the parameters may be saved
 * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
 * and the metadata may be retrieved using the previously allocated packet id.
 * +---------------------------------------------------------------------------+
 */
#define DHD_PCIE_PKTID
/* Pool sizes per pktid map (locker #0 is reserved/invalid) */
#define MAX_CTRL_PKTID		(1024) /* Maximum number of pktids supported */
#define MAX_RX_PKTID		(1024)
#define MAX_TX_PKTID		(3072 * 2)

/* On Router, the pktptr serves as a pktid.
*/ + +#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID) +#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC" +#endif // endif + +/* Enum for marking the buffer color based on usage */ +typedef enum dhd_pkttype { + PKTTYPE_DATA_TX = 0, + PKTTYPE_DATA_RX, + PKTTYPE_IOCTL_RX, + PKTTYPE_EVENT_RX, + PKTTYPE_INFO_RX, + /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */ + PKTTYPE_NO_CHECK, + PKTTYPE_TSBUF_RX +} dhd_pkttype_t; + +#define DHD_PKTID_INVALID (0U) +#define DHD_IOCTL_REQ_PKTID (0xFFFE) +#define DHD_FAKE_PKTID (0xFACE) +#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD +#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC +#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB +#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA +#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9 +#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8 + +#define IS_FLOWRING(ring) \ + ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0)) + +typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ + +/* Construct a packet id mapping table, returning an opaque map handle */ +static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items); + +/* Destroy a packet id mapping table, freeing all packets active in the table */ +static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map); + +#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items)) +#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map)) +#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map)) +#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map)) + +#ifdef MACOSX_DHD +#undef DHD_PCIE_PKTID +#define DHD_PCIE_PKTID 1 +#endif /* MACOSX_DHD */ + +#if defined(DHD_PCIE_PKTID) +#if defined(MACOSX_DHD) +#define IOCTLRESP_USE_CONSTMEM +static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf); +static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf); +#endif 
// endif + +/* Determine number of pktids that are available */ +static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle); + +/* Allocate a unique pktid against which a pkt and some metadata is saved */ +static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, dhd_pkttype_t pkttype); +static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); +static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + void *pkt, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); + +/* Return an allocated pktid, retrieving previously saved pkt and metadata */ +static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah, + void **secdma, dhd_pkttype_t pkttype, bool rsv_locker); + +/* + * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees + * + * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator + * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation + * + * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined, + * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected. + */ +#if defined(DHD_PKTID_AUDIT_ENABLED) +#define USE_DHD_PKTID_AUDIT_LOCK 1 +/* Audit the pktidmap allocator */ +/* #define DHD_PKTID_AUDIT_MAP */ + +/* Audit the pktid during production/consumption of workitems */ +#define DHD_PKTID_AUDIT_RING + +#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING) +#error "May only enabled audit of MAP or RING, at a time." 
+#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */ + +#define DHD_DUPLICATE_ALLOC 1 +#define DHD_DUPLICATE_FREE 2 +#define DHD_TEST_IS_ALLOC 3 +#define DHD_TEST_IS_FREE 4 + +typedef enum dhd_pktid_map_type { + DHD_PKTID_MAP_TYPE_CTRL = 1, + DHD_PKTID_MAP_TYPE_TX, + DHD_PKTID_MAP_TYPE_RX, + DHD_PKTID_MAP_TYPE_UNKNOWN +} dhd_pktid_map_type_t; + +#ifdef USE_DHD_PKTID_AUDIT_LOCK +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock) +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0) +#define DHD_PKTID_AUDIT_LOCK(lock) 0 +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0) +#endif /* !USE_DHD_PKTID_AUDIT_LOCK */ + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +#define USE_DHD_PKTID_LOCK 1 + +#ifdef USE_DHD_PKTID_LOCK +#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) \ + do { \ + BCM_REFERENCE(osh); \ + BCM_REFERENCE(lock); \ + } while (0) +#define DHD_PKTID_LOCK(lock) 0 +#define DHD_PKTID_UNLOCK(lock, flags) \ + do { \ + BCM_REFERENCE(lock); \ + BCM_REFERENCE(flags); \ + } while (0) +#endif /* !USE_DHD_PKTID_LOCK */ + +typedef enum dhd_locker_state { + LOCKER_IS_FREE, + LOCKER_IS_BUSY, + LOCKER_IS_RSVD +} dhd_locker_state_t; + +/* Packet metadata saved in packet id mapper */ + +typedef struct dhd_pktid_item { + dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */ + uint8 dir; /* dma map direction 
(Tx=flush or Rx=invalidate) */ + dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */ + uint16 len; /* length of mapped packet's buffer */ + void *pkt; /* opaque native pointer to a packet */ + dmaaddr_t pa; /* physical address of mapped packet's buffer */ + void *dmah; /* handle to OS specific DMA map */ + void *secdma; +} dhd_pktid_item_t; + +typedef uint32 dhd_pktid_key_t; + +typedef struct dhd_pktid_map { + uint32 items; /* total items in map */ + uint32 avail; /* total available items */ + int failures; /* lockers unavailable count */ + /* Spinlock to protect dhd_pktid_map in process/tasklet context */ + void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + void *pktid_audit_lock; + struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */ + dhd_pktid_item_t lockers[0]; /* metadata storage */ +} dhd_pktid_map_t; + +/* + * PktId (Locker) #0 is never allocated and is considered invalid. + * + * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a + * depleted pktid pool and must not be used by the caller. + * + * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. 
+ */ + +#define DHD_PKTID_FREE_LOCKER (FALSE) +#define DHD_PKTID_RSV_LOCKER (TRUE) + +#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) +#define DHD_PKIDMAP_ITEMS(items) (items) +#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ + (DHD_PKTID_ITEM_SZ * ((items) + 1))) +#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1)) + +#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map)) + +/* Convert a packet to a pktid, and save pkt pointer in busy locker */ +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \ + dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype)) +/* Reuse a previously reserved locker to save packet params */ +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) +/* Convert a packet to a pktid, and save packet params in locker */ +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert pktid to a packet, and free the locker */ +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) + +/* Convert the pktid to a packet, empty locker, but keep it reserved */ +#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) + +#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) + +#if 
defined(DHD_PKTID_AUDIT_ENABLED) + +static int +dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map) +{ + dhd_prot_t *prot = dhd->prot; + int pktid_map_type; + + if (pktid_map == prot->pktid_ctrl_map) { + pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL; + } else if (pktid_map == prot->pktid_tx_map) { + pktid_map_type = DHD_PKTID_MAP_TYPE_TX; + } else if (pktid_map == prot->pktid_rx_map) { + pktid_map_type = DHD_PKTID_MAP_TYPE_RX; + } else { + pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN; + } + + return pktid_map_type; +} + +/** +* __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid. +*/ +static int +__dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg) +{ +#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: " + struct bcm_mwbmap *handle; + uint32 flags; + bool ignore_audit; + int error = BCME_OK; + + if (pktid_map == (dhd_pktid_map_t *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg)); + return BCME_OK; + } + + flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock); + + handle = pktid_map->pktid_audit; + if (handle == (struct bcm_mwbmap *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg)); + goto out; + } + + /* Exclude special pktids from audit */ + ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID); + if (ignore_audit) { + goto out; + } + + if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid)); + error = BCME_ERROR; + goto out; + } + + /* Perform audit */ + switch (test_for) { + case DHD_DUPLICATE_ALLOC: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n", + errmsg, pktid)); + error = BCME_ERROR; + } else { + bcm_mwbmap_force(handle, pktid); + } + break; + + case DHD_DUPLICATE_FREE: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n", + 
errmsg, pktid)); + error = BCME_ERROR; + } else { + bcm_mwbmap_free(handle, pktid); + } + break; + + case DHD_TEST_IS_ALLOC: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n", + errmsg, pktid)); + error = BCME_ERROR; + } + break; + + case DHD_TEST_IS_FREE: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free", + errmsg, pktid)); + error = BCME_ERROR; + } + break; + + default: + DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for)); + error = BCME_ERROR; + break; + } + +out: + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return error; +} + +static int +dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg) +{ + int ret = BCME_OK; + ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg); + if (ret == BCME_ERROR) { + DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n", + __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map))); + dhd_pktid_error_handler(dhd); + } + + return ret; +} + +#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \ + dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__) + +static int +dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid, + const int test_for, void *msg, uint32 msg_len, const char *func) +{ + int ret = BCME_OK; + ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func); + if (ret == BCME_ERROR) { + DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n", + __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map))); + prhex(func, (uchar *)msg, msg_len); + dhd_pktid_error_handler(dhdp); + } + return ret; +} +#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \ + dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \ + (pktid), (test_for), msg, msg_len, __FUNCTION__) + +#endif /* DHD_PKTID_AUDIT_ENABLED 
*/ + +/** + * +---------------------------------------------------------------------------+ + * Packet to Packet Id mapper using a paradigm. + * + * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID]. + * + * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique + * packet id is returned. This unique packet id may be used to retrieve the + * previously saved packet metadata, using dhd_pktid_map_free(). On invocation + * of dhd_pktid_map_free(), the unique packet id is essentially freed. A + * subsequent call to dhd_pktid_map_alloc() may reuse this packet id. + * + * Implementation Note: + * Convert this into a abstraction and place into bcmutils ! + * Locker abstraction should treat contents as opaque storage, and a + * callback should be registered to handle busy lockers on destructor. + * + * +---------------------------------------------------------------------------+ + */ + +/** Allocate and initialize a mapper of num_items */ + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items) +{ + void* osh; + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_items; + uint32 map_keys_sz; + osh = dhd->osh; + + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); + + map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz); + if (map == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", + __FUNCTION__, __LINE__, dhd_pktid_map_sz)); + return (dhd_pktid_map_handle_t *)NULL; + } + + map->items = num_items; + map->avail = num_items; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + + /* Initialize the lock that protects this structure */ + map->pktid_lock = DHD_PKTID_LOCK_INIT(osh); + if (map->pktid_lock == NULL) { + DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__)); + goto error; + } + + map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz); + if (map->keys == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed 
for map->keys size %d\n", + __FUNCTION__, __LINE__, map_keys_sz)); + goto error; + } + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */ + map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); + if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { + DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__)); + goto error; + } else { + DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", + __FUNCTION__, __LINE__, map_items + 1)); + } + map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */ + map->keys[nkey] = nkey; /* populate with unique keys */ + map->lockers[nkey].state = LOCKER_IS_FREE; + map->lockers[nkey].pkt = NULL; /* bzero: redundant */ + map->lockers[nkey].len = 0; + } + + /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */ + map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */ + map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */ + map->lockers[DHD_PKTID_INVALID].len = 0; + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */ + bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + return (dhd_pktid_map_handle_t *)map; /* opaque handle */ + +error: + if (map) { +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + if (map->keys) { + MFREE(osh, map->keys, map_keys_sz); + } + + if (map->pktid_lock) { + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + } + + VMFREE(osh, map, dhd_pktid_map_sz); + } + return (dhd_pktid_map_handle_t 
*)NULL; +} + +/** + * Retrieve all allocated keys and free all . + * Freeing implies: unmapping the buffers and freeing the native packet + * This could have been a callback registered with the pktid mapper. + */ +static void +dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + void *osh; + uint32 nkey; + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + uint32 map_items; + unsigned long flags; + bool data_tx = FALSE; + + map = (dhd_pktid_map_t *)handle; + DHD_PKTID_LOCK(map->pktid_lock, flags); + osh = dhd->osh; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + /* skip reserved KEY #0, and start from 1 */ + + for (nkey = 1; nkey <= map_items; nkey++) { + if (map->lockers[nkey].state == LOCKER_IS_BUSY) { + locker = &map->lockers[nkey]; + locker->state = LOCKER_IS_FREE; + data_tx = (locker->pkttype == PKTTYPE_DATA_TX); + if (data_tx) { + OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count); + } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_RING */ + + { + if (SECURE_DMA_ENAB(dhd->osh)) + SECURE_DMA_UNMAP(osh, locker->pa, + locker->len, locker->dir, 0, + locker->dmah, locker->secdma, 0); + else + DMA_UNMAP(osh, locker->pa, locker->len, + locker->dir, 0, locker->dmah); + } + dhd_prot_packet_free(dhd, (ulong*)locker->pkt, + locker->pkttype, data_tx); + } + else { +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_RING */ + } + map->keys[nkey] = nkey; /* populate with unique keys */ + } + + map->avail = map_items; + memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +} + +#ifdef IOCTLRESP_USE_CONSTMEM +/** Called in detach scenario. Releasing IOCTL buffers. 
*/ +static void +dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + uint32 nkey; + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + uint32 map_items; + unsigned long flags; + + map = (dhd_pktid_map_t *)handle; + DHD_PKTID_LOCK(map->pktid_lock, flags); + + map_items = DHD_PKIDMAP_ITEMS(map->items); + /* skip reserved KEY #0, and start from 1 */ + for (nkey = 1; nkey <= map_items; nkey++) { + if (map->lockers[nkey].state == LOCKER_IS_BUSY) { + dhd_dma_buf_t retbuf; + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_RING */ + + locker = &map->lockers[nkey]; + retbuf.va = locker->pkt; + retbuf.len = locker->len; + retbuf.pa = locker->pa; + retbuf.dmah = locker->dmah; + retbuf.secdma = locker->secdma; + + free_ioctl_return_buffer(dhd, &retbuf); + } + else { +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_RING */ + } + map->keys[nkey] = nkey; /* populate with unique keys */ + } + + map->avail = map_items; + memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +/** + * Free the pktid map. 
+ */ +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_keys_sz; + + if (handle == NULL) + return; + + /* Free any pending packets */ + dhd_pktid_map_reset(dhd, handle); + + map = (dhd_pktid_map_t *)handle; + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + + DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + MFREE(dhd->osh, map->keys, map_keys_sz); + VMFREE(dhd->osh, handle, dhd_pktid_map_sz); +} + +#ifdef IOCTLRESP_USE_CONSTMEM +static void +dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_keys_sz; + + if (handle == NULL) + return; + + /* Free any pending packets */ + dhd_pktid_map_reset_ioctl(dhd, handle); + + map = (dhd_pktid_map_t *)handle; + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + + DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + MFREE(dhd->osh, map->keys, map_keys_sz); + VMFREE(dhd->osh, handle, dhd_pktid_map_sz); +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +/** Get the pktid free count */ +static INLINE uint32 BCMFASTPATH 
+dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 avail; + unsigned long flags; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + DHD_PKTID_LOCK(map->pktid_lock, flags); + avail = map->avail; + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return avail; +} + +/** + * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not + * yet populated. Invoke the pktid save api to populate the packet parameters + * into the locker. This function is not reentrant, and is the caller's + * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as + * a failure case, implying a depleted pool of pktids. + */ +static INLINE uint32 +dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, dhd_pkttype_t pkttype) +{ + uint32 nkey; + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + unsigned long flags; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + DHD_PKTID_LOCK(map->pktid_lock, flags); + + if ((int)(map->avail) <= 0) { /* no more pktids to allocate */ + map->failures++; + DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__)); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + return DHD_PKTID_INVALID; /* failed alloc request */ + } + + ASSERT(map->avail <= map->items); + nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */ + + if ((map->avail > map->items) || (nkey > map->items)) { + map->failures++; + DHD_ERROR(("%s:%d: failed to allocate a new pktid," + " map->avail<%u>, nkey<%u>, pkttype<%u>\n", + __FUNCTION__, __LINE__, map->avail, nkey, + pkttype)); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + return DHD_PKTID_INVALID; /* failed alloc request */ + } + + locker = &map->lockers[nkey]; /* save packet metadata in locker */ + map->avail--; + locker->pkt = pkt; /* pkt is saved, other params not yet saved. 
*/ + locker->len = 0; + locker->state = LOCKER_IS_BUSY; /* reserve this locker */ + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + ASSERT(nkey != DHD_PKTID_INVALID); + + return nkey; /* return locker's numbered key */ +} + +/* + * dhd_pktid_map_save - Save a packet's parameters into a locker + * corresponding to a previously reserved unique numbered key. + */ +static INLINE void +dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + unsigned long flags; + + ASSERT(handle != NULL); + map = (dhd_pktid_map_t *)handle; + + DHD_PKTID_LOCK(map->pktid_lock, flags); + + if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { + DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n", + __FUNCTION__, __LINE__, nkey, pkttype)); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return; + } + + locker = &map->lockers[nkey]; + + ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) || + ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL))); + + /* store contents in locker */ + locker->dir = dir; + locker->pa = pa; + locker->len = (uint16)len; /* 16bit len */ + locker->dmah = dmah; /* 16bit len */ + locker->secdma = secdma; + locker->pkttype = pkttype; + locker->pkt = pkt; + locker->state = LOCKER_IS_BUSY; /* make this locker busy */ + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +} + +/** + * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet + * contents into the corresponding locker. Return the numbered key. 
+ */ +static uint32 BCMFASTPATH +dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, + dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + uint32 nkey; + + nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype); + if (nkey != DHD_PKTID_INVALID) { + dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, + len, dir, dmah, secdma, pkttype); + } + + return nkey; +} + +/** + * dhd_pktid_map_free - Given a numbered key, return the locker contents. + * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility. + * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid + * value. Only a previously allocated pktid may be freed. + */ +static void * BCMFASTPATH +dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, + dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype, + bool rsv_locker) +{ + dhd_pktid_map_t *map; + dhd_pktid_item_t *locker; + void * pkt; + unsigned long long locker_addr; + unsigned long flags; + + ASSERT(handle != NULL); + + map = (dhd_pktid_map_t *)handle; + + DHD_PKTID_LOCK(map->pktid_lock, flags); + + if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { + DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n", + __FUNCTION__, __LINE__, nkey, pkttype)); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return NULL; + } + + locker = &map->lockers[nkey]; + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */ +#endif /* DHD_PKTID_AUDIT_MAP */ + + /* Debug check for cloned numbered key */ + if (locker->state == LOCKER_IS_FREE) { + DHD_ERROR(("%s:%d: Error! 
freeing already freed invalid pktid<%u>\n", + __FUNCTION__, __LINE__, nkey)); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return NULL; + } + + /* Check for the colour of the buffer i.e The buffer posted for TX, + * should be freed for TX completion. Similarly the buffer posted for + * IOCTL should be freed for IOCT completion etc. + */ + if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) { + + DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n", + __FUNCTION__, __LINE__, nkey)); +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(locker->pa, locker_addr); +#else + locker_addr = PHYSADDRLO(locker->pa); +#endif /* BCMDMA64OSL */ + DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>," + "pkttype <%d> locker->pa <0x%llx> \n", + __FUNCTION__, __LINE__, locker->state, locker->pkttype, + pkttype, locker_addr)); + DHD_PKTID_UNLOCK(map->pktid_lock, flags); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return NULL; + } + + if (rsv_locker == DHD_PKTID_FREE_LOCKER) { + map->avail++; + map->keys[map->avail] = nkey; /* make this numbered key available */ + locker->state = LOCKER_IS_FREE; /* open and free Locker */ + } else { + /* pktid will be reused, but the locker does not have a valid pkt */ + locker->state = LOCKER_IS_RSVD; + } + +#if defined(DHD_PKTID_AUDIT_MAP) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_MAP */ + + *pa = locker->pa; /* return contents of locker */ + *len = (uint32)locker->len; + *dmah = locker->dmah; + *secdma = locker->secdma; + + pkt = locker->pkt; + locker->pkt = NULL; /* Clear pkt */ + locker->len = 0; + + 
DHD_PKTID_UNLOCK(map->pktid_lock, flags); + + return pkt; +} + +#else /* ! DHD_PCIE_PKTID */ + +typedef struct pktlist { + PKT_LIST *tx_pkt_list; /* list for tx packets */ + PKT_LIST *rx_pkt_list; /* list for rx packets */ + PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */ +} pktlists_t; + +/* + * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail + * of a one to one mapping 32bit pktptr and a 32bit pktid. + * + * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail. + * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by + * a lock. + * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined. + */ +#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32)) +#define DHD_PKTPTR32(pktid32) ((void *)(pktid32)) + +static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype); +static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype); + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = NULL; + + if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(pktlists_t))); + goto error_done; + } + + if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + if ((handle->ctrl_pkt_list = 
(PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", + __FUNCTION__, __LINE__, sizeof(PKT_LIST))); + goto error; + } + + PKTLIST_INIT(handle->tx_pkt_list); + PKTLIST_INIT(handle->rx_pkt_list); + PKTLIST_INIT(handle->ctrl_pkt_list); + + return (dhd_pktid_map_handle_t *) handle; + +error: + if (handle->ctrl_pkt_list) { + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } + +error_done: + return (dhd_pktid_map_handle_t *)NULL; +} + +static void +dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle) +{ + osl_t *osh = dhd->osh; + + if (handle->ctrl_pkt_list) { + PKTLIST_FINI(handle->ctrl_pkt_list); + MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->rx_pkt_list) { + PKTLIST_FINI(handle->rx_pkt_list); + MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); + } + + if (handle->tx_pkt_list) { + PKTLIST_FINI(handle->tx_pkt_list); + MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); + } +} + +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map) +{ + osl_t *osh = dhd->osh; + pktlists_t *handle = (pktlists_t *) map; + + ASSERT(handle != NULL); + if (handle == (pktlists_t *)NULL) { + return; + } + + dhd_pktid_map_reset(dhd, handle); + + if (handle) { + MFREE(osh, handle, sizeof(pktlists_t)); + } +} + +/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */ +static INLINE uint32 +dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, + dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + ASSERT(pktptr32 != NULL); + DHD_PKT_SET_DMA_LEN(pktptr32, dma_len); + DHD_PKT_SET_DMAH(pktptr32, dmah); + 
DHD_PKT_SET_PA(pktptr32, pa); + DHD_PKT_SET_SECDMA(pktptr32, secdma); + + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_ENQ(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_ENQ(handle->rx_pkt_list, pktptr32); + } else { + PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32); + } + + return DHD_PKTID32(pktptr32); +} + +/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */ +static INLINE void * +dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, + dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, + dhd_pkttype_t pkttype) +{ + pktlists_t *handle = (pktlists_t *) map; + void *pktptr32; + + ASSERT(pktid32 != 0U); + pktptr32 = DHD_PKTPTR32(pktid32); + *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32); + *dmah = DHD_PKT_GET_DMAH(pktptr32); + *pa = DHD_PKT_GET_PA(pktptr32); + *secdma = DHD_PKT_GET_SECDMA(pktptr32); + + if (pkttype == PKTTYPE_DATA_TX) { + PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32); + } else if (pkttype == PKTTYPE_DATA_RX) { + PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32); + } else { + PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32); + } + + return pktptr32; +} + +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt) + +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \ + dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ + (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \ + dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, 
(uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&secdma, (dhd_pkttype_t)(pkttype)); \ + }) + +#define DHD_PKTID_AVAIL(map) (~0) + +#endif /* ! DHD_PCIE_PKTID */ + +/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */ + +/** + * The PCIE FD protocol layer is constructed in two phases: + * Phase 1. dhd_prot_attach() + * Phase 2. dhd_prot_init() + * + * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields. + * All Common rings are allose attached (msgbuf_ring_t objects are allocated + * with DMA-able buffers). + * All dhd_dma_buf_t objects are also allocated here. + * + * As dhd_prot_attach is invoked prior to the pcie_shared object is read, any + * initialization of objects that requires information advertized by the dongle + * may not be performed here. + * E.g. the number of TxPost flowrings is not know at this point, neither do + * we know shich form of D2H DMA sync mechanism is advertized by the dongle, or + * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H + * rings (common + flow). + * + * dhd_prot_init() is invoked after the bus layer has fetched the information + * advertized by the dongle in the pcie_shared_t. + */ +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + osl_t *osh = dhd->osh; + dhd_prot_t *prot; + + /* FW going to DMA extended trap data, + * allocate buffer for the maximum extended trap data. + */ + uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN; + + /* Allocate prot structure */ + if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, + sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(prot, 0, sizeof(*prot)); + + prot->osh = osh; + dhd->prot = prot; + + /* DMAing ring completes supported? 
FALSE by default */ + dhd->dma_d2h_ring_upd_support = FALSE; + dhd->dma_h2d_ring_upd_support = FALSE; + dhd->dma_ring_upd_overwrite = FALSE; + + dhd->idma_inited = 0; + dhd->ifrm_inited = 0; + dhd->dar_inited = 0; + + /* Common Ring Allocations */ + + /* Ring 0: H2D Control Submission */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl", + H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE, + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 1: H2D Receive Buffer Post */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp", + H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE, + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 2: D2H Control Completion */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl", + D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 3: D2H Transmit Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl", + D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* Ring 4: D2H Receive Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl", + D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* + * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able + * buffers for flowrings will be instantiated, in dhd_prot_init() . 
+ * See dhd_prot_flowrings_pool_attach() + */ + /* ioctl response buffer */ + if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) { + goto fail; + } + + /* IOCTL request buffer */ + if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) { + goto fail; + } + + /* Host TS request buffer one buffer for now */ + if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) { + goto fail; + } + prot->hostts_req_buf_inuse = FALSE; + + /* Scratch buffer for dma rx offset */ +#ifdef BCM_HOST_BUF + if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, + ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) +#else + if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) + +#endif /* BCM_HOST_BUF */ + { + goto fail; + } + + /* scratch buffer bus throughput measurement */ + if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) { + goto fail; + } + +#ifdef DHD_RX_CHAINING + dhd_rxchain_reset(&prot->rxchain); +#endif // endif + + prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID); + if (prot->pktid_ctrl_map == NULL) { + goto fail; + } + + prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID); + if (prot->pktid_rx_map == NULL) + goto fail; + + prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID); + if (prot->pktid_tx_map == NULL) + goto fail; + +#ifdef IOCTLRESP_USE_CONSTMEM + prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST); + if (prot->pktid_map_handle_ioctl == NULL) { + goto fail; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ + + /* Initialize the work queues to be used by the Load Balancing logic */ +#if defined(DHD_LB_TXC) + { + void *buffer; + buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ); + if (buffer == NULL) { + DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__)); + goto fail; + } + bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons, + buffer, 
DHD_LB_WORKQ_SZ); + prot->tx_compl_prod_sync = 0; + DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n", + __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); + } +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) + { + void *buffer; + buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ); + if (buffer == NULL) { + DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__)); + goto fail; + } + bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons, + buffer, DHD_LB_WORKQ_SZ); + prot->rx_compl_prod_sync = 0; + DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n", + __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); + } +#endif /* DHD_LB_RXC */ + + /* Initialize trap buffer */ + if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) { + DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__)); + goto fail; + } + + return BCME_OK; + +fail: + + if (prot) { + /* Free up all allocated memories */ + dhd_prot_detach(dhd); + } + + return BCME_NOMEM; +} /* dhd_prot_attach */ + +static int +dhd_alloc_host_scbs(dhd_pub_t *dhd) +{ + int ret = BCME_OK; + sh_addr_t base_addr; + dhd_prot_t *prot = dhd->prot; + uint32 host_scb_size = 0; + + if (dhd->hscb_enable) { + /* read number of bytes to allocate from F/W */ + dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0); + if (host_scb_size) { + /* alloc array of host scbs */ + ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size); + /* write host scb address to F/W */ + if (ret == BCME_OK) { + dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + HOST_SCB_ADDR, 0); + } else { + DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n")); + } + } else { + DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n")); + } + } else { + DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n")); + } + + return ret; +} + +void +dhd_set_host_cap(dhd_pub_t *dhd) +{ + uint32 data = 0; + dhd_prot_t *prot = dhd->prot; + + if 
(dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { + if (dhd->h2d_phase_supported) { + data |= HOSTCAP_H2D_VALID_PHASE; + if (dhd->force_dongletrap_on_bad_h2d_phase) + data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE; + } + if (prot->host_ipc_version > prot->device_ipc_version) + prot->active_ipc_version = prot->device_ipc_version; + else + prot->active_ipc_version = prot->host_ipc_version; + + data |= prot->active_ipc_version; + + if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) { + DHD_INFO(("Advertise Hostready Capability\n")); + data |= HOSTCAP_H2D_ENABLE_HOSTRDY; + } + { + /* Disable DS altogether */ + data |= HOSTCAP_DS_NO_OOB_DW; + dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE); + } + + /* Indicate support for extended trap data */ + data |= HOSTCAP_EXTENDED_TRAP_DATA; + + /* Indicate support for TX status metadata */ + if (dhd->pcie_txs_metadata_enable != 0) + data |= HOSTCAP_TXSTATUS_METADATA; + + /* Enable fast delete ring in firmware if supported */ + if (dhd->fast_delete_ring_support) { + data |= HOSTCAP_FAST_DELETE_RING; + } + + if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) { + DHD_ERROR(("IDMA inited\n")); + data |= HOSTCAP_H2D_IDMA; + dhd->idma_inited = TRUE; + } + + if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) { + DHD_ERROR(("IFRM Inited\n")); + data |= HOSTCAP_H2D_IFRM; + dhd->ifrm_inited = TRUE; + dhd->dma_h2d_ring_upd_support = FALSE; + dhd_prot_dma_indx_free(dhd); + } + + if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) { + DHD_ERROR(("DAR doorbell Use\n")); + data |= HOSTCAP_H2D_DAR; + dhd->dar_inited = TRUE; + } + + data |= HOSTCAP_UR_FW_NO_TRAP; + + if (dhd->hscb_enable) { + data |= HOSTCAP_HSCB; + } + +#ifdef EWP_EDL + if (dhd->dongle_edl_support) { + data |= HOSTCAP_EDL_RING; + DHD_ERROR(("Enable EDL host cap\n")); + } else { + DHD_ERROR(("DO NOT SET EDL host cap\n")); + } +#endif /* EWP_EDL */ + + DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n", + __FUNCTION__, + prot->active_ipc_version, prot->host_ipc_version, + 
prot->device_ipc_version)); + + dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0); + dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa, + sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0); + + } + +} + +/** + * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has + * completed it's initialization of the pcie_shared structure, we may now fetch + * the dongle advertized features and adjust the protocol layer accordingly. + * + * dhd_prot_init() may be invoked again after a dhd_prot_reset(). + */ +int +dhd_prot_init(dhd_pub_t *dhd) +{ + sh_addr_t base_addr; + dhd_prot_t *prot = dhd->prot; + int ret = 0; + uint32 idmacontrol; + uint32 waitcount = 0; + +#ifdef WL_MONITOR + dhd->monitor_enable = FALSE; +#endif /* WL_MONITOR */ + + /** + * A user defined value can be assigned to global variable h2d_max_txpost via + * 1. DHD IOVAR h2d_max_txpost, before firmware download + * 2. module parameter h2d_max_txpost + * prot->h2d_max_txpost is assigned with H2DRING_TXPOST_MAX_ITEM, + * if user has not defined any buffers by one of the above methods. + */ + prot->h2d_max_txpost = (uint16)h2d_max_txpost; + + DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost)); + + /* Read max rx packets supported by dongle */ + dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); + if (prot->max_rxbufpost == 0) { + /* This would happen if the dongle firmware is not */ + /* using the latest shared structure template */ + prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; + } + DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); + + /* Initialize. bzero() would blow away the dma pointers. 
*/ + prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST; + prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; + prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST; + prot->max_tsbufpost = DHD_MAX_TSBUF_POST; + + prot->cur_ioctlresp_bufs_posted = 0; + OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count); + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->cur_ts_bufs_posted = 0; + prot->infobufpost = 0; + + prot->dmaxfer.srcmem.va = NULL; + prot->dmaxfer.dstmem.va = NULL; + prot->dmaxfer.in_progress = FALSE; + + prot->metadata_dbg = FALSE; + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; + + /* To catch any rollover issues fast, starting with higher ioctl_trans_id */ + prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; + prot->ioctl_state = 0; + prot->ioctl_status = 0; + prot->ioctl_resplen = 0; + prot->ioctl_received = IOCTL_WAIT; + + /* Initialize Common MsgBuf Rings */ + + prot->device_ipc_version = dhd->bus->api.fw_rev; + prot->host_ipc_version = PCIE_SHARED_VERSION; + + /* Init the host API version */ + dhd_set_host_cap(dhd); + + /* alloc and configure scb host address for dongle */ + if ((ret = dhd_alloc_host_scbs(dhd))) { + return ret; + } + + /* Register the interrupt function upfront */ + /* remove corerev checks in data path */ + /* do this after host/fw negotiation for DAR */ + prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); + prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus); + + dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? 
TRUE : FALSE; + + dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln); + + /* Make it compatibile with pre-rev7 Firmware */ + if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) { + prot->d2hring_tx_cpln.item_len = + D2HRING_TXCMPLT_ITEMSIZE_PREREV7; + prot->d2hring_rx_cpln.item_len = + D2HRING_RXCMPLT_ITEMSIZE_PREREV7; + } + dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln); + + dhd_prot_d2h_sync_init(dhd); + + dhd_prot_h2d_sync_init(dhd); + + /* init the scratch buffer */ + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_SCRATCH_BUF, 0); + dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len, + sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0); + + /* If supported by the host, indicate the memory block + * for completion writes / submission reads to shared space + */ + if (dhd->dma_d2h_ring_upd_support) { + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_INDX_WR_BUF, 0); + dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_DMA_INDX_RD_BUF, 0); + } + + if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) { + dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_DMA_INDX_WR_BUF, 0); + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_INDX_RD_BUF, 0); + } + + /* Signal to the dongle that common ring init is complete */ + dhd_bus_hostready(dhd->bus); + + /* + * If the DMA-able buffers for flowring needs to come from a specific + * contiguous memory 
region, then setup prot->flowrings_dma_buf here. + * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from + * this contiguous memory region, for each of the flowrings. + */ + + /* Pre-allocate pool of msgbuf_ring for flowrings */ + if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) { + return BCME_ERROR; + } + + /* If IFRM is enabled, wait for FW to setup the DMA channel */ + if (IFRM_ENAB(dhd)) { + dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_IFRM_INDX_WR_BUF, 0); + } + + /* If IDMA is enabled and initied, wait for FW to setup the IDMA descriptors + * Waiting just before configuring doorbell + */ +#define IDMA_ENABLE_WAIT 10 + if (IDMA_ACTIVE(dhd)) { + /* wait for idma_en bit in IDMAcontrol register to be set */ + /* Loop till idma_en is not set */ + uint buscorerev = dhd->bus->sih->buscorerev; + idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + IDMAControl(buscorerev), 0, 0); + while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) && + (waitcount++ < IDMA_ENABLE_WAIT)) { + + DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n", + waitcount, idmacontrol)); + OSL_DELAY(1000); /* 1ms as its onetime only */ + idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + IDMAControl(buscorerev), 0, 0); + } + + if (waitcount < IDMA_ENABLE_WAIT) { + DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol)); + } else { + DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n", + waitcount, idmacontrol)); + return BCME_ERROR; + } + } + + /* Host should configure soft doorbells if needed ... 
here */ + + /* Post to dongle host configured soft doorbells */ + dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd); + + /* Post buffers for packet reception and ioctl/event responses */ + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + prot->no_retry = FALSE; + prot->no_aggr = FALSE; + prot->fixed_rate = FALSE; + + /* + * Note that any communication with the Dongle should be added + * below this point. Any other host data structure initialiation that + * needs to be done prior to the DPC starts executing should be done + * befor this point. + * Because once we start sending H2D requests to Dongle, the Dongle + * respond immediately. So the DPC context to handle this + * D2H response could preempt the context in which dhd_prot_init is running. + * We want to ensure that all the Host part of dhd_prot_init is + * done before that. + */ + + /* See if info rings could be created, info rings should be created + * only if dongle does not support EDL + */ +#ifdef EWP_EDL + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support) +#else + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) +#endif /* EWP_EDL */ + { + if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) { + /* For now log and proceed, further clean up action maybe necessary + * when we have more clarity. + */ + DHD_ERROR(("%s Info rings couldn't be created: Err Code%d", + __FUNCTION__, ret)); + } + } + +#ifdef EWP_EDL + /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */ + if (dhd->dongle_edl_support) { + if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) { + DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d", + __FUNCTION__, ret)); + } + } +#endif /* EWP_EDL */ + + return BCME_OK; +} /* dhd_prot_init */ + +/** + * dhd_prot_detach - PCIE FD protocol layer destructor. 
 * Unlink, frees allocated protocol memory (including dhd_prot)
 */
void dhd_prot_detach(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	/* Stop the protocol module */
	if (prot) {

		/* free up all DMA-able buffers allocated during prot attach/init */

		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
		dhd_dma_buf_free(dhd, &prot->retbuf);
		dhd_dma_buf_free(dhd, &prot->ioctbuf);
		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
		dhd_dma_buf_free(dhd, &prot->host_scb_buf);

		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);

		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);

		/* Common MsgBuf Rings */
		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);

		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
		dhd_prot_flowrings_pool_detach(dhd);

		/* detach info rings */
		dhd_prot_detach_info_rings(dhd);

#ifdef EWP_EDL
		dhd_prot_detach_edl_rings(dhd);
#endif // endif

		/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
		 * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKGET.
		 * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKFREE.
		 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
		 */
		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
#ifdef IOCTLRESP_USE_CONSTMEM
		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
#endif // endif

		/* Free the Load Balancing work queues allocated in dhd_prot_attach() */
#if defined(DHD_LB_TXC)
		if (prot->tx_compl_prod.buffer)
			MFREE(dhd->osh, prot->tx_compl_prod.buffer,
				sizeof(void*) * DHD_LB_WORKQ_SZ);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
		if (prot->rx_compl_prod.buffer)
			MFREE(dhd->osh, prot->rx_compl_prod.buffer,
				sizeof(void*) * DHD_LB_WORKQ_SZ);
#endif /* DHD_LB_RXC */

		/* Finally release the dhd_prot_t object itself and clear the back
		 * pointer so a repeated detach is a no-op (the "if (prot)" guard above).
		 */
		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));

		dhd->prot = NULL;
	}
} /* dhd_prot_detach */

/**
 * dhd_prot_reset - Reset the protocol layer without freeing any objects.
 * This may be invoked to soft reboot the dongle, without having to
 * detach and attach the entire protocol layer.
 *
 * After dhd_prot_reset(), dhd_prot_init() may be invoked
 * without going through a dhd_prot_attach() phase.
 */
void
dhd_prot_reset(dhd_pub_t *dhd)
{
	struct dhd_prot *prot = dhd->prot;

	DHD_TRACE(("%s\n", __FUNCTION__));

	/* Nothing to reset before a successful dhd_prot_attach() */
	if (prot == NULL) {
		return;
	}

	dhd_prot_flowrings_pool_reset(dhd);

	/* Reset Common MsgBuf Rings */
	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);

	/* Reset info rings (dynamically allocated, may not exist) */
	if (prot->h2dring_info_subn) {
		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
	}

	if (prot->d2hring_info_cpln) {
		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
	}
#ifdef EWP_EDL
	if (prot->d2hring_edl) {
		dhd_prot_ring_reset(dhd, prot->d2hring_edl);
	}
#endif /* EWP_EDL */

	/* Reset all DMA-able buffers allocated during prot attach */
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
	dhd_dma_buf_reset(dhd, &prot->retbuf);
	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
	dhd_dma_buf_reset(dhd, &prot->host_scb_buf);

	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);

	/* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);

	/* Re-initialize book-keeping back to post-attach defaults */
	prot->rx_metadata_offset = 0;
	prot->tx_metadata_offset = 0;

	prot->rxbufpost = 0;
	prot->cur_event_bufs_posted = 0;
	prot->cur_ioctlresp_bufs_posted = 0;

	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
	prot->data_seq_no = 0;
	prot->ioctl_seq_no = 0;
	prot->ioctl_state = 0;
	prot->curr_ioctl_cmd = 0;
	prot->ioctl_received = IOCTL_WAIT;
	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;

	/* dhd_flow_rings_init is located at dhd_bus_start,
	 * so when stopping bus, flowrings shall be deleted
	 */
	if (dhd->flow_rings_inited) {
		dhd_flow_rings_deinit(dhd);
	}

	/* Reset PKTID map */
	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
#ifdef IOCTLRESP_USE_CONSTMEM
	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
#endif /* IOCTLRESP_USE_CONSTMEM */
#ifdef DMAMAP_STATS
	/* Clear DMA mapping accounting; the mappings themselves were released above */
	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
#ifndef IOCTLRESP_USE_CONSTMEM
	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
#endif /* IOCTLRESP_USE_CONSTMEM */
	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
#endif /* DMAMAP_STATS */
} /* dhd_prot_reset */

/* Load-balancing dispatch hooks: compile to the real dispatcher when the
 * corresponding DHD_LB_* feature is enabled, otherwise to a no-op.
 */
#if defined(DHD_LB_RXP)
#define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
#else /* !DHD_LB_RXP */
#define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
#endif /* !DHD_LB_RXP */

#if defined(DHD_LB_RXC)
#define DHD_LB_DISPATCH_RX_COMPL(dhdp)	dhd_lb_dispatch_rx_compl(dhdp)
#else /* !DHD_LB_RXC */
#define DHD_LB_DISPATCH_RX_COMPL(dhdp)	do { /* noop */ } while (0)
#endif /* !DHD_LB_RXC */

#if defined(DHD_LB_TXC)
#define DHD_LB_DISPATCH_TX_COMPL(dhdp)	dhd_lb_dispatch_tx_compl(dhdp)
#else /* !DHD_LB_TXC */
#define DHD_LB_DISPATCH_TX_COMPL(dhdp)	do { /* noop */ } while (0)
#endif /* !DHD_LB_TXC */

#if defined(DHD_LB)
/* DHD load balancing: deferral of work to another online CPU */
/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
extern void
dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);

#if defined(DHD_LB_RXP)
/**
 * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
 * to other CPU cores
 */
static INLINE void
dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
{
	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
}
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXC)
/**
 * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
 * to other CPU cores
 *
 * NOTE(review): the ring_idx parameter is not used in this body; presumably
 * kept for interface symmetry with other dispatchers -- confirm before removal.
 */
static INLINE void
dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
{
	bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
	dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
}

/**
 * DHD load balanced tx completion tasklet handler, that will perform the
 * freeing of packets on the selected CPU. Packet pointers are delivered to
 * this tasklet via the tx complete workq.
 */
void
dhd_lb_tx_compl_handler(unsigned long data)
{
	int elem_ix;
	void *pkt, **elem;
	dmaaddr_t pa;
	uint32 pa_len;
	dhd_pub_t *dhd = (dhd_pub_t *)data;
	dhd_prot_t *prot = dhd->prot;
	bcm_workq_t *workq = &prot->tx_compl_cons;
	uint32 count = 0;

	/* NOTE(review): curr_cpu is fetched but never used below -- looks like
	 * leftover instrumentation; confirm before cleaning up.
	 */
	int curr_cpu;
	curr_cpu = get_cpu();
	put_cpu();

	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);

	/* Drain the tx completion work queue until it reports empty */
	while (1) {
		elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);

		if (elem_ix == BCM_RING_EMPTY) {
			break;
		}

		elem = WORKQ_ELEMENT(void *, workq, elem_ix);
		pkt = *elem;

		DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));

		OSL_PREFETCH(PKTTAG(pkt));
		OSL_PREFETCH(pkt);

		/* Unmap the DMA address recorded in the packet tag before freeing */
		pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
		pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));

		DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
		dhd_txcomplete(dhd, pkt, true);
#endif // endif

		PKTFREE(dhd->osh, pkt, TRUE);
		count++;
	}

	/* smp_wmb(); */
	bcm_workq_cons_sync(workq);
	DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
}
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)

/**
 * dhd_lb_dispatch_rx_compl - load balance by dispatching rx completion work
 * to other CPU cores
 */
static INLINE void
dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
{
	dhd_prot_t *prot = dhdp->prot;
	/* Schedule the tasklet only if we have to */
	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
		/* flush WR index */
		bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
		dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
	}
}

/**
 * DHD load balanced rx completion tasklet handler: re-posts rx buffers
 * (re-using pktids) and syncs the consumer index of the rx compl workq.
 */
void
dhd_lb_rx_compl_handler(unsigned long data)
{
	dhd_pub_t *dhd = (dhd_pub_t *)data;
	bcm_workq_t *workq = &dhd->prot->rx_compl_cons;

	DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);

	dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
	bcm_workq_cons_sync(workq);
}
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */

/** Record the dongle-advertised rx data offset in the protocol state */
void
dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
{
	dhd_prot_t
*prot = dhd->prot;
	prot->rx_dataoffset = rx_offset;
}

/**
 * dhd_check_create_info_rings - allocate and attach the debug info
 * submission (H2D) and completion (D2H) rings if not already present.
 * Returns BCME_OK when both rings exist, BCME_NOMEM/attach error otherwise.
 */
static int
dhd_check_create_info_rings(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int ret = BCME_ERROR;
	uint16 ringid;

	{
		/* dongle may increase max_submission_rings so keep
		 * ringid at end of dynamic rings
		 */
		ringid = dhd->bus->max_tx_flowrings +
			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
			BCMPCIE_H2D_COMMON_MSGRINGS;
	}

	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
	}

	if (prot->h2dring_info_subn == NULL) {
		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));

		if (prot->h2dring_info_subn == NULL) {
			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
				__FUNCTION__));
			return BCME_NOMEM;
		}

		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
			ringid);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
				__FUNCTION__));
			goto err;
		}
	}

	if (prot->d2hring_info_cpln == NULL) {
		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));

		if (prot->d2hring_info_cpln == NULL) {
			/* NOTE(review): error text says h2dring_info_subn but this is
			 * the d2hring_info_cpln allocation -- copy/paste in the log
			 * string; left unchanged here as it is runtime output.
			 */
			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
				__FUNCTION__));
			return BCME_NOMEM;
		}

		/* create the debug info completion ring next to debug info submit ring
		 * ringid = id next to debug info submit ring
		 */
		ringid = ringid + 1;

		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
			ringid);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
				__FUNCTION__));
			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
			goto err;
		}
	}

	return ret;
err:
	/* MFREE of a NULL pointer is safe per the OSL; clear both ring pointers */
	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
	prot->h2dring_info_subn = NULL;

	if (prot->d2hring_info_cpln) {
		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
		prot->d2hring_info_cpln = NULL;
	}
	return ret;
} /* dhd_check_create_info_rings */

/**
 * dhd_prot_init_info_rings - create the info rings in the dongle by sending
 * d2h/h2d ring-create requests; no-op if the completion ring is already
 * inited or a create is pending.
 */
int
dhd_prot_init_info_rings(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int ret = BCME_OK;

	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
		DHD_ERROR(("%s: info rings aren't created! \n",
			__FUNCTION__));
		return ret;
	}

	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
		DHD_INFO(("Info completion ring was created!\n"));
		return ret;
	}

	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
		BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
	if (ret != BCME_OK)
		return ret;

	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;

	/* Link the submit ring to its (single) completion ring before creating it.
	 * NOTE(review): "compeltion_ring_ids" is the field's spelling in the
	 * project-wide msgbuf_ring_t declaration; cannot be fixed locally.
	 */
	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
	prot->h2dring_info_subn->n_completion_ids = 1;
	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;

	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
		BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);

	/* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
	 * so can not cleanup if one ring was created while the other failed
	 */
	return ret;
} /* dhd_prot_init_info_rings */

/** Detach and free both info rings, if allocated */
static void
dhd_prot_detach_info_rings(dhd_pub_t *dhd)
{
	if (dhd->prot->h2dring_info_subn) {
		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
		dhd->prot->h2dring_info_subn = NULL;
	}
	if (dhd->prot->d2hring_info_cpln) {
		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
		dhd->prot->d2hring_info_cpln = NULL;
	}
}

#ifdef EWP_EDL
/**
 * dhd_check_create_edl_rings - allocate and attach the D2H Enhanced Debug
 * Lane (EDL) ring if not already present; re-uses the info cpl ring id + 1.
 */
static int
dhd_check_create_edl_rings(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int ret = BCME_ERROR;
	uint16 ringid;

	{
		/* dongle may increase max_submission_rings so keep
		 * ringid at end of dynamic rings (re-use info ring cpl ring id)
		 */
		ringid = dhd->bus->max_tx_flowrings +
			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
			BCMPCIE_H2D_COMMON_MSGRINGS + 1;
	}

	if (prot->d2hring_edl) {
		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
	}

	if (prot->d2hring_edl == NULL) {
		prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));

		if (prot->d2hring_edl == NULL) {
			DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
				__FUNCTION__));
			return BCME_NOMEM;
		}

		DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
			ringid));
		ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
			D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
			ringid);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
				__FUNCTION__));
			goto err;
		}
	}

	return ret;
err:
	MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
	prot->d2hring_edl = NULL;

	return ret;
} /* dhd_check_create_btlog_rings */

/**
 * dhd_prot_init_edl_rings - create the EDL ring in the dongle by sending a
 * d2h ring-create request; no-op if already inited or create is pending.
 */
int
dhd_prot_init_edl_rings(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int ret = BCME_ERROR;

	if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
		DHD_ERROR(("%s: EDL rings aren't created! \n",
			__FUNCTION__));
		return ret;
	}

	if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
		DHD_INFO(("EDL completion ring was created!\n"));
		return ret;
	}

	DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
		BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
	if (ret != BCME_OK)
		return ret;

	prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;

	return BCME_OK;
} /* dhd_prot_init_btlog_rings */

/** Detach and free the EDL ring, if allocated */
static void
dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
{
	if (dhd->prot->d2hring_edl) {
		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
		MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
		dhd->prot->d2hring_edl = NULL;
	}
}
#endif /* EWP_EDL */

/**
 * Initialize protocol: sync w/dongle state.
 * Sets dongle media info (iswl, drv_version, mac address).
 */
int dhd_sync_with_dongle(dhd_pub_t *dhd)
{
	int ret = 0;
	wlc_rev_info_t revinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);

	/* Post ts buffer after shim layer is attached */
	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);

#ifdef DHD_FW_COREDUMP
	/* Check the memdump capability */
	dhd_get_memdump_info(dhd);
#endif /* DHD_FW_COREDUMP */
#ifdef BCMASSERT_LOG
	dhd_get_assert_info(dhd);
#endif /* BCMASSERT_LOG */

	/* Get the device rev info */
	memset(&revinfo, 0, sizeof(revinfo));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
	if (ret < 0) {
		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
		goto done;
	}
	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));

	DHD_SSSR_DUMP_INIT(dhd);

	dhd_process_cid_mac(dhd, TRUE);
	ret = dhd_preinit_ioctls(dhd);
	dhd_process_cid_mac(dhd, FALSE);

#if defined(DHD_H2D_LOG_TIME_SYNC)
	if
(FW_SUPPORTED(dhd, h2dlogts)) { + dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH; + dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US(); + /* This is during initialization. */ + dhd_h2d_log_time_sync(dhd); + } else { + dhd->dhd_rte_time_sync_ms = 0; + } +#endif /* DHD_H2D_LOG_TIME_SYNC */ + /* Always assumes wl for now */ + dhd->iswl = TRUE; +done: + return ret; +} /* dhd_sync_with_dongle */ + +#define DHD_DBG_SHOW_METADATA 0 + +#if DHD_DBG_SHOW_METADATA +static void BCMFASTPATH +dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len) +{ + uint8 tlv_t; + uint8 tlv_l; + uint8 *tlv_v = (uint8 *)ptr; + + if (len <= BCMPCIE_D2H_METADATA_HDRLEN) + return; + + len -= BCMPCIE_D2H_METADATA_HDRLEN; + tlv_v += BCMPCIE_D2H_METADATA_HDRLEN; + + while (len > TLV_HDR_LEN) { + tlv_t = tlv_v[TLV_TAG_OFF]; + tlv_l = tlv_v[TLV_LEN_OFF]; + + len -= TLV_HDR_LEN; + tlv_v += TLV_HDR_LEN; + if (len < tlv_l) + break; + if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER)) + break; + + switch (tlv_t) { + case WLFC_CTL_TYPE_TXSTATUS: { + uint32 txs; + memcpy(&txs, tlv_v, sizeof(uint32)); + if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) { + printf("METADATA TX_STATUS: %08x\n", txs); + } else { + wl_txstatus_additional_info_t tx_add_info; + memcpy(&tx_add_info, tlv_v + sizeof(uint32), + sizeof(wl_txstatus_additional_info_t)); + printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]" + " rate = %08x tries = %d - %d\n", txs, + tx_add_info.seq, tx_add_info.entry_ts, + tx_add_info.enq_ts, tx_add_info.last_ts, + tx_add_info.rspec, tx_add_info.rts_cnt, + tx_add_info.tx_cnt); + } + } break; + + case WLFC_CTL_TYPE_RSSI: { + if (tlv_l == 1) + printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v); + else + printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n", + (*(tlv_v + 3) << 8) | *(tlv_v + 2), + (int8)(*tlv_v), *(tlv_v + 1)); + } break; + + case WLFC_CTL_TYPE_FIFO_CREDITBACK: + bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l); + break; + + case 
WLFC_CTL_TYPE_TX_ENTRY_STAMP: + bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_RX_STAMP: { + struct { + uint32 rspec; + uint32 bus_time; + uint32 wlan_time; + } rx_tmstamp; + memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp)); + printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n", + rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec); + } break; + + case WLFC_CTL_TYPE_TRANS_ID: + bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l); + break; + + case WLFC_CTL_TYPE_COMP_TXSTATUS: + bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l); + break; + + default: + bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l); + break; + } + + len -= tlv_l; + tlv_v += tlv_l; + } +} +#endif /* DHD_DBG_SHOW_METADATA */ + +static INLINE void BCMFASTPATH +dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) +{ + if (pkt) { + if (pkttype == PKTTYPE_IOCTL_RX || + pkttype == PKTTYPE_EVENT_RX || + pkttype == PKTTYPE_INFO_RX || + pkttype == PKTTYPE_TSBUF_RX) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, pkt, send); +#else + PKTFREE(dhd->osh, pkt, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + PKTFREE(dhd->osh, pkt, send); + } + } +} + +/** + * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle + * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK + * to ensure thread safety, so no need to hold any locks for this function + */ +static INLINE void * BCMFASTPATH +dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid) +{ + void *PKTBUF; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + +#ifdef DHD_PCIE_PKTID + if (free_pktid) { + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, + pktid, pa, len, dmah, secdma, pkttype); + } else { + PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map, + pktid, pa, len, dmah, secdma, pkttype); + } +#else + PKTBUF = 
DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, + len, dmah, secdma, pkttype); +#endif /* DHD_PCIE_PKTID */ + if (PKTBUF) { + { + if (SECURE_DMA_ENAB(dhd->osh)) + SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah, + secdma, 0); + else + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); +#ifdef DMAMAP_STATS + switch (pkttype) { +#ifndef IOCTLRESP_USE_CONSTMEM + case PKTTYPE_IOCTL_RX: + dhd->dma_stats.ioctl_rx--; + dhd->dma_stats.ioctl_rx_sz -= len; + break; +#endif /* IOCTLRESP_USE_CONSTMEM */ + case PKTTYPE_EVENT_RX: + dhd->dma_stats.event_rx--; + dhd->dma_stats.event_rx_sz -= len; + break; + case PKTTYPE_INFO_RX: + dhd->dma_stats.info_rx--; + dhd->dma_stats.info_rx_sz -= len; + break; + case PKTTYPE_TSBUF_RX: + dhd->dma_stats.tsbuf_rx--; + dhd->dma_stats.tsbuf_rx_sz -= len; + break; + } +#endif /* DMAMAP_STATS */ + } + } + + return PKTBUF; +} + +#ifdef IOCTLRESP_USE_CONSTMEM +static INLINE void BCMFASTPATH +dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf) +{ + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, + retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX); + + return; +} +#endif // endif + +static void BCMFASTPATH +dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid) +{ + dhd_prot_t *prot = dhd->prot; + int16 fillbufs; + uint16 cnt = 256; + int retcount = 0; + + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + while (fillbufs >= RX_BUF_BURST) { + cnt--; + if (cnt == 0) { + /* find a better way to reschedule rx buf post if space not available */ + DHD_ERROR(("h2d rx post ring not available to post host buffers \n")); + DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost)); + break; + } + + /* Post in a burst of 32 buffers at a time */ + fillbufs = MIN(fillbufs, RX_BUF_BURST); + + /* Post buffers */ + retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid); + + if (retcount >= 
0) { + prot->rxbufpost += (uint16)retcount; +#ifdef DHD_LB_RXC + /* dhd_prot_rxbuf_post returns the number of buffers posted */ + DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount); +#endif /* DHD_LB_RXC */ + /* how many more to post */ + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + } else { + /* Make sure we don't run loop any further */ + fillbufs = 0; + } + } +} + +/** Post 'count' no of rx buffers to dongle */ +static int BCMFASTPATH +dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) +{ + void *p, **pktbuf; + uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + uint8 *rxbuf_post_tmp; + host_rxbuf_post_t *rxbuf_post; + void *msg_start; + dmaaddr_t pa, *pktbuf_pa; + uint32 *pktlen; + uint16 i = 0, alloced = 0; + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; + void *lcl_buf; + uint16 lcl_buf_size; + +#ifdef WL_MONITOR + if (dhd->monitor_enable) { + pktsz = DHD_MAX_MON_FLOWRING_RX_BUFPOST_PKTSZ; + } +#endif /* WL_MONITOR */ + /* allocate a local buffer to store pkt buffer va, pa and length */ + lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) * + RX_BUF_BURST; + lcl_buf = MALLOC(dhd->osh, lcl_buf_size); + if (!lcl_buf) { + DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__)); + return 0; + } + pktbuf = lcl_buf; + pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST); + pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST); + + for (i = 0; i < count; i++) { + if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { + DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__)); + dhd->rx_pktgetfail++; + break; + } + + pktlen[i] = PKTLEN(dhd->osh, p); + if (SECURE_DMA_ENAB(dhd->osh)) { + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + } +#ifndef BCM_SECURE_DMA + else + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 
0); +#endif /* #ifndef BCM_SECURE_DMA */ + + if (PHYSADDRISZERO(pa)) { + PKTFREE(dhd->osh, p, FALSE); + DHD_ERROR(("Invalid phyaddr 0\n")); + ASSERT(0); + break; + } +#ifdef DMAMAP_STATS + dhd->dma_stats.rxdata++; + dhd->dma_stats.rxdata_sz += pktlen[i]; +#endif /* DMAMAP_STATS */ + + PKTPULL(dhd->osh, p, prot->rx_metadata_offset); + pktlen[i] = PKTLEN(dhd->osh, p); + pktbuf[i] = p; + pktbuf_pa[i] = pa; + } + + /* only post what we have */ + count = i; + + /* grab the ring lock to allocate pktid and post on ring */ + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) + dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE); + if (msg_start == NULL) { + DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); + DHD_RING_UNLOCK(ring->ring_lock, flags); + goto cleanup; + } + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 0); + + rxbuf_post_tmp = (uint8*)msg_start; + + for (i = 0; i < alloced; i++) { + rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; + p = pktbuf[i]; + pa = pktbuf_pa[i]; + +#if defined(DHD_LB_RXC) + if (use_rsv_pktid == TRUE) { + bcm_workq_t *workq = &prot->rx_compl_cons; + int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + + if (elem_ix == BCM_RING_EMPTY) { + DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__)); + pktid = DHD_PKTID_INVALID; + goto alloc_pkt_id; + } else { + uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix); + pktid = *elem; + } + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + + /* Now populate the previous locker with valid information */ + if (pktid != DHD_PKTID_INVALID) { + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map, + p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL, + PKTTYPE_DATA_RX); + } + } else +#endif /* ! 
DHD_LB_RXC */ + { +#if defined(DHD_LB_RXC) +alloc_pkt_id: +#endif /* DHD_LB_RXC */ + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa, + pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + break; + } +#endif /* DHD_PCIE_PKTID */ + } + + /* Common msg header */ + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + rxbuf_post->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]); + rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->data_buf_addr.low_addr = + htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset); + + if (prot->rx_metadata_offset) { + rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; + rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + } else { + rxbuf_post->metadata_buf_len = 0; + rxbuf_post->metadata_buf_addr.high_addr = 0; + rxbuf_post->metadata_buf_addr.low_addr = 0; + } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + + /* Move rxbuf_post_tmp to next item */ + rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len; + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, p); +#endif // endif + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) + ring->wr = ring->max_items - (alloced - i); + else + ring->wr -= (alloced - i); + + if (ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? 
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + + alloced = i; + } + + /* update ring's WR index and ring doorbell to dongle */ + if (alloced > 0) { + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + } + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +cleanup: + for (i = alloced; i < count; i++) { + p = pktbuf[i]; + pa = pktbuf_pa[i]; + + if (SECURE_DMA_ENAB(dhd->osh)) + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, + DHD_DMAH_NULL, ring->dma_buf.secdma, 0); + else + DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL); + PKTFREE(dhd->osh, p, FALSE); + } + + MFREE(dhd->osh, lcl_buf, lcl_buf_size); + + return alloced; +} /* dhd_prot_rxbufpost */ + +static int +dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ; + uint32 pktlen; + info_buf_post_msg_t *infobuf_post; + uint8 *infobuf_post_tmp; + void *p; + void* msg_start; + uint8 i = 0; + dmaaddr_t pa; + int16 count = 0; + + if (ring == NULL) + return 0; + + if (ring->inited != TRUE) + return 0; + if (ring == dhd->prot->h2dring_info_subn) { + if (prot->max_infobufpost == 0) + return 0; + + count = prot->max_infobufpost - prot->infobufpost; + } + else { + DHD_ERROR(("Unknown ring\n")); + return 0; + } + + if (count <= 0) { + DHD_INFO(("%s: Cannot post more than max info resp buffers\n", + __FUNCTION__)); + return 0; + } + + /* grab the ring lock to allocate pktid and post on ring */ + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE); + + if (msg_start == NULL) { + DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); + DHD_RING_UNLOCK(ring->ring_lock, flags); + return -1; + } + + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 
0); + + infobuf_post_tmp = (uint8*) msg_start; + + /* loop through each allocated message in the host ring */ + for (i = 0; i < alloced; i++) { + infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp; + /* Create a rx buffer */ +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__)); + dhd->rx_pktgetfail++; + break; + } + pktlen = PKTLEN(dhd->osh, p); + if (SECURE_DMA_ENAB(dhd->osh)) { + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + } +#ifndef BCM_SECURE_DMA + else + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); +#endif /* #ifndef BCM_SECURE_DMA */ + if (PHYSADDRISZERO(pa)) { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + } + else + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, p, FALSE); +#else + PKTFREE(dhd->osh, p, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + DHD_ERROR(("Invalid phyaddr 0\n")); + ASSERT(0); + break; + } +#ifdef DMAMAP_STATS + dhd->dma_stats.info_rx++; + dhd->dma_stats.info_rx_sz += pktlen; +#endif /* DMAMAP_STATS */ + pktlen = PKTLEN(dhd->osh, p); + + /* Common msg header */ + infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST; + infobuf_post->cmn_hdr.if_id = 0; + infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + infobuf_post->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa, + pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX); + +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0, + ring->dma_buf.secdma, 0); + 
} else + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0); + +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, p, FALSE); +#else + PKTFREE(dhd->osh, p, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__)); + break; + } +#endif /* DHD_PCIE_PKTID */ + + infobuf_post->host_buf_len = htol16((uint16)pktlen); + infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n", + infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr, + infobuf_post->host_buf_addr.high_addr)); + + infobuf_post->cmn_hdr.request_id = htol32(pktid); + /* Move rxbuf_post_tmp to next item */ + infobuf_post_tmp = infobuf_post_tmp + ring->item_len; +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, p); +#endif // endif + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) + ring->wr = ring->max_items - (alloced - i); + else + ring->wr -= (alloced - i); + + alloced = i; + if (alloced && ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? 
+ 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + + /* Update the write pointer in TCM & ring bell */ + if (alloced > 0) { + if (ring == dhd->prot->h2dring_info_subn) { + prot->infobufpost += alloced; + } + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + } + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return alloced; +} /* dhd_prot_infobufpost */ + +#ifdef IOCTLRESP_USE_CONSTMEM +static int +alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + int err; + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + + if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) { + DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err)); + ASSERT(0); + return BCME_NOMEM; + } + + return BCME_OK; +} + +static void +free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + /* retbuf (declared on stack) not fully populated ... */ + if (retbuf->va) { + uint32 dma_pad; + dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; + retbuf->len = IOCT_RETBUF_SIZE; + retbuf->_alloced = retbuf->len + dma_pad; + } + + dhd_dma_buf_free(dhd, retbuf); + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static int +dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type) +{ + void *p; + uint16 pktsz; + ioctl_resp_evt_buf_post_msg_t *rxbuf_post; + dmaaddr_t pa; + uint32 pktlen; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + unsigned long flags; + dhd_dma_buf_t retbuf; + void *dmah = NULL; + uint32 pktid; + void *map_handle; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + bool non_ioctl_resp_buf = 0; + dhd_pkttype_t buf_type; + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return -1; + } + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST) + buf_type = PKTTYPE_IOCTL_RX; + else if (msg_type == MSG_TYPE_EVENT_BUF_POST) + buf_type = PKTTYPE_EVENT_RX; + else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST) + buf_type = PKTTYPE_TSBUF_RX; + 
else { + DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type)); + return -1; + } + + if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)) + non_ioctl_resp_buf = TRUE; + else + non_ioctl_resp_buf = FALSE; + + if (non_ioctl_resp_buf) { + /* Allocate packet for not ioctl resp buffer post */ + pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + /* Allocate packet for ctrl/ioctl buffer post */ + pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!non_ioctl_resp_buf) { + if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) { + DHD_ERROR(("Could not allocate IOCTL response buffer\n")); + return -1; + } + ASSERT(retbuf.len == IOCT_RETBUF_SIZE); + p = retbuf.va; + pktlen = retbuf.len; + pa = retbuf.pa; + dmah = retbuf.dmah; + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", + __FUNCTION__, __LINE__, non_ioctl_resp_buf ? 
+ "EVENT" : "IOCTL RESP")); + dhd->rx_pktgetfail++; + return -1; + } + + pktlen = PKTLEN(dhd->osh, p); + + if (SECURE_DMA_ENAB(dhd->osh)) { + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + } +#ifndef BCM_SECURE_DMA + else + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); +#endif /* #ifndef BCM_SECURE_DMA */ + + if (PHYSADDRISZERO(pa)) { + DHD_ERROR(("Invalid physaddr 0\n")); + ASSERT(0); + goto free_pkt_return; + } + +#ifdef DMAMAP_STATS + switch (buf_type) { +#ifndef IOCTLRESP_USE_CONSTMEM + case PKTTYPE_IOCTL_RX: + dhd->dma_stats.ioctl_rx++; + dhd->dma_stats.ioctl_rx_sz += pktlen; + break; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + case PKTTYPE_EVENT_RX: + dhd->dma_stats.event_rx++; + dhd->dma_stats.event_rx_sz += pktlen; + break; + case PKTTYPE_TSBUF_RX: + dhd->dma_stats.tsbuf_rx++; + dhd->dma_stats.tsbuf_rx_sz += pktlen; + break; + default: + break; + } +#endif /* DMAMAP_STATS */ + + } + + /* grab the ring lock to allocate pktid and post on ring */ + DHD_RING_LOCK(ring->ring_lock, flags); + + rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (rxbuf_post == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n", + __FUNCTION__, __LINE__)); + +#ifdef IOCTLRESP_USE_CONSTMEM + if (non_ioctl_resp_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + } + goto free_pkt_return; + } + + /* CMN msg header */ + rxbuf_post->cmn_hdr.msg_type = msg_type; + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!non_ioctl_resp_buf) { + map_handle = dhd->prot->pktid_map_handle_ioctl; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah, + ring->dma_buf.secdma, buf_type); + 
} else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + map_handle = dhd->prot->pktid_ctrl_map; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, + p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, + buf_type); + } + + if (pktid == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + if (ring->wr == 0) { + ring->current_phase = ring->current_phase ? 0 : + BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + DHD_RING_UNLOCK(ring->ring_lock, flags); + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__)); + goto free_pkt_return; + } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + rxbuf_post->cmn_hdr.flags = ring->current_phase; + +#if defined(DHD_PCIE_PKTID) + if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + if (ring->wr == 0) { + ring->current_phase = ring->current_phase ? 
0 : + BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + DHD_RING_UNLOCK(ring->ring_lock, flags); +#ifdef IOCTLRESP_USE_CONSTMEM + if (non_ioctl_resp_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + } else + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + goto free_pkt_return; + } +#endif /* DHD_PCIE_PKTID */ + +#ifndef IOCTLRESP_USE_CONSTMEM + rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); +#else + rxbuf_post->host_buf_len = htol16((uint16)pktlen); +#endif /* IOCTLRESP_USE_CONSTMEM */ + rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + +#ifdef DHD_LBUF_AUDIT + if (non_ioctl_resp_buf) + PKTAUDIT(dhd->osh, p); +#endif // endif + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return 1; + +free_pkt_return: + if (!non_ioctl_resp_buf) { +#ifdef IOCTLRESP_USE_CONSTMEM + free_ioctl_return_buffer(dhd, &retbuf); +#else + dhd_prot_packet_free(dhd, p, buf_type, FALSE); +#endif /* IOCTLRESP_USE_CONSTMEM */ + } else { + dhd_prot_packet_free(dhd, p, buf_type, FALSE); + } + + return -1; +} /* dhd_prot_rxbufpost_ctrl */ + +static uint16 +dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post) +{ + uint32 i = 0; + int32 ret_val; + + DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return 0; + } + + while (i < max_to_post) { + ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type); + if (ret_val < 0) + break; + i++; + } + DHD_INFO(("posted %d buffers of type %d\n", i, msg_type)); + return (uint16)i; +} + +static void +dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = 
dhd->prot; + int max_to_post; + + DHD_INFO(("ioctl resp buf post\n")); + max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n", + __FUNCTION__)); + return; + } + prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post); +} + +static void +dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; + if (max_to_post <= 0) { + DHD_ERROR(("%s: Cannot post more than max event buffers\n", + __FUNCTION__)); + return; + } + prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + MSG_TYPE_EVENT_BUF_POST, max_to_post); +} + +static int +dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd) +{ + return 0; +} + +bool BCMFASTPATH +dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound) +{ + dhd_prot_t *prot = dhd->prot; + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = prot->d2hring_info_cpln; + unsigned long flags; + + if (ring == NULL) + return FALSE; + if (ring->inited != TRUE) + return FALSE; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + +#ifdef DHD_MAP_LOGGING + if (dhd->smmu_fault_occurred) { + more = FALSE; + break; + } +#endif /* DHD_MAP_LOGGING */ + + DHD_RING_LOCK(ring->ring_lock, flags); + /* Get the message from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + DHD_RING_UNLOCK(ring->ring_lock, flags); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n", + __FUNCTION__, msg_len)); + } + + /* Update 
read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check RX bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +#ifdef EWP_EDL +bool +dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = prot->d2hring_edl; + unsigned long flags = 0; + uint32 items = 0; + uint16 rd = 0; + uint16 depth = 0; + + if (ring == NULL) + return FALSE; + if (ring->inited != TRUE) + return FALSE; + if (ring->item_len == 0) { + DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n", + __FUNCTION__, ring->idx, ring->item_len)); + return FALSE; + } + + if (dhd->hang_was_sent) { + return FALSE; + } + + /* in this DPC context just check if wr index has moved + * and schedule deferred context to actually process the + * work items. + */ + /* update the write index */ + DHD_RING_LOCK(ring->ring_lock, flags); + if (dhd->dma_d2h_ring_upd_support) { + /* DMAing write/read indices supported */ + ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx); + } + rd = ring->rd; + DHD_RING_UNLOCK(ring->ring_lock, flags); + + depth = ring->max_items; + /* check for avail space, in number of ring items */ + items = READ_AVAIL_SPACE(ring->wr, rd, depth); + if (items == 0) { + /* no work items in edl ring */ + return FALSE; + } + if (items > ring->max_items) { + DHD_ERROR(("\r\n======================= \r\n")); + DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", + __FUNCTION__, ring, ring->name, ring->max_items, items)); + DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", + ring->wr, ring->rd, depth)); + DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n", + dhd->busstate, dhd->bus->wait_for_d3_ack)); + DHD_ERROR(("\r\n======================= \r\n")); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = 
DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR; + dhd_bus_mem_dump(dhd); + + } +#endif /* DHD_FW_COREDUMP */ + dhd_schedule_reset(dhd); + + return FALSE; + } + + if (items > D2HRING_EDL_WATERMARK) { + DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u \n", + __FUNCTION__, items)); + } + + dhd_schedule_logtrace(dhd->info); + + return FALSE; +} + +/* This is called either from work queue context of 'event_log_dispatcher_work' or +* from the kthread context of dhd_logtrace_thread +*/ +int +dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data) +{ + dhd_prot_t *prot = NULL; + msgbuf_ring_t *ring = NULL; + int err = 0; + unsigned long flags = 0; + cmn_msg_hdr_t *msg = NULL; + uint8 *msg_addr = NULL; + uint32 max_items_to_process = 0, n = 0; + uint32 num_items = 0, new_items = 0; + uint16 depth = 0; + volatile uint16 wr = 0; + + if (!dhd || !dhd->prot) + return 0; + + prot = dhd->prot; + ring = prot->d2hring_edl; + if (!ring || !evt_decode_data) { + return 0; + } + + if (dhd->hang_was_sent) { + return FALSE; + } + + DHD_RING_LOCK(ring->ring_lock, flags); + ring->curr_rd = ring->rd; + wr = ring->wr; + depth = ring->max_items; + /* check for avail space, in number of ring items + * Note, that this will only give the # of items + * from rd to wr if wr>=rd, or from rd to ring end + * if wr < rd. So in the latter case strictly speaking + * not all the items are read. But this is OK, because + * these will be processed in the next doorbell as rd + * would have wrapped around. 
Processing in the next + * doorbell is acceptable since EDL only contains debug data + */ + num_items = READ_AVAIL_SPACE(wr, ring->rd, depth); + DHD_RING_UNLOCK(ring->ring_lock, flags); + + if (num_items == 0) { + /* no work items in edl ring */ + return 0; + } + + DHD_INFO(("%s: EDL work items [%u] available \n", + __FUNCTION__, num_items)); + + /* if space is available, calculate address to be read */ + msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len); + n = max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND); + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + while (n > 0) { + msg = (cmn_msg_hdr_t *)msg_addr; + /* wait for DMA of work item to complete */ + if ((err = dhd_prot_d2h_sync_edl(dhd, ring, msg)) != BCME_OK) { + DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n", + __FUNCTION__, err)); + } + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. + */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + + if (err != BCME_OK) { + return 0; + } + + /* process the edl work item, i.e, the event log */ + err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data); + + /* memset the read work item space to zero to avoid + * any accidental matching of seqnum + */ + memset(msg_addr, 0, ring->item_len); + + /* update rd index by 1 + * The TCM rd index is updated only if bus is not + * in D3. 
Else, the rd index is updated from resume + * context in - 'dhdpcie_bus_suspend' + */ + DHD_RING_LOCK(ring->ring_lock, flags); + ring->rd = ring->curr_rd; + ASSERT(ring->rd < ring->max_items); + DHD_RING_UNLOCK(ring->ring_lock, flags); + + DHD_GENERAL_LOCK(dhd, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) { + DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state)); + } else { + DHD_EDL_RING_TCM_RD_UPDATE(dhd); + } + DHD_GENERAL_UNLOCK(dhd, flags); + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr + ring->item_len); + + msg_addr += ring->item_len; + --n; + } + + /* if num_items > bound, then anyway we will reschedule and + * this function runs again, so that if in between the DPC has + * updated the wr index, then the updated wr is read. But if + * num_items <= bound, and if DPC executes and updates the wr index + * when the above while loop is running, then the updated 'wr' index + * needs to be re-read from here, If we don't do so, then till + * the next time this function is scheduled + * the event logs will not be processed. + */ + if (num_items <= DHD_EVENT_LOGTRACE_BOUND) { + /* read the updated wr index if reqd. 
and update num_items */ + DHD_RING_LOCK(ring->ring_lock, flags); + if (wr != (volatile uint16)ring->wr) { + wr = (volatile uint16)ring->wr; + new_items = READ_AVAIL_SPACE(wr, ring->rd, depth); + DHD_INFO(("%s: new items [%u] avail in edl\n", + __FUNCTION__, new_items)); + num_items += new_items; + } + DHD_RING_UNLOCK(ring->ring_lock, flags); + } + + /* if # of items processed is less than num_items, need to re-schedule + * the deferred ctx + */ + if (max_items_to_process < num_items) { + DHD_INFO(("%s: EDL bound hit / new items found, " + "items processed=%u; remaining=%u, " + "resched deferred ctx...\n", + __FUNCTION__, max_items_to_process, + num_items - max_items_to_process)); + return (num_items - max_items_to_process); + } + + return 0; + +} + +void +dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = NULL; + + if (!dhd) + return; + + prot = dhd->prot; + if (!prot || !prot->d2hring_edl) + return; + + dhd_prot_upd_read_idx(dhd, prot->d2hring_edl); +} +#endif /* EWP_EDL */ + +/* called when DHD needs to check for 'receive complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = FALSE; + uint n = 0; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->d2hring_rx_cpln; + uint16 item_len = ring->item_len; + host_rxbuf_cmpl_t *msg = NULL; + uint8 *msg_addr; + uint32 msg_len; + uint16 pkt_cnt, pkt_cnt_newidx; + unsigned long flags; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + int ifidx = 0, if_newidx = 0; + void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt; + uint32 pktid; + int i; + uint8 sync; + + while (1) { + if (dhd_is_device_removed(dhd)) + break; + + if (dhd->hang_was_sent) + break; + +#ifdef DHD_MAP_LOGGING + if (dhd->smmu_fault_occurred) { + break; + } +#endif /* DHD_MAP_LOGGING */ + + pkt_cnt = 0; + pktqhead = pkt_newidx = NULL; + pkt_cnt_newidx = 0; + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Get the address of 
the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + break; + } + + while (msg_len > 0) { + msg = (host_rxbuf_cmpl_t *)msg_addr; + + /* Wait until DMA completes, then fetch msg_type */ + sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len); + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. + */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + + if (!sync) { + msg_len -= item_len; + msg_addr += item_len; + continue; + } + + pktid = ltoh32(msg->cmn_hdr.request_id); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid, + DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING */ + + pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa, + len, dmah, secdma, PKTTYPE_DATA_RX); + if (!pkt) { + msg_len -= item_len; + msg_addr += item_len; + continue; + } + + if (SECURE_DMA_ENAB(dhd->osh)) + SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, + dmah, secdma, 0); + else + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + +#ifdef DMAMAP_STATS + dhd->dma_stats.rxdata--; + dhd->dma_stats.rxdata_sz -= len; +#endif /* DMAMAP_STATS */ + DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, " + "pktdata %p, metalen %d\n", + ltoh32(msg->cmn_hdr.request_id), + ltoh16(msg->data_offset), + ltoh16(msg->data_len), msg->cmn_hdr.if_id, + msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), + ltoh16(msg->metadata_len))); + + pkt_cnt++; + msg_len -= item_len; + msg_addr += item_len; + +#if DHD_DBG_SHOW_METADATA + if (prot->metadata_dbg && prot->rx_metadata_offset && + msg->metadata_len) { + uchar *ptr; + ptr = PKTDATA(dhd->osh, 
pkt) - (prot->rx_metadata_offset); + /* header followed by data */ + bcm_print_bytes("rxmetadata", ptr, msg->metadata_len); + dhd_prot_print_metadata(dhd, ptr, msg->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + + /* data_offset from buf start */ + if (ltoh16(msg->data_offset)) { + /* data offset given from dongle after split rx */ + PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset)); + } + else if (prot->rx_dataoffset) { + /* DMA RX offset updated through shared area */ + PKTPULL(dhd->osh, pkt, prot->rx_dataoffset); + } + /* Actual length of the packet */ + PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len)); +#if defined(WL_MONITOR) + if (dhd_monitor_enabled(dhd, ifidx) && + (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) { + dhd_rx_mon_pkt(dhd, msg, pkt, ifidx); + continue; + } +#endif // endif + + if (!pktqhead) { + pktqhead = prevpkt = pkt; + ifidx = msg->cmn_hdr.if_id; + } else { + if (ifidx != msg->cmn_hdr.if_id) { + pkt_newidx = pkt; + if_newidx = msg->cmn_hdr.if_id; + pkt_cnt--; + pkt_cnt_newidx = 1; + break; + } else { + PKTSETNEXT(dhd->osh, prevpkt, pkt); + prevpkt = pkt; + } + } + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, pkt); +#endif // endif + } + + /* roll back read pointer for unprocessed message */ + if (msg_len > 0) { + if (ring->rd < msg_len / item_len) + ring->rd = ring->max_items - msg_len / item_len; + else + ring->rd -= msg_len / item_len; + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + pkt = pktqhead; + for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) { + nextpkt = PKTNEXT(dhd->osh, pkt); + PKTSETNEXT(dhd->osh, pkt, NULL); +#ifdef DHD_LB_RXP + dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx); +#elif defined(DHD_RX_CHAINING) + dhd_rxchain_frame(dhd, pkt, ifidx); +#else + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +#endif /* DHD_LB_RXP */ + } + + if (pkt_newidx) { +#ifdef DHD_LB_RXP + dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx); +#elif defined(DHD_RX_CHAINING) + 
dhd_rxchain_frame(dhd, pkt_newidx, if_newidx); +#else + dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1); +#endif /* DHD_LB_RXP */ + } + + pkt_cnt += pkt_cnt_newidx; + + /* Post another set of rxbufs to the device */ + dhd_prot_return_rxbuf(dhd, 0, pkt_cnt); + +#ifdef DHD_RX_CHAINING + dhd_rxchain_commit(dhd); +#endif // endif + + /* After batch processing, check RX bound */ + n += pkt_cnt; + if (n >= bound) { + more = TRUE; + break; + } + } + + /* Call lb_dispatch only if packets are queued */ + if (n) { + DHD_LB_DISPATCH_RX_COMPL(dhd); + DHD_LB_DISPATCH_RX_PROCESS(dhd); + } + + return more; +} + +/** + * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring) + */ +void +dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring) +{ + msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring; + + if (ring == NULL) { + DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__)); + return; + } + /* Update read pointer */ + if (dhd->dma_d2h_ring_upd_support) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } + + DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n", + ring->idx, flowid, ring->wr, ring->rd)); + + /* Need more logic here, but for now use it directly */ + dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */ +} + +/** called when DHD needs to check for 'transmit complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; + unsigned long flags; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + +#ifdef DHD_MAP_LOGGING + if (dhd->smmu_fault_occurred) { + more = FALSE; + break; + } +#endif /* DHD_MAP_LOGGING */ + + DHD_RING_LOCK(ring->ring_lock, flags); + /* Get the address of 
the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + DHD_RING_UNLOCK(ring->ring_lock, flags); + + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + DHD_LB_DISPATCH_TX_COMPL(dhd); + + return more; +} + +int BCMFASTPATH +dhd_prot_process_trapbuf(dhd_pub_t *dhd) +{ + uint32 data; + dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf; + + /* Interrupts can come in before this struct + * has been initialized. + */ + if (trap_addr->va == NULL) { + DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__)); + return 0; + } + + OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32)); + data = *(uint32 *)(trap_addr->va); + + if (data & D2H_DEV_FWHALT) { + DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data)); + + if (data & D2H_DEV_EXT_TRAP_DATA) + { + if (dhd->extended_trap_data) { + OSL_CACHE_INV((void *)trap_addr->va, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } + DHD_ERROR(("Extended trap data available\n")); + } + return data; + } + return 0; +} + +/** called when DHD needs to check for 'ioctl complete' messages from the dongle */ +int BCMFASTPATH +dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln; + unsigned long flags; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + break; + } + +#ifdef DHD_MAP_LOGGING + if 
(dhd->smmu_fault_occurred) { + break; + } +#endif /* DHD_MAP_LOGGING */ + + DHD_RING_LOCK(ring->ring_lock, flags); + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + DHD_RING_UNLOCK(ring->ring_lock, flags); + + if (msg_addr == NULL) { + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + } + + return 0; +} + +/** + * Consume messages out of the D2H ring. Ensure that the message's DMA to host + * memory has completed, before invoking the message handler via a table lookup + * of the cmn_msg_hdr::msg_type. + */ +static int BCMFASTPATH +dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len) +{ + uint32 buf_len = len; + uint16 item_len; + uint8 msg_type; + cmn_msg_hdr_t *msg = NULL; + int ret = BCME_OK; + + ASSERT(ring); + item_len = ring->item_len; + if (item_len == 0) { + DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n", + __FUNCTION__, ring->idx, item_len, buf_len)); + return BCME_ERROR; + } + + while (buf_len > 0) { + if (dhd->hang_was_sent) { + ret = BCME_ERROR; + goto done; + } + +#ifdef DHD_MAP_LOGGING + if (dhd->smmu_fault_occurred) { + ret = BCME_ERROR; + goto done; + } +#endif /* DHD_MAP_LOGGING */ + + msg = (cmn_msg_hdr_t *)buf; + + /* Wait until DMA completes, then fetch msg_type */ + msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); + + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. 
+ */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(buf + item_len); + + DHD_INFO(("msg_type %d item_len %d buf_len %d\n", + msg_type, item_len, buf_len)); + + if (msg_type == MSG_TYPE_LOOPBACK) { + bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len); + DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len)); + } + + ASSERT(msg_type < DHD_PROT_FUNCS); + if (msg_type >= DHD_PROT_FUNCS) { + DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n", + __FUNCTION__, msg_type, item_len, buf_len)); + ret = BCME_ERROR; + goto done; + } + + if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) { + if (ring == dhd->prot->d2hring_info_cpln) { + if (!dhd->prot->infobufpost) { + DHD_ERROR(("infobuf posted are zero," + "but there is a completion\n")); + goto done; + } + dhd->prot->infobufpost--; + dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn); + dhd_prot_process_infobuf_complete(dhd, buf); + } + } else + if (table_lookup[msg_type]) { + table_lookup[msg_type](dhd, buf); + } + + if (buf_len < item_len) { + ret = BCME_ERROR; + goto done; + } + buf_len = buf_len - item_len; + buf = buf + item_len; + } + +done: + +#ifdef DHD_RX_CHAINING + dhd_rxchain_commit(dhd); +#endif // endif + + return ret; +} /* dhd_prot_process_msgtype */ + +static void +dhd_prot_noop(dhd_pub_t *dhd, void *msg) +{ + return; +} + +/** called on MSG_TYPE_RING_STATUS message received from dongle */ +static void +dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg; + uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id); + uint16 status = ltoh16(ring_status->compl_hdr.status); + uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id); + + DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n", + request_id, status, ring_id, ltoh16(ring_status->write_idx))); + + if 
(ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) + return; + if (status == BCMPCIE_BAD_PHASE) { + /* bad phase report from */ + DHD_ERROR(("Bad phase\n")); + } + if (status != BCMPCIE_BADOPTION) + return; + + if (request_id == DHD_H2D_DBGRING_REQ_PKTID) { + if (dhd->prot->h2dring_info_subn != NULL) { + if (dhd->prot->h2dring_info_subn->create_pending == TRUE) { + DHD_ERROR(("H2D ring create failed for info ring\n")); + dhd->prot->h2dring_info_subn->create_pending = FALSE; + } + else + DHD_ERROR(("ring create ID for a ring, create not pending\n")); + } else { + DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__)); + } + } + else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) { + if (dhd->prot->d2hring_info_cpln != NULL) { + if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) { + DHD_ERROR(("D2H ring create failed for info ring\n")); + dhd->prot->d2hring_info_cpln->create_pending = FALSE; + } + else + DHD_ERROR(("ring create ID for info ring, create not pending\n")); + } else { + DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__)); + } + } + else { + DHD_ERROR(("don;t know how to pair with original request\n")); + } + /* How do we track this to pair it with ??? */ + return; +} + +/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */ +static void +dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg; + DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n", + gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status, + gen_status->compl_hdr.flow_ring_id)); + + /* How do we track this to pair it with ??? */ + return; +} + +/** + * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the + * dongle received the ioctl message in dongle memory. 
+ */ +static void +dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) +{ + ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg; + unsigned long flags; +#if defined(DHD_PKTID_AUDIT_RING) + uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id); +#endif // endif + +#if defined(DHD_PKTID_AUDIT_RING) + /* Skip audit for ADHD_IOCTL_REQ_PKTID = 0xFFFE */ + if (pktid != DHD_IOCTL_REQ_PKTID) { +#ifndef IOCTLRESP_USE_CONSTMEM + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#else + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, + DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + } +#endif // endif + + DHD_GENERAL_LOCK(dhd, flags); + if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) && + (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING; + } else { + DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctack_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + DHD_GENERAL_UNLOCK(dhd, flags); + + DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n", + ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status, + ioct_ack->compl_hdr.flow_ring_id)); + if (ioct_ack->compl_hdr.status != 0) { + DHD_ERROR(("got an error status for the ioctl request...need to handle that\n")); + } +} + +/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */ +static void +dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + uint32 pkt_id, xt_id; + ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg; + void *pkt; + unsigned long flags; + dhd_dma_buf_t retbuf; + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) 
+#ifndef IOCTLRESP_USE_CONSTMEM + DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#else + DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#endif /* !IOCTLRESP_USE_CONSTMEM */ +#endif // endif + + DHD_GENERAL_LOCK(dhd, flags); + if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) || + !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + + /* Clear Response pending bit */ + prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING; + DHD_GENERAL_UNLOCK(dhd, flags); + +#ifndef IOCTLRESP_USE_CONSTMEM + pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE); +#else + dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf); + pkt = retbuf.va; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (!pkt) { + DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + return; + } + + prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); + prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); + xt_id = ltoh16(ioct_resp->trans_id); + + if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) { + DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n", + __FUNCTION__, xt_id, prot->ioctl_trans_id, + prot->curr_ioctl_cmd, ioct_resp->cmd)); + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR); + dhd_prot_debug_info_print(dhd); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + 
dhd_schedule_reset(dhd); + goto exit; + } + DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n", + pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); + + if (prot->ioctl_resplen > 0) { +#ifndef IOCTLRESP_USE_CONSTMEM + bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen); +#else + bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + } + + /* wake up any dhd_os_ioctl_resp_wait() */ + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS); + +exit: +#ifndef IOCTLRESP_USE_CONSTMEM + dhd_prot_packet_free(dhd, pkt, + PKTTYPE_IOCTL_RX, FALSE); +#else + free_ioctl_return_buffer(dhd, &retbuf); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + + /* Post another ioctl buf to the device */ + if (prot->cur_ioctlresp_bufs_posted > 0) { + prot->cur_ioctlresp_bufs_posted--; + } + + dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); +} + +/** called on MSG_TYPE_TX_STATUS message received from dongle */ +static void BCMFASTPATH +dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + host_txbuf_cmpl_t * txstatus; + unsigned long flags; + uint32 pktid; + void *pkt; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + bool pkt_fate; + msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; +#ifdef TX_STATUS_LATENCY_STATS + flow_info_t *flow_info; + uint64 tx_status_latency; +#endif /* TX_STATUS_LATENCY_STATS */ +#if defined(TX_STATUS_LATENCY_STATS) + flow_ring_node_t *flow_ring_node; + uint16 flowid; +#endif // endif + + txstatus = (host_txbuf_cmpl_t *)msg; +#if defined(TX_STATUS_LATENCY_STATS) + flowid = txstatus->compl_hdr.flow_ring_id; + flow_ring_node = DHD_FLOW_RING(dhd, flowid); +#endif // endif + + /* locks required to protect circular buffer accesses */ + DHD_RING_LOCK(ring->ring_lock, flags); + pktid = ltoh32(txstatus->cmn_hdr.request_id); + pkt_fate = TRUE; + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid, + DHD_DUPLICATE_FREE, 
msg, D2HRING_TXCMPLT_ITEMSIZE); +#endif // endif + + DHD_INFO(("txstatus for pktid 0x%04x\n", pktid)); + if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) { + DHD_ERROR(("Extra packets are freed\n")); + } + ASSERT(pktid != 0); + + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, + pa, len, dmah, secdma, PKTTYPE_DATA_TX); + if (!pkt) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__)); + prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return; + } + + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (dhd->prot->tx_metadata_offset) + offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN; + SECURE_DMA_UNMAP(dhd->osh, (uint) pa, + (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah, + secdma, offset); + } else { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + } + +#ifdef TX_STATUS_LATENCY_STATS + /* update the tx status latency for flowid */ + flow_info = &flow_ring_node->flow_info; + tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt); + flow_info->cum_tx_status_latency += tx_status_latency; + flow_info->num_tx_status++; +#endif /* TX_STATUS_LATENCY_STATS */ +#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA) + { + int elem_ix; + void **elem; + bcm_workq_t *workq; + + workq = &prot->tx_compl_prod; + /* + * Produce the packet into the tx_compl workq for the tx compl tasklet + * to consume. 
+ */ + OSL_PREFETCH(PKTTAG(pkt)); + + /* fetch next available slot in workq */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + + DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa); + DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len); + + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n")); + goto workq_ring_full; + } + + elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix); + *elem = pkt; + + smp_wmb(); + + /* Sync WR index to consumer if the SYNC threshold has been reached */ + if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + bcm_workq_prod_sync(workq); + prot->tx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n", + __FUNCTION__, pkt, prot->tx_compl_prod_sync)); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + return; + } + +workq_ring_full: + +#endif /* !DHD_LB_TXC */ + +#ifdef DMAMAP_STATS + dhd->dma_stats.txdata--; + dhd->dma_stats.txdata_sz -= len; +#endif /* DMAMAP_STATS */ + pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid, + ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK); +#if defined(BCMPCIE) + dhd_txcomplete(dhd, pkt, pkt_fate); +#endif // endif + +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->tx_metadata_offset && txstatus->metadata_len) { + uchar *ptr; + /* The Ethernet header of TX frame was copied and removed. + * Here, move the data pointer forward by Ethernet header size. 
+ */ + PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); + bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); + dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, pkt); +#endif // endif + + DHD_RING_UNLOCK(ring->ring_lock, flags); + PKTFREE(dhd->osh, pkt, TRUE); + DHD_RING_LOCK(ring->ring_lock, flags); + DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, + txstatus->tx_status); + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return; +} /* dhd_prot_txstatus_process */ + +/** called on MSG_TYPE_WL_EVENT message received from dongle */ +static void +dhd_prot_event_process(dhd_pub_t *dhd, void *msg) +{ + wlevent_req_msg_t *evnt; + uint32 bufid; + uint16 buflen; + int ifidx = 0; + void* pkt; + dhd_prot_t *prot = dhd->prot; + + /* Event complete header */ + evnt = (wlevent_req_msg_t *)msg; + bufid = ltoh32(evnt->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#endif // endif + + buflen = ltoh16(evnt->event_data_len); + + ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); + + /* Post another rxbuf to the device */ + if (prot->cur_event_bufs_posted) + prot->cur_event_bufs_posted--; + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE); + + if (!pkt) { + DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid)); + return; + } + + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + + PKTSETLEN(dhd->osh, pkt, buflen); +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, pkt); +#endif // endif + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +} + +/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */ +static void BCMFASTPATH 
+dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf) +{ + info_buf_resp_t *resp; + uint32 pktid; + uint16 buflen; + void * pkt; + + resp = (info_buf_resp_t *)buf; + pktid = ltoh32(resp->cmn_hdr.request_id); + buflen = ltoh16(resp->info_data_len); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n", + pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum), + dhd->prot->rx_dataoffset)); + + if (dhd->debug_buf_dest_support) { + if (resp->dest < DEBUG_BUF_DEST_MAX) { + dhd->debug_buf_dest_stat[resp->dest]++; + } + } + + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE); + if (!pkt) + return; + + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + + PKTSETLEN(dhd->osh, pkt, buflen); + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, pkt); +#endif // endif + + /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a + * special ifidx of -1. This is just internal to dhd to get the data to + * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process). + */ + dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1); +} + +/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */ +static void BCMFASTPATH +dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf) +{ +} + +/** Stop protocol: sync w/dongle state. */ +void dhd_prot_stop(dhd_pub_t *dhd) +{ + ASSERT(dhd); + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +} + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. 
+ */ +void BCMFASTPATH +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ + return; +} + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + return 0; +} + +#define MAX_MTU_SZ (1600u) + +#define PKTBUF pktbuf + +/** + * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in + * the corresponding flow ring. + */ +int BCMFASTPATH +dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + host_txbuf_post_t *txdesc = NULL; + dmaaddr_t pa, meta_pa; + uint8 *pktdata; + uint32 pktlen; + uint32 pktid; + uint8 prio; + uint16 flowid = 0; + uint16 alloced = 0; + uint16 headroom; + msgbuf_ring_t *ring; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + + if (dhd->flow_ring_table == NULL) { + DHD_ERROR(("dhd flow_ring_table is NULL\n")); + return BCME_NORESOURCE; + } + + flowid = DHD_PKT_GET_FLOWID(PKTBUF); + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Create a unique 32-bit packet id */ + pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map, + PKTBUF, PKTTYPE_DATA_TX); +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__)); + /* + * If we return error here, the caller would queue the packet + * again. So we'll just free the skb allocated in DMA Zone. + * Since we have not freed the original SKB yet the caller would + * requeue the same. 
+ */ + goto err_no_res_pktfree; + } +#endif /* DHD_PCIE_PKTID */ + + /* Reserve space in the circular buffer */ + txdesc = (host_txbuf_post_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (txdesc == NULL) { + DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", + __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count))); + goto err_free_pktid; + } + + DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid); + + /* Extract the data pointer and length information */ + pktdata = PKTDATA(dhd->osh, PKTBUF); + pktlen = PKTLEN(dhd->osh, PKTBUF); + + /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ + bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); + + /* Extract the ethernet header and adjust the data pointer and length */ + pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN); + pktlen -= ETHER_HDR_LEN; + + /* Map the data pointer to a DMA-able address */ + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (prot->tx_metadata_offset) + offset = prot->tx_metadata_offset + ETHER_HDR_LEN; + + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, + DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset); + } +#ifndef BCM_SECURE_DMA + else + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); +#endif /* #ifndef BCM_SECURE_DMA */ + + if (PHYSADDRISZERO(pa)) { + DHD_ERROR(("%s: Something really bad, unless 0 is " + "a valid phyaddr for pa\n", __FUNCTION__)); + ASSERT(0); + goto err_rollback_idx; + } + +#ifdef DMAMAP_STATS + dhd->dma_stats.txdata++; + dhd->dma_stats.txdata_sz += pktlen; +#endif /* DMAMAP_STATS */ + /* No need to lock. 
Save the rest of the packet's metadata */ + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid, + pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX); + +#ifdef TXP_FLUSH_NITEMS + if (ring->pend_items_count == 0) + ring->start_addr = (void *)txdesc; + ring->pend_items_count++; +#endif // endif + + /* Form the Tx descriptor message buffer */ + + /* Common message hdr */ + txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; + txdesc->cmn_hdr.if_id = ifidx; + txdesc->cmn_hdr.flags = ring->current_phase; + + txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3; + prio = (uint8)PKTPRIO(PKTBUF); + + txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT; + txdesc->seg_cnt = 1; + + txdesc->data_len = htol16((uint16) pktlen); + txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + + /* Move data pointer to keep ether header in local PKTBUF for later reference */ + PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN); + + /* Handle Tx metadata */ + headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); + if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) + DHD_ERROR(("No headroom for Metadata tx %d %d\n", + prot->tx_metadata_offset, headroom)); + + if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { + DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset)); + + /* Adjust the data pointer to account for meta data in DMA_MAP */ + PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset); + + if (SECURE_DMA_ENAB(dhd->osh)) { + meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF), + prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF, + 0, ring->dma_buf.secdma); + } +#ifndef BCM_SECURE_DMA + else + meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), + prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); +#endif /* #ifndef BCM_SECURE_DMA */ + + if (PHYSADDRISZERO(meta_pa)) { + /* Unmap the data pointer to a DMA-able address */ + if 
(SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (prot->tx_metadata_offset) { + offset = prot->tx_metadata_offset + ETHER_HDR_LEN; + } + + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, + DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset); + } +#ifndef BCM_SECURE_DMA + else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL); + } +#endif /* #ifndef BCM_SECURE_DMA */ +#ifdef TXP_FLUSH_NITEMS + /* update pend_items_count */ + ring->pend_items_count--; +#endif /* TXP_FLUSH_NITEMS */ + + DHD_ERROR(("%s: Something really bad, unless 0 is " + "a valid phyaddr for meta_pa\n", __FUNCTION__)); + ASSERT(0); + goto err_rollback_idx; + } + + /* Adjust the data pointer back to original value */ + PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset); + + txdesc->metadata_buf_len = prot->tx_metadata_offset; + txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa)); + txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa)); + } else { + txdesc->metadata_buf_len = htol16(0); + txdesc->metadata_buf_addr.high_addr = 0; + txdesc->metadata_buf_addr.low_addr = 0; + } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + txdesc->cmn_hdr.request_id = htol32(pktid); + + DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len, + txdesc->cmn_hdr.request_id)); + +#ifdef DHD_LBUF_AUDIT + PKTAUDIT(dhd->osh, PKTBUF); +#endif // endif + + if (pktlen > MAX_MTU_SZ) { + DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n", + __FUNCTION__, pktlen, MAX_MTU_SZ)); + dhd_prhex("txringitem", (volatile uchar*)txdesc, + sizeof(host_txbuf_post_t), DHD_ERROR_VAL); + } + + /* Update the write pointer in TCM & ring bell */ +#ifdef TXP_FLUSH_NITEMS + /* Flush if we have either hit the txp_threshold or if this msg is */ + /* occupying the last slot in the flow_ring - before wrap around. 
*/ + if ((ring->pend_items_count == prot->txp_threshold) || + ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) { + dhd_prot_txdata_write_flush(dhd, flowid); + } +#else + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, txdesc, 1); +#endif // endif +#ifdef TX_STATUS_LATENCY_STATS + /* set the time when pkt is queued to flowring */ + DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US()); +#endif /* TX_STATUS_LATENCY_STATS */ + + OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count); + /* + * Take a wake lock, do not sleep if we have atleast one packet + * to finish. + */ + DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + +#ifdef TX_STATUS_LATENCY_STATS + flow_ring_node->flow_info.num_tx_pkts++; +#endif /* TX_STATUS_LATENCY_STATS */ + return BCME_OK; + +err_rollback_idx: + /* roll back write pointer for unprocessed message */ + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + if (ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? + 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + +err_free_pktid: +#if defined(DHD_PCIE_PKTID) + { + void *dmah; + void *secdma; + /* Free up the PKTID. physaddr and pktlen will be garbage. 
*/ + DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, + pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); + } + +err_no_res_pktfree: +#endif /* DHD_PCIE_PKTID */ + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return BCME_NORESOURCE; +} /* dhd_prot_txdata */ + +/* called with a ring_lock */ +/** optimization to write "n" tx items at a time to ring */ +void BCMFASTPATH +dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid) +{ +#ifdef TXP_FLUSH_NITEMS + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + msgbuf_ring_t *ring; + + if (dhd->flow_ring_table == NULL) { + return; + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + if (ring->pend_items_count) { + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ring->start_addr, + ring->pend_items_count); + ring->pend_items_count = 0; + ring->start_addr = NULL; + } +#endif /* TXP_FLUSH_NITEMS */ +} + +#undef PKTBUF /* Only defined in the above routine */ + +int BCMFASTPATH +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) +{ + return 0; +} + +/** post a set of receive buffers to the dongle */ +static void BCMFASTPATH +dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt) +{ + dhd_prot_t *prot = dhd->prot; +#if defined(DHD_LB_RXC) + int elem_ix; + uint32 *elem; + bcm_workq_t *workq; + + workq = &prot->rx_compl_prod; + + /* Produce the work item */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__)); + ASSERT(0); + return; + } + + elem = WORKQ_ELEMENT(uint32, workq, elem_ix); + *elem = pktid; + + smp_wmb(); + + /* Sync WR index to consumer if the SYNC threshold has been reached */ + if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + 
bcm_workq_prod_sync(workq); + prot->rx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n", + __FUNCTION__, pktid, prot->rx_compl_prod_sync)); + +#endif /* DHD_LB_RXC */ + + if (prot->rxbufpost >= rxcnt) { + prot->rxbufpost -= (uint16)rxcnt; + } else { + /* ASSERT(0); */ + prot->rxbufpost = 0; + } + +#if !defined(DHD_LB_RXC) + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ +#endif /* !DHD_LB_RXC */ + return; +} + +/* called before an ioctl is sent to the dongle */ +static void +dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf) +{ + dhd_prot_t *prot = dhd->prot; + int slen = 0; + + if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) { + pcie_bus_tput_params_t *tput_params; + + slen = strlen("pcie_bus_tput") + 1; + tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen); + bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr, + sizeof(tput_params->host_buf_addr)); + tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN; + } + +} + +/* called after an ioctl returns from dongle */ +static void +dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf, + int ifidx, int ret, int len) +{ + + if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) { + /* Intercept the wme_dp ioctl here */ + if (!strcmp(buf, "wme_dp")) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + + } + +} + +#ifdef DHD_PM_CONTROL_FROM_FILE +extern bool g_pm_control; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */ +int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + int ret = -1; + uint8 action; + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s : PCIe link is down. 
we have nothing to do\n", __FUNCTION__)); + goto done; + } + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. we have nothing to do -" + " bus state: %d, sent hang: %d\n", __FUNCTION__, + dhd->busstate, dhd->hang_was_sent)); + goto done; + } + + if (dhd->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (ioc->cmd == WLC_SET_PM) { +#ifdef DHD_PM_CONTROL_FROM_FILE + if (g_pm_control == TRUE) { + DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n", + __FUNCTION__, buf ? *(char *)buf : 0)); + goto done; + } +#endif /* DHD_PM_CONTROL_FROM_FILE */ + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0)); + } + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + action = ioc->set; + + dhd_prot_wlioctl_intercept(dhd, ioc, buf); + + if (action & WL_IOCTL_ACTION_SET) { + ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + } else { + ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) + ioc->used = ret; + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) { + ret = 0; + } else { + DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret)); + dhd->dongle_error = ret; + } + + dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len); + +done: + return ret; + +} /* dhd_prot_ioctl */ + +/** test / loopback */ + +int +dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + + ioct_reqst_hdr_t *ioct_rqst; + + uint16 hdrlen = sizeof(ioct_reqst_hdr_t); + uint16 msglen = len + hdrlen; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN); + msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE); + + DHD_RING_LOCK(ring->ring_lock, flags); + + ioct_rqst = (ioct_reqst_hdr_t *) + 
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (ioct_rqst == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + return 0; + } + + { + uint8 *ptr; + uint16 i; + + ptr = (uint8 *)ioct_rqst; + for (i = 0; i < msglen; i++) { + ptr[i] = i % 256; + } + } + + /* Common msg buf hdr */ + ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; + ioct_rqst->msg.if_id = 0; + ioct_rqst->msg.flags = ring->current_phase; + + bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return 0; +} + +/** test / loopback */ +void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer) +{ + if (dmaxfer == NULL) + return; + + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + dhd_dma_buf_free(dhd, &dmaxfer->dstmem); +} + +/** test / loopback */ +int +dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp) +{ + dhd_prot_t *prot = dhdp->prot; + dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer; + dmaxref_mem_map_t *dmap = NULL; + + dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t)); + if (!dmap) { + DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__)); + goto mem_alloc_fail; + } + dmap->srcmem = &(dmaxfer->srcmem); + dmap->dstmem = &(dmaxfer->dstmem); + + DMAXFER_FREE(dhdp, dmap); + return BCME_OK; + +mem_alloc_fail: + if (dmap) { + MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t)); + dmap = NULL; + } + return BCME_NOMEM; +} /* dhd_prepare_schedule_dmaxfer_free */ + +/** test / loopback */ +void +dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) +{ + + dhd_dma_buf_free(dhdp, dmmap->srcmem); + dhd_dma_buf_free(dhdp, dmmap->dstmem); + + MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t)); + dmmap = NULL; + +} /* dmaxfer_free_prev_dmaaddr */ + +/** test / loopback */ +int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, + uint srcdelay, 
uint destdelay, dhd_dmaxfer_t *dmaxfer) +{ + uint i = 0, j = 0; + if (!dmaxfer) + return BCME_ERROR; + + /* First free up existing buffers */ + dmaxfer_free_dmaaddr(dhd, dmaxfer); + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) { + return BCME_NOMEM; + } + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) { + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + return BCME_NOMEM; + } + + dmaxfer->len = len; + + /* Populate source with a pattern like below + * 0x00000000 + * 0x01010101 + * 0x02020202 + * 0x03030303 + * 0x04040404 + * 0x05050505 + * ... + * 0xFFFFFFFF + */ + while (i < dmaxfer->len) { + ((uint8*)dmaxfer->srcmem.va)[i] = j % 256; + i++; + if (i % 4 == 0) { + j++; + } + } + + OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len); + + dmaxfer->srcdelay = srcdelay; + dmaxfer->destdelay = destdelay; + + return BCME_OK; +} /* dmaxfer_prepare_dmaaddr */ + +static void +dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + uint64 end_usec; + pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg; + + BCM_REFERENCE(cmplt); + end_usec = OSL_SYSUPTIME_US(); + + DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status)); + prot->dmaxfer.status = cmplt->compl_hdr.status; + OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len); + if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) { + if (memcmp(prot->dmaxfer.srcmem.va, + prot->dmaxfer.dstmem.va, prot->dmaxfer.len) || + cmplt->compl_hdr.status != BCME_OK) { + DHD_ERROR(("DMA loopback failed\n")); + prhex("XFER SRC: ", + prot->dmaxfer.srcmem.va, prot->dmaxfer.len); + prhex("XFER DST: ", + prot->dmaxfer.dstmem.va, prot->dmaxfer.len); + prot->dmaxfer.status = BCME_ERROR; + } + else { + switch (prot->dmaxfer.d11_lpbk) { + case M2M_DMA_LPBK: { + DHD_ERROR(("DMA successful pcie m2m DMA loopback\n")); + } break; + case D11_LPBK: { + DHD_ERROR(("DMA successful with d11 loopback\n")); + } break; + case BMC_LPBK: { + DHD_ERROR(("DMA successful with bmc loopback\n")); + 
} break; + case M2M_NON_DMA_LPBK: { + DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n")); + } break; + case D11_HOST_MEM_LPBK: { + DHD_ERROR(("DMA successful d11 host mem loopback\n")); + } break; + case BMC_HOST_MEM_LPBK: { + DHD_ERROR(("DMA successful bmc host mem loopback\n")); + } break; + default: { + DHD_ERROR(("Invalid loopback option\n")); + } break; + } + + if (DHD_LPBKDTDUMP_ON()) { + /* debug info print of the Tx and Rx buffers */ + dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va, + prot->dmaxfer.len, DHD_INFO_VAL); + dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va, + prot->dmaxfer.len, DHD_INFO_VAL); + } + } + } + + dhd_prepare_schedule_dmaxfer_free(dhd); + end_usec -= prot->dmaxfer.start_usec; + if (end_usec) + DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n", + prot->dmaxfer.len, (unsigned long)end_usec, + (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec))); + dhd->prot->dmaxfer.in_progress = FALSE; + + dhd->bus->dmaxfer_complete = TRUE; + dhd_os_dmaxfer_wake(dhd); +} + +/** Test functionality. + * Transfers bytes from host to dongle and to host again using DMA + * This function is not reentrant, as prot->dmaxfer.in_progress is not protected + * by a spinlock. 
+ */ +int +dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, + uint d11_lpbk, uint core_num) +{ + unsigned long flags; + int ret = BCME_OK; + dhd_prot_t *prot = dhd->prot; + pcie_dma_xfer_params_t *dmap; + uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT); + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + if (prot->dmaxfer.in_progress) { + DHD_ERROR(("DMA is in progress...\n")); + return BCME_ERROR; + } + + if (d11_lpbk >= MAX_LPBK) { + DHD_ERROR(("loopback mode should be either" + " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n")); + return BCME_ERROR; + } + + DHD_RING_LOCK(ring->ring_lock, flags); + + prot->dmaxfer.in_progress = TRUE; + if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay, + &prot->dmaxfer)) != BCME_OK) { + prot->dmaxfer.in_progress = FALSE; + DHD_RING_UNLOCK(ring->ring_lock, flags); + return ret; + } + + dmap = (pcie_dma_xfer_params_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (dmap == NULL) { + dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); + prot->dmaxfer.in_progress = FALSE; + DHD_RING_UNLOCK(ring->ring_lock, flags); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER; + dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID); + dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + dmap->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + + dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa)); + dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa)); + dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa)); + dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa)); + dmap->xfer_len = htol32(prot->dmaxfer.len); + dmap->srcdelay = htol32(prot->dmaxfer.srcdelay); + dmap->destdelay = htol32(prot->dmaxfer.destdelay); + prot->dmaxfer.d11_lpbk = d11_lpbk; + dmap->flags = (((core_num & 
PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK) + << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) | + ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK) + << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT)); + prot->dmaxfer.start_usec = OSL_SYSUPTIME_US(); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, dmap, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + DHD_ERROR(("DMA loopback Started...\n")); + + return BCME_OK; +} /* dhdmsgbuf_dmaxfer_req */ + +dma_xfer_status_t +dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + + if (prot->dmaxfer.in_progress) + return DMA_XFER_IN_PROGRESS; + else if (prot->dmaxfer.status == BCME_OK) + return DMA_XFER_SUCCESS; + else + return DMA_XFER_FAILED; +} + +/** Called in the process of submitting an ioctl to the dongle */ +static int +dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + int ret = 0; + uint copylen = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n", + __FUNCTION__)); + return -EIO; + } + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. 
Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + if (cmd == WLC_GET_VAR && buf) + { + if (!len || !*(uint8 *)buf) { + DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__)); + ret = BCME_BADARG; + goto done; + } + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + copylen = MIN(len, BCME_STRLEN); + + if ((len >= strlen("bcmerrorstr")) && + (!strcmp((char *)buf, "bcmerrorstr"))) { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen); + *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0'; + goto done; + } else if ((len >= strlen("bcmerror")) && + !strcmp((char *)buf, "bcmerror")) { + *(uint32 *)(uint32 *)buf = dhd->dongle_error; + goto done; + } + } + + DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n", + action, ifidx, cmd, len)); + + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + + if (ret < 0) { + DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); + goto done; + } + + /* wait for IOCTL completion message from dongle and get first fragment */ + ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); + +done: + return ret; +} + +void +dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd) +{ + + uint32 intstatus; + dhd_prot_t *prot = dhd->prot; + + dhd->rxcnt_timeout++; + dhd->rx_ctlerrs++; + dhd->iovar_timeout_occured = TRUE; + DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d " + "trans_id %d state %d busstate=%d ioctl_received=%d\n", + __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd, + prot->ioctl_trans_id, prot->ioctl_state, + dhd->busstate, prot->ioctl_received)); + if (prot->curr_ioctl_cmd == WLC_SET_VAR || + prot->curr_ioctl_cmd == WLC_GET_VAR) { + char iovbuf[32]; + int i; + int dump_size = 128; + uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va; + memset(iovbuf, 0, sizeof(iovbuf)); + strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1); + iovbuf[sizeof(iovbuf) - 1] = '\0'; + DHD_ERROR(("Current IOVAR (%s): %s\n", + prot->curr_ioctl_cmd == WLC_SET_VAR ? 
+ "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf)); + DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n")); + for (i = 0; i < dump_size; i++) { + DHD_ERROR(("%02X ", ioctl_buf[i])); + if ((i % 32) == 31) { + DHD_ERROR(("\n")); + } + } + DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n")); + } + + /* Check the PCIe link status by reading intstatus register */ + intstatus = si_corereg(dhd->bus->sih, + dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0); + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); + dhd->bus->is_linkdown = TRUE; + } + + dhd_bus_dump_console_buffer(dhd->bus); + dhd_prot_debug_info_print(dhd); +} + +/** + * Waits for IOCTL completion message from the dongle, copies this into caller + * provided parameter 'buf'. + */ +static int +dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) +{ + dhd_prot_t *prot = dhd->prot; + int timeleft; + unsigned long flags; + int ret = 0; + static uint cnt = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhd_query_bus_erros(dhd)) { + ret = -EIO; + goto out; + } + + timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false); + +#ifdef DHD_RECOVER_TIMEOUT + if (prot->ioctl_received == 0) { + uint32 intstatus = si_corereg(dhd->bus->sih, + dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0); + int host_irq_disbled = dhdpcie_irq_disabled(dhd->bus); + if ((intstatus) && (intstatus != (uint32)-1) && + (timeleft == 0) && (!dhd_query_bus_erros(dhd))) { + DHD_ERROR(("%s: iovar timeout trying again intstatus=%x" + " host_irq_disabled=%d\n", + __FUNCTION__, intstatus, host_irq_disbled)); + dhd_pcie_intr_count_dump(dhd); + dhd_print_tasklet_status(dhd); + dhd_prot_process_ctrlbuf(dhd); + timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received); + /* Clear Interrupts */ + dhdpcie_bus_clear_intstatus(dhd->bus); + } + } +#endif /* DHD_RECOVER_TIMEOUT */ + + if (dhd->conf->ctrl_resched > 0 && timeleft == 0 
&& (!dhd_query_bus_erros(dhd))) { + cnt++; + if (cnt <= dhd->conf->ctrl_resched) { + uint buscorerev = dhd->bus->sih->buscorerev; + uint32 intstatus = 0, intmask = 0; + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0); + if (intstatus) { + DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n", + __FUNCTION__, cnt, intstatus, intmask)); + dhd->bus->ipend = TRUE; + dhd->bus->dpc_sched = TRUE; + dhd_sched_dpc(dhd); + timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received, true); + } + } + } else { + cnt = 0; + } + + if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) { + + dhd_msgbuf_iovar_timeout_dump(dhd); + +#ifdef DHD_FW_COREDUMP + /* Collect socram dump */ + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ + ret = -ETIMEDOUT; + goto out; + } else { + if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) { + DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n", + __FUNCTION__, prot->ioctl_received)); + ret = -EINVAL; + goto out; + } + dhd->rxcnt_timeout = 0; + dhd->rx_ctlpkts++; + DHD_CTL(("%s: ioctl resp resumed, got %d\n", + __FUNCTION__, prot->ioctl_resplen)); + } + + if (dhd->prot->ioctl_resplen > len) + dhd->prot->ioctl_resplen = (uint16)len; + if (buf) + bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen); + + ret = (int)(dhd->prot->ioctl_status); + +out: + DHD_GENERAL_LOCK(dhd, flags); + dhd->prot->ioctl_state = 0; + dhd->prot->ioctl_resplen = 0; + dhd->prot->ioctl_received = IOCTL_WAIT; + dhd->prot->curr_ioctl_cmd = 0; + DHD_GENERAL_UNLOCK(dhd, flags); + + return ret; +} /* dhd_msgbuf_wait_ioctl_cmplt */ + +static int +dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + int ret = 0; + + DHD_TRACE(("%s: Enter \n", 
__FUNCTION__)); + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n", + __FUNCTION__)); + return -EIO; + } + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n", + action, ifidx, cmd, len)); + + /* Fill up msgbuf for ioctl req */ + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + + if (ret < 0) { + DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); + goto done; + } + + ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); + +done: + return ret; +} + +/** Called by upper DHD layer. Handles a protocol control response asynchronously. */ +int dhd_prot_ctl_complete(dhd_pub_t *dhd) +{ + return 0; +} + +/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */ +int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +#ifdef DHD_DUMP_PCIE_RINGS +int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn) +{ + dhd_prot_t *prot; + msgbuf_ring_t *ring; + int ret = 0; + + if (!(dhd) || !(dhd->prot)) { + goto exit; + } + prot = dhd->prot; + + /* Below is the same ring dump sequence followed in parser as well. 
*/ + ring = &prot->h2dring_ctrl_subn; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + + ring = &prot->h2dring_rxp_subn; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + + ring = &prot->d2hring_ctrl_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + + ring = &prot->d2hring_tx_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + + ring = &prot->d2hring_rx_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + + ring = prot->h2d_flowrings_pool; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + +#ifdef EWP_EDL + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support) +#else + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) +#endif /* EWP_EDL */ + { + ring = prot->h2dring_info_subn; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + + ring = prot->d2hring_info_cpln; + if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0) + goto exit; + } + +exit : + return ret; +} + +/* Write to file */ +static +int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn) +{ + int ret = 0; + + if (ring == NULL) { + DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n", + __FUNCTION__)); + return BCME_ERROR; + } + ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va), + ((unsigned long)(ring->max_items) * (ring->item_len))); + if (ret < 0) { + DHD_ERROR(("%s: write file error !\n", __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} +#endif /* DHD_DUMP_PCIE_RINGS */ + +/** Add prot dump output to a buffer */ +void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) + bcm_bprintf(b, "\nd2h_sync: SEQNUM:"); + else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) + bcm_bprintf(b, "\nd2h_sync: XORCSUM:"); + else + 
bcm_bprintf(b, "\nd2h_sync: NONE:"); + bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n", + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); + + bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n", + dhd->dma_h2d_ring_upd_support, + dhd->dma_d2h_ring_upd_support, + dhd->prot->rw_index_sz); + bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", + h2d_max_txpost, dhd->prot->h2d_max_txpost); +} + +/* Update local copy of dongle statistics */ +void dhd_prot_dstats(dhd_pub_t *dhd) +{ + return; +} + +/** Called by upper DHD layer */ +int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count) +{ + return 0; +} + +/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */ +int +dhd_post_dummy_msg(dhd_pub_t *dhd) +{ + unsigned long flags; + hostevent_hdr_t *hevent = NULL; + uint16 alloced = 0; + + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_RING_LOCK(ring->ring_lock, flags); + + hevent = (hostevent_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (hevent == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + return -1; + } + + /* CMN msg header */ + hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + hevent->msg.msg_type = MSG_TYPE_HOST_EVNT; + hevent->msg.if_id = 0; + hevent->msg.flags = ring->current_phase; + + /* Event payload */ + hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD); + + /* Since, we are filling the data directly into the bufptr obtained + * from the msgbuf, we can directly call the write_complete + */ + dhd_prot_ring_write_complete(dhd, ring, hevent, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return 0; +} + +/** + * If exactly_nitems is true, this function will allocate space for nitems or fail + * If exactly_nitems is false, this function will allocate space for nitems or less + */ +static void * 
BCMFASTPATH +dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 * alloced, bool exactly_nitems) +{ + void * ret_buf; + + /* Alloc space for nitems in the ring */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + /* if alloc failed , invalidate cached read ptr */ + if (dhd->dma_d2h_ring_upd_support) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx); + } + + /* Try allocating once more */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + DHD_INFO(("%s: Ring space not available \n", ring->name)); + return NULL; + } + } + + if (ret_buf == HOST_RING_BASE(ring)) { + DHD_INFO(("%s: setting the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + + /* Return alloced space */ + return ret_buf; +} + +/** + * Non inline ioct request. 
+ * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer + * Form a separate request buffer where a 4 byte cmn header is added in the front + * buf contents from parent function is copied to remaining section of this buffer + */ +static int +dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx) +{ + dhd_prot_t *prot = dhd->prot; + ioctl_req_msg_t *ioct_rqst; + void * ioct_buf; /* For ioctl payload */ + uint16 rqstlen, resplen; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + if (dhd_query_bus_erros(dhd)) { + return -EIO; + } + + rqstlen = len; + resplen = len; + + /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */ + /* 8K allocation of dongle buffer fails */ + /* dhd doesnt give separate input & output buf lens */ + /* so making the assumption that input length can never be more than 2k */ + rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN); + + DHD_RING_LOCK(ring->ring_lock, flags); + + if (prot->ioctl_state) { + DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state)); + DHD_RING_UNLOCK(ring->ring_lock, flags); + return BCME_BUSY; + } else { + prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING; + } + + /* Request for cbuf space */ + ioct_rqst = (ioctl_req_msg_t*) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (ioct_rqst == NULL) { + DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n")); + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + DHD_RING_UNLOCK(ring->ring_lock, flags); + return -1; + } + + /* Common msg buf hdr */ + ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ; + ioct_rqst->cmn_hdr.if_id = (uint8)ifidx; + ioct_rqst->cmn_hdr.flags = ring->current_phase; + ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID); + ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + 
ioct_rqst->cmd = htol32(cmd); + prot->curr_ioctl_cmd = cmd; + ioct_rqst->output_buf_len = htol16(resplen); + prot->ioctl_trans_id++; + ioct_rqst->trans_id = prot->ioctl_trans_id; + + /* populate ioctl buffer info */ + ioct_rqst->input_buf_len = htol16(rqstlen); + ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa)); + ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa)); + /* copy ioct payload */ + ioct_buf = (void *) prot->ioctbuf.va; + + if (buf) + memcpy(ioct_buf, buf, len); + + OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len); + + if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) + DHD_ERROR(("host ioct address unaligned !!!!! \n")); + + DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n", + ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len, + ioct_rqst->trans_id)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return 0; +} /* dhd_fillup_ioct_reqst */ + +/** + * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a + * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring + * information is posted to the dongle. + * + * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for + * each flowring in pool of flowrings. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. 
+ */ +static int +dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, + uint16 max_items, uint16 item_len, uint16 ringid) +{ + int dma_buf_alloced = BCME_NOMEM; + uint32 dma_buf_len = max_items * item_len; + dhd_prot_t *prot = dhd->prot; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + dhd_dma_buf_t *dma_buf = NULL; + + ASSERT(ring); + ASSERT(name); + ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF)); + + /* Init name */ + strncpy(ring->name, name, RING_NAME_MAX_LENGTH); + ring->name[RING_NAME_MAX_LENGTH - 1] = '\0'; + + ring->idx = ringid; + + ring->max_items = max_items; + ring->item_len = item_len; + + /* A contiguous space may be reserved for all flowrings */ + if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) { + /* Carve out from the contiguous DMA-able flowring buffer */ + uint16 flowid; + uint32 base_offset; + + dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf; + dma_buf = &ring->dma_buf; + + flowid = DHD_RINGID_TO_FLOWID(ringid); + base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len; + + ASSERT(base_offset + dma_buf_len <= rsv_buf->len); + + dma_buf->len = dma_buf_len; + dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset); + PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa)); + PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset); + + /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */ + ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa)); + + dma_buf->dmah = rsv_buf->dmah; + dma_buf->secdma = rsv_buf->secdma; + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + } else { +#ifdef EWP_EDL + if (ring == dhd->prot->d2hring_edl) { + /* For EDL ring, memory is alloced during attach, + * so just need to copy the dma_buf to the ring's dma_buf + */ + memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf)); + dma_buf = &ring->dma_buf; + if (dma_buf->va == NULL) { + return BCME_NOMEM; + } + } else +#endif /* EWP_EDL 
*/ + { + /* Allocate a dhd_dma_buf */ + dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len); + if (dma_buf_alloced != BCME_OK) { + return BCME_NOMEM; + } + } + } + + /* CAUTION: Save ring::base_addr in little endian format! */ + dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa); + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t)); + if (ring->dma_buf.secdma == NULL) { + goto free_dma_buf; + } + } +#endif /* BCM_SECURE_DMA */ + + ring->ring_lock = dhd_os_spin_lock_init(dhd->osh); + + DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d " + "ring start %p buf phys addr %x:%x \n", + ring->name, ring->max_items, ring->item_len, + dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr))); + + return BCME_OK; + +#ifdef BCM_SECURE_DMA +free_dma_buf: + if (dma_buf_alloced == BCME_OK) { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } +#endif /* BCM_SECURE_DMA */ + + return BCME_NOMEM; + +} /* dhd_prot_ring_attach */ + +/** + * dhd_prot_ring_init - Post the common ring information to dongle. + * + * Used only for common rings. + * + * The flowrings information is passed via the create flowring control message + * (tx_flowring_create_request_t) sent over the H2D control submission common + * ring. 
+ */ +static void +dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + + /* CAUTION: ring::base_addr already in Little Endian */ + dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, + sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items, + sizeof(uint16), RING_MAX_ITEMS, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len, + sizeof(uint16), RING_ITEM_LEN, ring->idx); + + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + + /* ring inited */ + ring->inited = TRUE; + +} /* dhd_prot_ring_init */ + +/** + * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush + * Reset WR and RD indices to 0. + */ +static void +dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + DHD_TRACE(("%s\n", __FUNCTION__)); + + dhd_dma_buf_reset(dhd, &ring->dma_buf); + + ring->rd = ring->wr = 0; + ring->curr_rd = 0; + ring->inited = FALSE; + ring->create_pending = FALSE; +} + +/** + * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects + * hanging off the msgbuf_ring. + */ +static void +dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + dhd_prot_t *prot = dhd->prot; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + ASSERT(ring); + + ring->inited = FALSE; + /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */ + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + if (ring->dma_buf.secdma) { + SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma); + MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t)); + ring->dma_buf.secdma = NULL; + } + } +#endif /* BCM_SECURE_DMA */ + + /* If the DMA-able buffer was carved out of a pre-reserved contiguous + * memory, then simply stop using it. 
+ */ + if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) { + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t)); + } else { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } + + dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock); + +} /* dhd_prot_ring_detach */ + +/* + * +---------------------------------------------------------------------------- + * Flowring Pool + * + * Unlike common rings, which are attached very early on (dhd_prot_attach), + * flowrings are dynamically instantiated. Moreover, flowrings may require a + * larger DMA-able buffer. To avoid issues with fragmented cache coherent + * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once. + * The DMA-able buffers are attached to these pre-allocated msgbuf_ring. + * + * Each DMA-able buffer may be allocated independently, or may be carved out + * of a single large contiguous region that is registered with the protocol + * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region + * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic). + * + * No flowring pool action is performed in dhd_prot_attach(), as the number + * of h2d rings is not yet known. + * + * In dhd_prot_init(), the dongle advertized number of h2d rings is used to + * determine the number of flowrings required, and a pool of msgbuf_rings are + * allocated and a DMA-able buffer (carved or allocated) is attached. + * See: dhd_prot_flowrings_pool_attach() + * + * A flowring msgbuf_ring object may be fetched from this pool during flowring + * creation, using the flowid. Likewise, flowrings may be freed back into the + * pool on flowring deletion. + * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release() + * + * In dhd_prot_detach(), the flowring pool is detached. 
The DMA-able buffers + * are detached (returned back to the carved region or freed), and the pool of + * msgbuf_ring and any objects allocated against it are freed. + * See: dhd_prot_flowrings_pool_detach() + * + * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a + * state as-if upon an attach. All DMA-able buffers are retained. + * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring + * pool attach will notice that the pool persists and continue to use it. This + * will avoid the case of a fragmented DMA-able region. + * + * +---------------------------------------------------------------------------- + */ + +/* Conversion of a flowid to a flowring pool index */ +#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ +#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ + (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \ + DHD_FLOWRINGS_POOL_OFFSET(flowid) + +/* Traverse each flowring in the flowring pool, assigning ring and flowid */ +#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \ + for ((flowid) = DHD_FLOWRING_START_FLOWID, \ + (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ + (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \ + (ring)++, (flowid)++) + +/* Fetch number of H2D flowrings given the total number of h2d rings */ +static uint16 +dhd_get_max_flow_rings(dhd_pub_t *dhd) +{ + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) + return dhd->bus->max_tx_flowrings; + else + return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS); +} + +/** + * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t. + * + * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings. + * Dongle includes common rings when it advertizes the number of H2D rings. 
+ * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to + * allocate the DMA-able buffer and initialize each msgbuf_ring_t object. + * + * dhd_prot_ring_attach is invoked to perform the actual initialization and + * attaching the DMA-able buffer. + * + * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and + * initialized msgbuf_ring_t object. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. + */ +static int +dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd) +{ + uint16 flowid; + msgbuf_ring_t *ring; + uint16 h2d_flowrings_total; /* exclude H2D common rings */ + dhd_prot_t *prot = dhd->prot; + char ring_name[RING_NAME_MAX_LENGTH]; + + if (prot->h2d_flowrings_pool != NULL) + return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ + + ASSERT(prot->h2d_rings_total == 0); + + /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */ + prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus); + + if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) { + DHD_ERROR(("%s: h2d_rings_total advertized as %u\n", + __FUNCTION__, prot->h2d_rings_total)); + return BCME_ERROR; + } + + /* Subtract number of H2D common rings, to determine number of flowrings */ + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); + + DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total)); + + /* Allocate pool of msgbuf_ring_t objects for all flowrings */ + prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh, + (h2d_flowrings_total * sizeof(msgbuf_ring_t))); + + if (prot->h2d_flowrings_pool == NULL) { + DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n", + __FUNCTION__, h2d_flowrings_total)); + goto fail; + } + + /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { + snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid); + if 
(dhd_prot_ring_attach(dhd, ring, ring_name, + prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE, + DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) { + goto attach_fail; + } + } + + return BCME_OK; + +attach_fail: + dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */ + +fail: + prot->h2d_rings_total = 0; + return BCME_NOMEM; + +} /* dhd_prot_flowrings_pool_attach */ + +/** + * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool. + * Invokes dhd_prot_ring_reset to perform the actual reset. + * + * The DMA-able buffer is not freed during reset and neither is the flowring + * pool freed. + * + * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following + * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool + * from a previous flowring pool instantiation will be reused. + * + * This will avoid a fragmented DMA-able memory condition, if multiple + * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach + * cycle. + */ +static void +dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd) +{ + uint16 flowid, h2d_flowrings_total; + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); + /* Reset each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { + dhd_prot_ring_reset(dhd, ring); + ring->inited = FALSE; + } + + /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */ +} + +/** + * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with + * DMA-able buffers for flowrings. + * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any + * de-initialization of each msgbuf_ring_t. 
+ */ +static void +dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) +{ + int flowid; + msgbuf_ring_t *ring; + uint16 h2d_flowrings_total; /* exclude H2D common rings */ + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); + /* Detach the DMA-able buffer for each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { + dhd_prot_ring_detach(dhd, ring); + } + + MFREE(prot->osh, prot->h2d_flowrings_pool, + (h2d_flowrings_total * sizeof(msgbuf_ring_t))); + + prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL; + prot->h2d_rings_total = 0; + +} /* dhd_prot_flowrings_pool_detach */ + +/** + * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized + * msgbuf_ring from the flowring pool, and assign it. + * + * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common + * ring information to the dongle, a flowring's information is passed via a + * flowring create control message. + * + * Only the ring state (WR, RD) index are initialized. + */ +static msgbuf_ring_t * +dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + /* ASSERT flow_ring->inited == FALSE */ + + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + ring->inited = TRUE; + /** + * Every time a flowring starts dynamically, initialize current_phase with 0 + * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT + */ + ring->current_phase = 0; + return ring; +} + +/** + * dhd_prot_flowrings_pool_release - release a previously fetched flowring's + * msgbuf_ring back to the flow_ring pool. 
+ */ +void +dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + ASSERT(ring == (msgbuf_ring_t*)flow_ring); + /* ASSERT flow_ring->inited == TRUE */ + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + + ring->wr = 0; + ring->rd = 0; + ring->inited = FALSE; + + ring->curr_rd = 0; +} + +/* Assumes only one index is updated at a time */ +/* If exactly_nitems is true, this function will allocate space for nitems or fail */ +/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */ +/* If exactly_nitems is false, this function will allocate space for nitems or less */ +static void *BCMFASTPATH +dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, + bool exactly_nitems) +{ + void *ret_ptr = NULL; + uint16 ring_avail_cnt; + + ASSERT(nitems <= ring->max_items); + + ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items); + + if ((ring_avail_cnt == 0) || + (exactly_nitems && (ring_avail_cnt < nitems) && + ((ring->max_items - ring->wr) >= nitems))) { + DHD_INFO(("Space not available: ring %s items %d write %d read %d\n", + ring->name, nitems, ring->wr, ring->rd)); + return NULL; + } + *alloced = MIN(nitems, ring_avail_cnt); + + /* Return next available space */ + ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len); + + /* Update write index */ + if ((ring->wr + *alloced) == ring->max_items) + ring->wr = 0; + else if ((ring->wr + *alloced) < ring->max_items) + ring->wr += *alloced; + else { + /* Should never hit this */ + ASSERT(0); + return NULL; + } + + return ret_ptr; +} /* dhd_prot_get_ring_space */ + +/** + * dhd_prot_ring_write_complete - Host updates the new WR index on producing + * new messages in a 
H2D ring. The messages are flushed from cache prior to + * posting the new WR index. The new WR index will be updated in the DMA index + * array or directly in the dongle's ring state memory. + * A PCIE doorbell will be generated to wake up the dongle. + * This is a non-atomic function, make sure the callers + * always hold appropriate locks. + */ +static void BCMFASTPATH +__dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + dhd_prot_t *prot = dhd->prot; + uint32 db_index; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + uint corerev; + + /* cache flush */ + OSL_CACHE_FLUSH(p, ring->item_len * nitems); + + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_DMA_INDX_WR_UPD, ring->idx); + } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_IFRM_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + } + + /* raise h2d interrupt */ + if (IDMA_ACTIVE(dhd) || + (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) { + db_index = IDMA_IDX0; + /* this api is called in wl down path..in that case sih is freed already */ + if (dhd->bus->sih) { + corerev = dhd->bus->sih->buscorerev; + /* We need to explictly configure the type of DMA for core rev >= 24 */ + if (corerev >= 24) { + db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT); + } + } + prot->mb_2_ring_fn(dhd->bus, db_index, TRUE); + } else { + prot->mb_ring_fn(dhd->bus, ring->wr); + } +} + +static void BCMFASTPATH +dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + unsigned long flags_bus; + DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus); + __dhd_prot_ring_write_complete(dhd, ring, p, nitems); + DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus); +} + +/** + * dhd_prot_ring_write_complete_mbdata - will be called from 
dhd_prot_h2d_mbdata_send_ctrlmsg, + * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state + * to indicate D3_INFORM sent in the same BUS_LOCK. + */ +static void BCMFASTPATH +dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p, + uint16 nitems, uint32 mb_data) +{ + unsigned long flags_bus; + + DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus); + + __dhd_prot_ring_write_complete(dhd, ring, p, nitems); + + /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */ + if (mb_data == H2D_HOST_D3_INFORM) { + dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT; + } + + DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus); +} + +/** + * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages + * from a D2H ring. The new RD index will be updated in the DMA Index array or + * directly in dongle's ring state memory. + */ +static void +dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) +{ + dhd_prot_t *prot = dhd->prot; + uint32 db_index; + uint corerev; + + /* update read index */ + /* If dma'ing h2d indices supported + * update the r -indices in the + * host memory o/w in TCM + */ + if (IDMA_ACTIVE(dhd)) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); + db_index = IDMA_IDX1; + if (dhd->bus->sih) { + corerev = dhd->bus->sih->buscorerev; + /* We need to explictly configure the type of DMA for core rev >= 24 */ + if (corerev >= 24) { + db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT); + } + } + prot->mb_2_ring_fn(dhd->bus, db_index, FALSE); + } else if (dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + } +} + +static int +dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, + uint16 ring_type, uint32 req_id) +{ + unsigned long flags; + d2h_ring_create_req_t 
*d2h_ring; + uint16 alloced = 0; + int ret = BCME_OK; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; + msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn; + + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__)); + + if (ring_to_create == NULL) { + DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + goto err; + } + + /* Request for ring buffer space */ + d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd, + ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + + if (d2h_ring == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n", + __FUNCTION__)); + ret = BCME_NOMEM; + goto err; + } + ring_to_create->create_req_id = (uint16)req_id; + ring_to_create->create_pending = TRUE; + + /* Common msg buf hdr */ + d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE; + d2h_ring->msg.if_id = 0; + d2h_ring->msg.flags = ctrl_ring->current_phase; + d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id); + d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings)); + DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id, + ring_to_create->idx, max_h2d_rings)); + + d2h_ring->ring_type = ring_type; + d2h_ring->max_items = htol16(ring_to_create->max_items); + d2h_ring->len_item = htol16(ring_to_create->item_len); + d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; + d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; + + d2h_ring->flags = 0; + d2h_ring->msg.epoch = + ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; +#ifdef EWP_EDL + if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) { + DHD_ERROR(("%s: sending d2h EDL ring create: " + "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n", + __FUNCTION__, ltoh16(d2h_ring->max_items), + ltoh16(d2h_ring->len_item), + 
ltoh16(d2h_ring->ring_id), + d2h_ring->ring_ptr.low_addr, + d2h_ring->ring_ptr.high_addr)); + } +#endif /* EWP_EDL */ + + /* Update the flow_ring's WRITE index */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + + return ret; +err: + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + + return ret; +} + +static int +dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id) +{ + unsigned long flags; + h2d_ring_create_req_t *h2d_ring; + uint16 alloced = 0; + uint8 i = 0; + int ret = BCME_OK; + msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn; + + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__)); + + if (ring_to_create == NULL) { + DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + goto err; + } + + /* Request for ring buffer space */ + h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd, + ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + + if (h2d_ring == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n", + __FUNCTION__)); + ret = BCME_NOMEM; + goto err; + } + ring_to_create->create_req_id = (uint16)id; + ring_to_create->create_pending = TRUE; + + /* Common msg buf hdr */ + h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE; + h2d_ring->msg.if_id = 0; + h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id); + h2d_ring->msg.flags = ctrl_ring->current_phase; + h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx)); + h2d_ring->ring_type = ring_type; + h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM); + h2d_ring->n_completion_ids = ring_to_create->n_completion_ids; + h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE); + h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; + 
h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; + + for (i = 0; i < ring_to_create->n_completion_ids; i++) { + h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]); + } + + h2d_ring->flags = 0; + h2d_ring->msg.epoch = + ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update the flow_ring's WRITE index */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + + return ret; +err: + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + + return ret; +} + +/** + * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array. + * Dongle will DMA the entire array (if DMA_INDX feature is enabled). + * See dhd_prot_dma_indx_init() + */ +void +dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); + break; + + case H2D_IFRM_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va); + offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + *(uint16*)ptr = htol16(new_index); + + OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, new_index, type, ringid, ptr, offset)); + +} /* dhd_prot_dma_indx_set */ + +/** + * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index + * array. 
+ * Dongle DMAes an entire array to host memory (if the feature is enabled). + * See dhd_prot_dma_indx_init() + */ +static uint16 +dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 data; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case H2D_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return 0; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + OSL_CACHE_INV((void *)ptr, prot->rw_index_sz); + + data = LTOH16(*((uint16*)ptr)); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, data, type, ringid, ptr, offset)); + + return (data); + +} /* dhd_prot_dma_indx_get */ + +/** + * An array of DMA read/write indices, containing information about host rings, can be maintained + * either in host memory or in device memory, dependent on preprocessor options. This function is, + * dependent on these options, called during driver initialization. It reserves and initializes + * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical + * address of these host memory blocks are communicated to the dongle later on. By reading this host + * memory, the dongle learns about the state of the host rings. 
+ */ + +static INLINE int +dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz) +{ + int rc; + + if ((dma_buf->len == bufsz) || (dma_buf->va != NULL)) + return BCME_OK; + + rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz); + + return rc; +} + +int +dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length) +{ + uint32 bufsz; + dhd_prot_t *prot = dhd->prot; + dhd_dma_buf_t *dma_buf; + + if (prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + + /* Dongle advertizes 2B or 4B RW index size */ + ASSERT(rw_index_sz != 0); + prot->rw_index_sz = rw_index_sz; + + bufsz = rw_index_sz * length; + + switch (type) { + case H2D_DMA_INDX_WR_BUF: + dma_buf = &prot->h2d_dma_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) + goto ret_no_mem; + DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case H2D_DMA_INDX_RD_BUF: + dma_buf = &prot->h2d_dma_indx_rd_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) + goto ret_no_mem; + DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case D2H_DMA_INDX_WR_BUF: + dma_buf = &prot->d2h_dma_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) + goto ret_no_mem; + DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case D2H_DMA_INDX_RD_BUF: + dma_buf = &prot->d2h_dma_indx_rd_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) + goto ret_no_mem; + DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + case H2D_IFRM_INDX_WR_BUF: + dma_buf = &prot->h2d_ifrm_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) + goto ret_no_mem; + DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + + default: + 
DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__)); + return BCME_BADOPTION; + } + + return BCME_OK; + +ret_no_mem: + DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n", + __FUNCTION__, type, bufsz)); + return BCME_NOMEM; + +} /* dhd_prot_dma_indx_init */ + +/** + * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read + * from, or NULL if there are no more messages to read. + */ +static uint8* +dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len) +{ + uint16 wr; + uint16 rd; + uint16 depth; + uint16 items; + void *read_addr = NULL; /* address of next msg to be read in ring */ + uint16 d2h_wr = 0; + + DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n", + __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va), + (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va))); + + /* Remember the read index in a variable. + * This is becuase ring->rd gets updated in the end of this function + * So if we have to print the exact read index from which the + * message is read its not possible. + */ + ring->curr_rd = ring->rd; + + /* update write pointer */ + if (dhd->dma_d2h_ring_upd_support) { + /* DMAing write/read indices supported */ + d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); + ring->wr = d2h_wr; + } else { + dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx); + } + + wr = ring->wr; + rd = ring->rd; + depth = ring->max_items; + + /* check for avail space, in number of ring items */ + items = READ_AVAIL_SPACE(wr, rd, depth); + if (items == 0) + return NULL; + + /* + * Note that there are builds where Assert translates to just printk + * so, even if we had hit this condition we would never halt. Now + * dhd_prot_process_msgtype can get into an big loop if this + * happens. 
+ */ + if (items > ring->max_items) { + DHD_ERROR(("\r\n======================= \r\n")); + DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", + __FUNCTION__, ring, ring->name, ring->max_items, items)); + DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth)); + DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n", + dhd->busstate, dhd->bus->wait_for_d3_ack)); + DHD_ERROR(("\r\n======================= \r\n")); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR; + dhd_bus_mem_dump(dhd); + + } +#endif /* DHD_FW_COREDUMP */ + + *available_len = 0; + dhd_schedule_reset(dhd); + + return NULL; + } + + /* if space is available, calculate address to be read */ + read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len); + + /* update read pointer */ + if ((ring->rd + items) >= ring->max_items) + ring->rd = 0; + else + ring->rd += items; + + ASSERT(ring->rd < ring->max_items); + + /* convert items to bytes : available_len must be 32bits */ + *available_len = (uint32)(items * ring->item_len); + + OSL_CACHE_INV(read_addr, *available_len); + + /* return read address */ + return read_addr; + +} /* dhd_prot_get_read_addr */ + +/** + * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function, + * make sure the callers always hold appropriate locks. 
+ */
+int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
+{
+	h2d_mailbox_data_t *h2d_mb_data;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
+	unsigned long flags;
+	int num_post = 1;
+	int i;
+
+	DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
+		__FUNCTION__, mb_data));
+	if (!ctrl_ring->inited) {
+		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	for (i = 0; i < num_post; i++) {
+		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+		/* Request for ring buffer space */
+		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
+			ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+			&alloced, FALSE);
+
+		if (h2d_mb_data == NULL) {
+			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
+				__FUNCTION__));
+			DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+			return BCME_NOMEM;
+		}
+
+		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
+		/* Common msg buf hdr */
+		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
+		h2d_mb_data->msg.flags = ctrl_ring->current_phase;
+
+		h2d_mb_data->msg.epoch =
+			ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+		ctrl_ring->seqnum++;
+
+		/* Update mailbox payload.
+		 * NOTE(review): the original code assigned mail_box_data twice
+		 * (once bare, once inside a stray braced block); the redundant
+		 * duplicate assignment has been removed -- single store only.
+		 */
+		h2d_mb_data->mail_box_data = htol32(mb_data);
+
+		DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
+
+		/* upd wrt ptr and raise interrupt */
+		dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
+			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
+
+		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+	}
+	return 0;
+}
+
+/** Creates a flow ring and informs dongle of this event */
+int
+dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_create_request_t *flow_create_rqst;
+	msgbuf_ring_t *flow_ring;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ctrl_ring =
&prot->h2dring_ctrl_subn; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); + if (flow_ring == NULL) { + DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_NOMEM; + } + + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + /* Request for ctrl_ring buffer space */ + flow_create_rqst = (tx_flowring_create_request_t *) + dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); + + if (flow_create_rqst == NULL) { + dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); + DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", + __FUNCTION__, flow_ring_node->flowid)); + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + return BCME_NOMEM; + } + + flow_ring_node->prot_info = (void *)flow_ring; + + /* Common msg buf hdr */ + flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; + flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_create_rqst->msg.request_id = htol32(0); /* TBD */ + flow_create_rqst->msg.flags = ctrl_ring->current_phase; + + flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update flow create message */ + flow_create_rqst->tid = flow_ring_node->flow_info.tid; + flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); + memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); + /* CAUTION: ring::base_addr already in Little Endian */ + flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr; + flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; + flow_create_rqst->max_items = htol16(prot->h2d_max_txpost); + flow_create_rqst->len_item = 
htol16(H2DRING_TXPOST_ITEMSIZE); + + /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core + * currently it is not used for priority. so uses solely for ifrm mask + */ + if (IFRM_ACTIVE(dhd)) + flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0); + + DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* Update the flow_ring's WRITE index */ + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_IFRM_INDX_WR_UPD, flow_ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), + sizeof(uint16), RING_WR_UPD, flow_ring->idx); + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_create */ + +/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */ +static void +dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg; + + DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__, + ltoh16(flow_create_resp->cmplt.status), + ltoh16(flow_create_resp->cmplt.flow_ring_id))); + + dhd_bus_flow_ring_create_response(dhd->bus, + ltoh16(flow_create_resp->cmplt.flow_ring_id), + ltoh16(flow_create_resp->cmplt.status)); +} + +static void +dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf) +{ + h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf; + DHD_INFO(("%s ring create Response 
status = %d ring %d, id 0x%04x\n", __FUNCTION__, + ltoh16(resp->cmplt.status), + ltoh16(resp->cmplt.ring_id), + ltoh32(resp->cmn_hdr.request_id))); + if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) && + (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) { + DHD_ERROR(("invalid request ID with h2d ring create complete\n")); + return; + } + if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) && + !dhd->prot->h2dring_info_subn->create_pending) { + DHD_ERROR(("info ring create status for not pending submit ring\n")); + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("info/btlog ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) { + dhd->prot->h2dring_info_subn->create_pending = FALSE; + dhd->prot->h2dring_info_subn->inited = TRUE; + DHD_ERROR(("info buffer post after ring create\n")); + dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn); + } +} + +static void +dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf) +{ + d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf; + DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__, + ltoh16(resp->cmplt.status), + ltoh16(resp->cmplt.ring_id), + ltoh32(resp->cmn_hdr.request_id))); + if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) && + (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID)) { + DHD_ERROR(("invalid request ID with d2h ring create complete\n")); + return; + } + if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) { +#ifdef EWP_EDL + if (!dhd->dongle_edl_support) +#endif // endif + { + if (!dhd->prot->d2hring_info_cpln->create_pending) { + DHD_ERROR(("info ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("info cpl 
ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_info_cpln->create_pending = FALSE; + dhd->prot->d2hring_info_cpln->inited = TRUE; + } +#ifdef EWP_EDL + else { + if (!dhd->prot->d2hring_edl->create_pending) { + DHD_ERROR(("edl ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("edl cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_edl->create_pending = FALSE; + dhd->prot->d2hring_edl->inited = TRUE; + } +#endif /* EWP_EDL */ + } + +} + +static void +dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf) +{ + d2h_mailbox_data_t *d2h_data; + + d2h_data = (d2h_mailbox_data_t *)buf; + DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__, + d2h_data->d2h_mailbox_data)); + dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data); +} + +static void +dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf) +{ + DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n")); + +} + +/** called on e.g. 
flow ring delete */ +void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) +{ + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + dhd_prot_ring_detach(dhd, flow_ring); + DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__)); +} + +void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, + struct bcmstrbuf *strbuf, const char * fmt) +{ + const char *default_fmt = + "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d " + "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n"; + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + uint16 rd, wr; + uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len; + + if (fmt == NULL) { + fmt = default_fmt; + } + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va, + ltoh32(flow_ring->base_addr.high_addr), + ltoh32(flow_ring->base_addr.low_addr), + flow_ring->item_len, flow_ring->max_items, + dma_buf_len); +} + +void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + dhd_prot_t *prot = dhd->prot; + bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n", + dhd->prot->device_ipc_version, + dhd->prot->host_ipc_version, + dhd->prot->active_ipc_version); + + bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n", + dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted); + bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n", + dhd->prot->max_infobufpost, dhd->prot->infobufpost); + bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n", + dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted); + bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n", + dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted); + bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n", + dhd->prot->max_rxbufpost, dhd->prot->rxbufpost); + + 
bcm_bprintf(strbuf, + "%14s %5s %5s %17s %17s %14s %14s %10s\n", + "Type", "RD", "WR", "BASE(VA)", "BASE(PA)", + "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE"); + bcm_bprintf(strbuf, "%14s", "H2DCtrlPost"); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HRxCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HTxCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) { + bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub"); + dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl"); + dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + } + if (dhd->prot->d2hring_edl != NULL) { + bcm_bprintf(strbuf, "%14s", "D2HRingEDL"); + dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + } + + bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n", + OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count), + DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map), + DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map), + DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)); + +} + +int +dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_delete_request_t *flow_delete_rqst; + dhd_prot_t *prot = 
dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Request for ring buffer space */ + flow_delete_rqst = (tx_flowring_delete_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (flow_delete_rqst == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; + flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_delete_rqst->msg.request_id = htol32(0); /* TBD */ + flow_delete_rqst->msg.flags = ring->current_phase; + + flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update Delete info */ + flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_delete_rqst->reason = htol16(BCME_OK); + + DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return BCME_OK; +} + +static void BCMFASTPATH +dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx) +{ + flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid); + msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + host_txbuf_cmpl_t txstatus; + host_txbuf_post_t *txdesc; + uint16 wr_idx; + + DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n", + __FUNCTION__, flowid, rd_idx, ring->wr)); + + memset(&txstatus, 0, sizeof(txstatus)); + txstatus.compl_hdr.flow_ring_id = flowid; + txstatus.cmn_hdr.if_id = 
flow_ring_node->flow_info.ifindex; + wr_idx = ring->wr; + + while (wr_idx != rd_idx) { + if (wr_idx) + wr_idx--; + else + wr_idx = ring->max_items - 1; + txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) + + (wr_idx * ring->item_len)); + txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id; + dhd_prot_txstatus_process(dhd, &txstatus); + } +} + +static void +dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg; + + DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__, + flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id)); + + if (dhd->fast_delete_ring_support) { + dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id, + flow_delete_resp->read_idx); + } + dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, + flow_delete_resp->cmplt.status); +} + +static void +dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg) +{ +#ifdef IDLE_TX_FLOW_MGMT + tx_idle_flowring_resume_response_t *flow_resume_resp = + (tx_idle_flowring_resume_response_t *)msg; + + DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__, + flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id)); + + dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id, + flow_resume_resp->cmplt.status); +#endif /* IDLE_TX_FLOW_MGMT */ +} + +static void +dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg) +{ +#ifdef IDLE_TX_FLOW_MGMT + int16 status; + tx_idle_flowring_suspend_response_t *flow_suspend_resp = + (tx_idle_flowring_suspend_response_t *)msg; + status = flow_suspend_resp->cmplt.status; + + DHD_ERROR(("%s Flow id %d suspend Response status = %d\n", + __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id, + status)); + + if (status != BCME_OK) { + + DHD_ERROR(("%s Error in Suspending Flow rings!!" 
+ "Dongle will still be polling idle rings!!Status = %d \n", + __FUNCTION__, status)); + } +#endif /* IDLE_TX_FLOW_MGMT */ +} + +int +dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_flush_request_t *flow_flush_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Request for ring buffer space */ + flow_flush_rqst = (tx_flowring_flush_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (flow_flush_rqst == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; + flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_flush_rqst->msg.request_id = htol32(0); /* TBD */ + flow_flush_rqst->msg.flags = ring->current_phase; + flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_flush_rqst->reason = htol16(BCME_OK); + + DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_flush */ + +static void +dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg; + + DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__, + flow_flush_resp->cmplt.status)); + + dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id, + flow_flush_resp->cmplt.status); +} + +/** + * Request dongle to configure soft doorbells for D2H rings. 
Host populated soft + * doorbell information is transferred to dongle via the d2h ring config control + * message. + */ +void +dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd) +{ +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + uint16 ring_idx; + uint8 *msg_next; + void *msg_start; + uint16 alloced = 0; + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + ring_config_req_t *ring_config_req; + bcmpcie_soft_doorbell_t *soft_doorbell; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS; + + /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */ + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE); + + if (msg_start == NULL) { + DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n", + __FUNCTION__, d2h_rings)); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + return; + } + + msg_next = (uint8*)msg_start; + + for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) { + + /* position the ring_config_req into the ctrl subm ring */ + ring_config_req = (ring_config_req_t *)msg_next; + + /* Common msg header */ + ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG; + ring_config_req->msg.if_id = 0; + ring_config_req->msg.flags = 0; + + ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */ + + /* Ring Config subtype and d2h ring_id */ + ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL); + ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx)); + + /* Host soft doorbell configuration */ + soft_doorbell = &prot->soft_doorbell[ring_idx]; + + ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value); + ring_config_req->soft_doorbell.haddr.high = + htol32(soft_doorbell->haddr.high); + ring_config_req->soft_doorbell.haddr.low = + 
htol32(soft_doorbell->haddr.low); + ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items); + ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs); + + DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n", + __FUNCTION__, ring_config_req->soft_doorbell.haddr.high, + ring_config_req->soft_doorbell.haddr.low, + ring_config_req->soft_doorbell.value)); + + msg_next = msg_next + ctrl_ring->item_len; + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ +} + +static void +dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg) +{ + DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n", + __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status), + ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id))); +} + +int +dhd_prot_debug_info_print(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring; + uint16 rd, wr; + uint32 dma_buf_len; + + DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n")); + DHD_ERROR(("DHD: %s\n", dhd_version)); + DHD_ERROR(("Firmware: %s\n", fw_version)); + + DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n")); + DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n", + prot->device_ipc_version, + prot->host_ipc_version, + prot->active_ipc_version)); + DHD_ERROR(("d2h_intr_method -> %s\n", + dhd->bus->d2h_intr_method ? 
"PCIE_MSI" : "PCIE_INTX")); + DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n", + prot->max_tsbufpost, prot->cur_ts_bufs_posted)); + DHD_ERROR(("max INFO bufs to post: %d, posted %d\n", + prot->max_infobufpost, prot->infobufpost)); + DHD_ERROR(("max event bufs to post: %d, posted %d\n", + prot->max_eventbufpost, prot->cur_event_bufs_posted)); + DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n", + prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted)); + DHD_ERROR(("max RX bufs to post: %d, posted %d\n", + prot->max_rxbufpost, prot->rxbufpost)); + DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", + h2d_max_txpost, prot->h2d_max_txpost)); + + DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); + + ring = &prot->h2dring_ctrl_subn; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + + ring = &prot->d2hring_ctrl_cpln; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid" + " due to PCIe link 
down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + + ring = prot->h2dring_info_subn; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + } + ring = prot->d2hring_info_cpln; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO)); + } + + ring = &prot->d2hring_tx_cpln; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + 
DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO)); + } + + ring = &prot->d2hring_rx_cpln; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO)); + } +#ifdef EWP_EDL + ring = prot->d2hring_edl; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + if (dhd->bus->is_linkdown) { + DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid" + " due to PCIe link down\r\n")); + } else { + 
dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + } + DHD_ERROR(("EdlRing: Expected seq num: %d \r\n", + ring->seqnum % D2H_EPOCH_MODULO)); + } +#endif /* EWP_EDL */ + + DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n", + __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted)); + + dhd_pcie_debug_info_dump(dhd); + + return 0; +} + +int +dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + uint32 *ptr; + uint32 value; + + if (dhd->prot->d2h_dma_indx_wr_buf.va) { + uint32 i; + uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus); + + OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va, + dhd->prot->d2h_dma_indx_wr_buf.len); + + ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va); + + bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues); + + bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); + + ptr++; + bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr); + for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { + value = ltoh32(*ptr); + bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); + ptr++; + } + } + + if (dhd->prot->h2d_dma_indx_rd_buf.va) { + OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va, + dhd->prot->h2d_dma_indx_rd_buf.len); + + ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va); + + bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); + } + + return 0; +} + 
+uint32 +dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val) +{ + dhd_prot_t *prot = dhd->prot; +#if DHD_DBG_SHOW_METADATA + prot->metadata_dbg = val; +#endif // endif + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadata_dbg_get(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + prot->rx_metadata_offset = (uint16)val; + else + prot->tx_metadata_offset = (uint16)val; + return dhd_prot_metadatalen_get(dhd, rx); +} + +uint32 +dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + return prot->rx_metadata_offset; + else + return prot->tx_metadata_offset; +} + +/** optimization to write "n" tx items at a time to ring */ +uint32 +dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + dhd_prot_t *prot = dhd->prot; + if (set) + prot->txp_threshold = (uint16)val; + val = prot->txp_threshold; + return val; +} + +#ifdef DHD_RX_CHAINING + +static INLINE void BCMFASTPATH +dhd_rxchain_reset(rxchain_info_t *rxchain) +{ + rxchain->pkt_count = 0; +} + +static void BCMFASTPATH +dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) +{ + uint8 *eh; + uint8 prio; + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + ASSERT(!PKTISCHAINED(pkt)); + ASSERT(PKTCLINK(pkt) == NULL); + ASSERT(PKTCGETATTR(pkt) == 0); + + eh = PKTDATA(dhd->osh, pkt); + prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; + + if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, + rxchain->h_da, rxchain->h_prio))) { + /* Different flow - First release the existing chain */ + dhd_rxchain_commit(dhd); + } + + /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ + /* so that the chain can be handed off to CTF bridge as is. 
*/ + if (rxchain->pkt_count == 0) { + /* First packet in chain */ + rxchain->pkthead = rxchain->pkttail = pkt; + + /* Keep a copy of ptr to ether_da, ether_sa and prio */ + rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; + rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; + rxchain->h_prio = prio; + rxchain->ifidx = ifidx; + rxchain->pkt_count++; + } else { + /* Same flow - keep chaining */ + PKTSETCLINK(rxchain->pkttail, pkt); + rxchain->pkttail = pkt; + rxchain->pkt_count++; + } + + if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) && + ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || + (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { + PKTSETCHAINED(dhd->osh, pkt); + PKTCINCRCNT(rxchain->pkthead); + PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); + } else { + dhd_rxchain_commit(dhd); + return; + } + + /* If we have hit the max chain length, dispatch the chain and reset */ + if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { + dhd_rxchain_commit(dhd); + } +} + +static void BCMFASTPATH +dhd_rxchain_commit(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + if (rxchain->pkt_count == 0) + return; + + /* Release the packets to dhd_linux */ + dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); + + /* Reset the chain */ + dhd_rxchain_reset(rxchain); +} + +#endif /* DHD_RX_CHAINING */ + +#ifdef IDLE_TX_FLOW_MGMT +int +dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_idle_flowring_resume_request_t *flow_resume_rqst; + msgbuf_ring_t *flow_ring; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); + if (flow_ring == NULL) { + DHD_ERROR(("%s: 
dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_NOMEM; + } + + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + /* Request for ctrl_ring buffer space */ + flow_resume_rqst = (tx_idle_flowring_resume_request_t *) + dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); + + if (flow_resume_rqst == NULL) { + dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); + DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n", + __FUNCTION__, flow_ring_node->flowid)); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + return BCME_NOMEM; + } + + flow_ring_node->prot_info = (void *)flow_ring; + + /* Common msg buf hdr */ + flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME; + flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_resume_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + DHD_ERROR(("%s Send Flow resume Req flow ID %d\n", + __FUNCTION__, flow_ring_node->flowid)); + + /* Update the flow_ring's WRITE index */ + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_IFRM_INDX_WR_UPD, + (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), + sizeof(uint16), RING_WR_UPD, flow_ring->idx); + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_create */ + +int 
+dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count) +{ + tx_idle_flowring_suspend_request_t *flow_suspend_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 index; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_RING_LOCK(ring->ring_lock, flags); + + /* Request for ring buffer space */ + flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (flow_suspend_rqst == NULL) { + DHD_RING_UNLOCK(ring->ring_lock, flags); + DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND; + /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */ + flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update flow id info */ + for (index = 0; index < count; index++) + { + flow_suspend_rqst->ring_id[index] = ringid[index]; + } + flow_suspend_rqst->num = count; + + DHD_ERROR(("%s sending batch suspend!! 
count is %d\n", __FUNCTION__, count)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1); + + DHD_RING_UNLOCK(ring->ring_lock, flags); + + return BCME_OK; +} +#endif /* IDLE_TX_FLOW_MGMT */ + +static const char* etd_trap_name(hnd_ext_tag_trap_t tag) +{ + switch (tag) + { + case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE"; + case TAG_TRAP_STACK: return "TAG_TRAP_STACK"; + case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY"; + case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP"; + case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD"; + case TAG_TRAP_PHY: return "TAG_TRAP_PHY"; + case TAG_TRAP_BUS: return "TAG_TRAP_BUS"; + case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP"; + case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE"; + case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q"; + case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE"; + case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE"; + case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP"; + case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH"; + case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA"; + case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA"; + case TAG_TRAP_CODE: return "TAG_TRAP_CODE"; + case TAG_TRAP_LAST: + default: + return "Unknown"; + } + return "Unknown"; +} + +int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw) +{ + uint32 i; + uint32 *ext_data; + hnd_ext_trap_hdr_t *hdr; + const bcm_tlv_t *tlv; + const trap_t *tr; + const uint32 *stack; + const hnd_ext_trap_bp_err_t *bpe; + uint32 raw_len; + + ext_data = dhdp->extended_trap_data; + + /* return if there is no extended trap data */ + if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) + { + bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data); + return BCME_OK; + } + + bcm_bprintf(b, "Extended trap data\n"); + + /* First word is original trap_data */ + bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data); + ext_data++; + 
+ /* Followed by the extended trap data header */ + hdr = (hnd_ext_trap_hdr_t *)ext_data; + bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len); + + /* Dump a list of all tags found before parsing data */ + bcm_bprintf(b, "\nTags Found:\n"); + for (i = 0; i < TAG_TRAP_LAST; i++) { + tlv = bcm_parse_tlvs(hdr->data, hdr->len, i); + if (tlv) + bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len); + } + + if (raw) + { + raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0); + for (i = 0; i < raw_len; i++) + { + bcm_bprintf(b, "0x%08x ", ext_data[i]); + if (i % 4 == 3) + bcm_bprintf(b, "\n"); + } + return BCME_OK; + } + + /* Extract the various supported TLVs from the extended trap data */ + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE); + if (tlv) + { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len); + bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE); + if (tlv) + { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len); + tr = (const trap_t *)tlv->data; + + bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n", + tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr); + bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n", + tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6); + bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n", + tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK); + if (tlv) + { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len); + stack = (const uint32 *)tlv->data; + for (i = 0; i < (uint32)(tlv->len / 4); i++) + { + bcm_bprintf(b, " 0x%08x\n", *stack); + stack++; + } + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE); + if (tlv) + { + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), 
tlv->len); + bpe = (const hnd_ext_trap_bp_err_t *)tlv->data; + bcm_bprintf(b, " error: %x\n", bpe->error); + bcm_bprintf(b, " coreid: %x\n", bpe->coreid); + bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr); + bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl); + bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus); + bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl); + bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus); + bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl); + bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone); + bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus); + bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo); + bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi); + bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid); + bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser); + bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY); + if (tlv) + { + const hnd_ext_trap_heap_err_t* hme; + + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len); + hme = (const hnd_ext_trap_heap_err_t *)tlv->data; + bcm_bprintf(b, " arena total: %d\n", hme->arena_total); + bcm_bprintf(b, " heap free: %d\n", hme->heap_free); + bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse); + bcm_bprintf(b, " mf count: %d\n", hme->mf_count); + bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm); + + bcm_bprintf(b, " Histogram:\n"); + for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) { + if (hme->heap_histogm[i] == 0xfffe) + bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]); + else if (hme->heap_histogm[i] == 0xffff) + bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]); + else + bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2, + hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2) + * hme->heap_histogm[i + 1]); + } + + bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2); + for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) { + 
bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2); + } + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q); + if (tlv) + { + const hnd_ext_trap_pcie_mem_err_t* pqme; + + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len); + pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data; + bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len); + bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE); + if (tlv) + { + const hnd_ext_trap_wlc_mem_err_t* wsme; + + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len); + wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data; + bcm_bprintf(b, " instance: %d\n", wsme->instance); + bcm_bprintf(b, " associated: %d\n", wsme->associated); + bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt); + bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt); + bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]); + bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]); + bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]); + bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]); + + if (tlv->len >= (sizeof(*wsme) * 2)) { + wsme++; + bcm_bprintf(b, "\n instance: %d\n", wsme->instance); + bcm_bprintf(b, " associated: %d\n", wsme->associated); + bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt); + bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt); + bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]); + bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]); + bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]); + bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]); + } + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY); + if (tlv) + { + const hnd_ext_trap_phydbg_t* phydbg; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len); + phydbg = (const 
hnd_ext_trap_phydbg_t *)tlv->data; + bcm_bprintf(b, " err: 0x%x\n", phydbg->err); + bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus); + bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0); + bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1); + bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode); + bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0); + bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1); + bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl); + bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1); + bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1); + bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError); + bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError); + bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError); + bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0); + bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1); + bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2); + bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0); + bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1); + bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10); + bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11); + bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20); + bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21); + bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength); + bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr); + bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl); + bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel); + bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug); + for (i = 0; i < 3; i++) + bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD); + if (tlv) + { + const hnd_ext_trap_psmwd_t* psmwd; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), 
tlv->len); + psmwd = (const hnd_ext_trap_psmwd_t *)tlv; + bcm_bprintf(b, " version: 0x%x\n", psmwd->version); + bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol); + bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand); + bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus); + bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug); + bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st); + for (i = 0; i < 3; i++) + bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]); + bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8); + bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406); + bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408); + bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a); + bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c); + bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424); + bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426); + bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456); + bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480); + bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490); + bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500); + bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e); + bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e); + bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566); + bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690); + bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692); + bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694); + bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0); + bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838); + bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0); + bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt); + bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt); + bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt); + } + + tlv = bcm_parse_tlvs(hdr->data, 
hdr->len, TAG_TRAP_MAC_SUSP); + if (tlv) + { + const hnd_ext_trap_macsusp_t* macsusp; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len); + macsusp = (const hnd_ext_trap_macsusp_t *)tlv; + bcm_bprintf(b, " version: %d\n", macsusp->version); + bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason); + bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol); + bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand); + bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus); + for (i = 0; i < 4; i++) + bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]); + for (i = 0; i < 8; i++) + bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]); + bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a); + bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c); + bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490); + bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e); + bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e); + bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566); + bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690); + bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692); + bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694); + bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0); + bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838); + bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880); + bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt); + bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE); + if (tlv) + { + const hnd_ext_trap_macenab_t* macwake; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len); + macwake = (const hnd_ext_trap_macenab_t *)tlv; + bcm_bprintf(b, " version: 0x%x\n", macwake->version); + bcm_bprintf(b, " trap_reason: 0x%x\n", 
macwake->trap_reason); + bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol); + bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand); + bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus); + for (i = 0; i < 8; i++) + bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]); + bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st); + bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl); + bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8); + bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480); + bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490); + bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600); + bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690); + bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692); + bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0); + bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6); + bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8); + bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa); + bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS); + if (tlv) + { + const bcm_dngl_pcie_hc_t* hc; + bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len); + hc = (const bcm_dngl_pcie_hc_t *)tlv->data; + bcm_bprintf(b, " version: 0x%x\n", hc->version); + bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved); + bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type); + bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag); + bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg); + for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++) + bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP); + if (tlv) + { + const pcie_hmapviolation_t* hmap; + hmap = (const pcie_hmapviolation_t *)tlv->data; + bcm_bprintf(b, "\n%s len: 
%d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len); + bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo); + bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi); + bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info); + } + + return BCME_OK; +} + +#ifdef BCMPCIE +int +dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len, + uint16 seqnum, uint16 xt_id) +{ + dhd_prot_t *prot = dhdp->prot; + host_timestamp_msg_t *ts_req; + unsigned long flags; + uint16 alloced = 0; + uchar *ts_tlv_buf; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + if ((tlvs == NULL) || (tlv_len == 0)) { + DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n", + __FUNCTION__, tlvs, tlv_len)); + return -1; + } + + DHD_RING_LOCK(ctrl_ring->ring_lock, flags); + + /* if Host TS req already pending go away */ + if (prot->hostts_req_buf_inuse == TRUE) { + DHD_ERROR(("one host TS request already pending at device\n")); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + return -1; + } + + /* Request for cbuf space */ + ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE); + if (ts_req == NULL) { + DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n")); + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + return -1; + } + + /* Common msg buf hdr */ + ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP; + ts_req->msg.if_id = 0; + ts_req->msg.flags = ctrl_ring->current_phase; + ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID; + + ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + ts_req->xt_id = xt_id; + ts_req->seqnum = seqnum; + /* populate TS req buffer info */ + ts_req->input_data_len = htol16(tlv_len); + ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa)); + ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa)); + /* copy ioct payload */ + ts_tlv_buf = (void 
*) prot->hostts_req_buf.va; + prot->hostts_req_buf_inuse = TRUE; + memcpy(ts_tlv_buf, tlvs, tlv_len); + + OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len); + + if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) { + DHD_ERROR(("host TS req buffer address unaligned !!!!! \n")); + } + + DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n", + ts_req->msg.request_id, ts_req->input_data_len, + ts_req->xt_id, ts_req->seqnum)); + + /* upd wrt ptr and raise interrupt */ + dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + + DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags); + + return 0; +} /* dhd_prot_send_host_timestamp */ + +bool +dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->tx_ts_log_enabled = enable; + + return dhd->prot->tx_ts_log_enabled; +} + +bool +dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->rx_ts_log_enabled = enable; + + return dhd->prot->rx_ts_log_enabled; +} + +bool +dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->no_retry = enable; + + return dhd->prot->no_retry; +} + +bool +dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->no_aggr = enable; + + return dhd->prot->no_aggr; +} + +bool +dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->fixed_rate = enable; + + return dhd->prot->fixed_rate; +} +#endif /* BCMPCIE */ + +void +dhd_prot_dma_indx_free(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + + dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); +} + +void +dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd) +{ + if (dhd->prot->max_tsbufpost > 0) + dhd_msgbuf_rxbuf_post_ts_bufs(dhd); +} + +static void BCMFASTPATH +dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf) +{ + DHD_ERROR(("Timesunc 
feature not compiled in but GOT FW TS message\n")); + +} + +uint16 +dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp) +{ + return dhdp->prot->ioctl_trans_id; +} + +void dhd_get_hscb_info(struct dhd_prot *prot, void ** va, uint32 *len) +{ + if (va) { + *va = prot->host_scb_buf.va; + } + if (len) { + *len = prot->host_scb_buf.len; + } +} + +void dhd_get_hscb_buff(struct dhd_prot *prot, uint32 offset, uint32 length, void * buff) +{ + memcpy(buff, (char*)prot->host_scb_buf.va + offset, length); +} diff --git a/bcmdhd.100.10.315.x/dhd_pcie.c b/bcmdhd.100.10.315.x/dhd_pcie.c new file mode 100644 index 0000000..7fb69ce --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_pcie.c @@ -0,0 +1,10394 @@ +/* + * DHD Bus Module for PCIE + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pcie.c 771178 2018-07-09 08:34:50Z $ + */ + +/* include files */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ +#include +#include + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#include +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) +#include +#endif /* DEBUGGER || DHD_DSCOPE */ + +#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */ + +#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32)) +#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) +/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */ + +/* CTO Prevention Recovery */ +#ifdef BCMQT_HW +#define CTO_TO_CLEAR_WAIT_MS 10000 +#define CTO_TO_CLEAR_WAIT_MAX_CNT 100 +#else +#define CTO_TO_CLEAR_WAIT_MS 1000 +#define CTO_TO_CLEAR_WAIT_MAX_CNT 10 +#endif // endif + +/* Fetch address of a member in the pciedev_shared structure in dongle memory */ +#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \ + (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member) + +/* Fetch address of a member in rings_info_ptr structure in dongle memory */ +#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \ + (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member) + +/* Fetch address of a member in the ring_mem structure in dongle memory */ +#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \ + (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member) + +#if 
defined(SUPPORT_MULTIPLE_BOARD_REV) + extern unsigned int system_rev; +#endif /* SUPPORT_MULTIPLE_BOARD_REV */ + +#ifdef EWP_EDL +extern int host_edl_support; +#endif // endif + +/* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */ +uint dma_ring_indices = 0; +/* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */ +bool h2d_phase = 0; +/* This can be overwritten by module parameter(force_trap_bad_h2d_phase) + * defined in dhd_linux.c + */ +bool force_trap_bad_h2d_phase = 0; + +int dhd_dongle_memsize; +int dhd_dongle_ramsize; +static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); +static int dhdpcie_bus_readconsole(dhd_bus_t *bus); +#if defined(DHD_FW_COREDUMP) +struct dhd_bus *g_dhd_bus = NULL; +static int dhdpcie_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ + +static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size); +static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, + const char *name, void *params, + int plen, void *arg, int len, int val_size); +static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval); +static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, + uint32 len, uint32 srcdelay, uint32 destdelay, + uint32 d11_lpbk, uint32 core_num, uint32 wait); +static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter); +static int _dhdpcie_download_firmware(struct dhd_bus *bus); +static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh); +static int dhdpcie_bus_write_vars(dhd_bus_t *bus); +static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus); +static bool dhdpci_bus_read_frames(dhd_bus_t *bus); +static int dhdpcie_readshared(dhd_bus_t *bus); +static void dhdpcie_init_shared_addr(dhd_bus_t *bus); +static bool dhdpcie_dongle_attach(dhd_bus_t *bus); +static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size); +static void 
dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, + bool dongle_isolation, bool reset_flag); +static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh); +static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len); +static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); +static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); +static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); +static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset); +#ifdef DHD_SUPPORT_64BIT +static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used)); +static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used)); +#endif /* DHD_SUPPORT_64BIT */ +static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data); +static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size); +static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); +static void dhdpcie_fw_trap(dhd_bus_t *bus); +static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info); +extern void dhd_dpc_enable(dhd_pub_t *dhdp); +extern void dhd_dpc_kill(dhd_pub_t *dhdp); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static void dhdpcie_handle_mb_data(dhd_bus_t *bus); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef IDLE_TX_FLOW_MGMT +static void dhd_bus_check_idle_scan(dhd_bus_t *bus); +static void dhd_bus_idle_scan(dhd_bus_t *bus); +#endif /* IDLE_TX_FLOW_MGMT */ + +#ifdef EXYNOS_PCIE_DEBUG +extern void exynos_pcie_register_dump(int ch_num); +#endif /* EXYNOS_PCIE_DEBUG */ + +#if defined(DHD_H2D_LOG_TIME_SYNC) +static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus); +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +#define PCI_VENDOR_ID_BROADCOM 0x14e4 + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define MAX_D3_ACK_TIMEOUT 100 
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ +static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); +static void dhdpcie_cto_error_recovery(struct dhd_bus *bus); + +static int dhdpcie_init_d11status(struct dhd_bus *bus); + +static int dhdpcie_wrt_rnd(struct dhd_bus *bus); + +extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd); +extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost); + +#ifdef DHD_SSSR_DUMP +static int dhdpcie_sssr_dump(dhd_pub_t *dhd); +#endif /* DHD_SSSR_DUMP */ + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_MEMSIZE, + IOV_SET_DOWNLOAD_STATE, + IOV_DEVRESET, + IOV_VARS, + IOV_MSI_SIM, + IOV_PCIE_LPBK, + IOV_CC_NVMSHADOW, + IOV_RAMSIZE, + IOV_RAMSTART, + IOV_SLEEP_ALLOWED, + IOV_PCIE_DMAXFER, + IOV_PCIE_SUSPEND, + IOV_DONGLEISOLATION, + IOV_LTRSLEEPON_UNLOOAD, + IOV_METADATA_DBG, + IOV_RX_METADATALEN, + IOV_TX_METADATALEN, + IOV_TXP_THRESHOLD, + IOV_BUZZZ_DUMP, + IOV_DUMP_RINGUPD_BLOCK, + IOV_DMA_RINGINDICES, + IOV_FORCE_FW_TRAP, + IOV_DB1_FOR_MB, + IOV_FLOW_PRIO_MAP, + IOV_RXBOUND, + IOV_TXBOUND, + IOV_HANGREPORT, + IOV_H2D_MAILBOXDATA, + IOV_INFORINGS, + IOV_H2D_PHASE, + IOV_H2D_ENABLE_TRAP_BADPHASE, + IOV_H2D_TXPOST_MAX_ITEM, + IOV_TRAPDATA, + IOV_TRAPDATA_RAW, + IOV_CTO_PREVENTION, + IOV_PCIE_WD_RESET, + IOV_DUMP_DONGLE, + IOV_IDMA_ENABLE, + IOV_IFRM_ENABLE, + IOV_CLEAR_RING, + IOV_DAR_ENABLE, + IOV_DNGL_CAPS, /**< returns string with dongle capabilities */ +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + IOV_GDB_SERVER, /**< starts gdb server on given interface */ +#endif /* DEBUGGER || DHD_DSCOPE */ + IOV_INB_DW_ENABLE, + IOV_CTO_THRESHOLD, + IOV_HSCBSIZE, /* get HSCB buffer size */ + IOV_HSCBBYTES, /* copy HSCB buffer */ + IOV_PCIE_LAST /**< unused IOVAR */ +}; + +const bcm_iovar_t dhdpcie_iovars[] = { + {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 }, + {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"dwnldstate", 
IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 }, + {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 }, + {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 }, + {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 }, + {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 }, + {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 }, + {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) }, + {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 }, + {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 }, + {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 }, + {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 }, + {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0}, + {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 }, + {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, + {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, + {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 }, + {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 }, + {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 }, + {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 }, + {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 }, + {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 }, + {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 }, + {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 }, + {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0, + IOVT_UINT32, 0 }, + {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 }, + {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 }, + {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 }, + {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 }, + 
{"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 }, + {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER, + MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))}, + {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 }, + {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0}, +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 }, +#endif /* DEBUGGER || DHD_DSCOPE */ + {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 }, + {"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {NULL, 0, 0, 0, 0, 0 } +}; + +#define MAX_READ_TIMEOUT 5 * 1000 * 1000 + +#ifndef DHD_RXBOUND +#define DHD_RXBOUND 64 +#endif // endif +#ifndef DHD_TXBOUND +#define DHD_TXBOUND 64 +#endif // endif + +#define DHD_INFORING_BOUND 32 +#define DHD_BTLOGRING_BOUND 32 + +uint dhd_rxbound = DHD_RXBOUND; +uint dhd_txbound = DHD_TXBOUND; + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) +/** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */ +static struct dhd_gdb_bus_ops_s bus_ops = { + .read_u16 = dhdpcie_bus_rtcm16, + .read_u32 = dhdpcie_bus_rtcm32, + .write_u32 = dhdpcie_bus_wtcm32, +}; +#endif /* DEBUGGER || DHD_DSCOPE */ + +bool +dhd_bus_get_flr_force_fail(struct dhd_bus *bus) +{ + return bus->flr_force_fail; +} + +/** + * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to + * link with the bus driver, in order to look for or await the device. 
 */
int
dhd_bus_register(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	return dhdpcie_bus_register();
}

void
dhd_bus_unregister(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhdpcie_bus_unregister();
	return;
}

/** returns a host virtual address */
uint32 *
dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
{
	return (uint32 *)REG_MAP(addr, size);
}

/* Release a mapping obtained from dhdpcie_bus_reg_map(); 'size' is unused here. */
void
dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
{
	REG_UNMAP(addr);
	return;
}

/**
 * return H2D Doorbell registers address
 * use DAR registers instead of enum register for corerev >= 23 (4347B0)
 */
static INLINE uint
dhd_bus_db0_addr_get(struct dhd_bus *bus)
{
	uint addr = PCIH2D_MailBox;
	uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);

	return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
}

/* Second doorbell-0 register: DAR variant when DAR is active, else legacy mailbox 2. */
static INLINE uint
dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
{
	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
}

/* Doorbell-1 register address (DAR or legacy, depending on DAR_ACTIVE). */
static INLINE uint
dhd_bus_db1_addr_get(struct dhd_bus *bus)
{
	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
}

/* Secondary doorbell-1 register address (DAR or legacy). */
static INLINE uint
dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
{
	return ((DAR_ACTIVE(bus->dhd)) ?
		DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
}

/*
 * Drop one reference on the backplane power request; only when the last
 * reference is released is the request actually cleared in hardware.
 * Caller must hold the general lock (see dhd_bus_pcie_pwr_req_clear).
 */
static INLINE void
_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
{
	uint mask;

	/*
	 * If multiple de-asserts, decrement ref and return
	 * Clear power request when only one pending
	 * so initial request is not removed unexpectedly
	 */
	if (bus->pwr_req_ref > 1) {
		bus->pwr_req_ref--;
		return;
	}

	ASSERT(bus->pwr_req_ref == 1);

	if (MULTIBP_ENAB(bus->sih)) {
		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
		mask = SRPWR_DMN1_ARMBPSD_MASK;
	} else {
		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
	}

	si_srpwr_request(bus->sih, mask, 0);
	bus->pwr_req_ref = 0;
}

/* Locked wrapper: clear (unref) the backplane power request. */
static INLINE void
dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	DHD_GENERAL_LOCK(bus->dhd, flags);
	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

/* Lock-free variant; caller must already hold the appropriate lock
 * or be in a single-threaded context (e.g. attach/detach paths).
 */
static INLINE void
dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
}

/*
 * Take a reference on the backplane power request; the hardware request is
 * issued only on the first reference. Caller must hold the general lock.
 */
static INLINE void
_dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
{
	uint mask, val;

	/* If multiple request entries, increment reference and return */
	if (bus->pwr_req_ref > 0) {
		bus->pwr_req_ref++;
		return;
	}

	ASSERT(bus->pwr_req_ref == 0);

	if (MULTIBP_ENAB(bus->sih)) {
		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
		mask = SRPWR_DMN1_ARMBPSD_MASK;
		val = SRPWR_DMN1_ARMBPSD_MASK;
	} else {
		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
		val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
	}

	si_srpwr_request(bus->sih, mask, val);

	bus->pwr_req_ref = 1;
}

/* Locked wrapper: take a backplane power request reference. */
static INLINE void
dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	DHD_GENERAL_LOCK(bus->dhd, flags);
	_dhd_bus_pcie_pwr_req_cmn(bus);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

/* Request power for ALL domains unconditionally (no refcounting) —
 * used only by the reload workaround below.
 */
static INLINE void
_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
{
	uint mask, val;

	mask = SRPWR_DMN_ALL_MASK;
	val = SRPWR_DMN_ALL_MASK;

	si_srpwr_request(bus->sih, mask, val);
}

/* WAR for driver reload on buscorerev 66: force all power domains on. */
static INLINE void
dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	DHD_GENERAL_LOCK(bus->dhd, flags);
	_dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

/* Clear the MAC main/aux power-domain requests (domains 2 and 3 only). */
static INLINE void
_dhd_bus_pcie_pwr_req_clear_pd23_cmn(struct dhd_bus *bus)
{
	uint mask;

	mask = SRPWR_DMN3_MACMAIN_MASK | SRPWR_DMN2_MACAUX_MASK;

	si_srpwr_request(bus->sih, mask, 0);
}

/* Locked wrapper for the reload-WAR clear of MAC power domains. */
static INLINE void
dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	DHD_GENERAL_LOCK(bus->dhd, flags);
	_dhd_bus_pcie_pwr_req_clear_pd23_cmn(bus);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

/* Lock-free variant of dhd_bus_pcie_pwr_req; caller ensures exclusion. */
static INLINE void
dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_cmn(bus);
}

/* TRUE if this chip/buscore combination can use MSI interrupts;
 * older buscores (<= 14) and the listed chips are restricted to INTx.
 */
bool
dhdpcie_chip_support_msi(dhd_bus_t *bus)
{
	DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
		__FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
	if (bus->sih->buscorerev <= 14 ||
		si_chipid(bus->sih) == BCM4375_CHIP_ID ||
		si_chipid(bus->sih) == BCM4362_CHIP_ID ||
		si_chipid(bus->sih) == BCM43751_CHIP_ID ||
		si_chipid(bus->sih) == BCM4361_CHIP_ID ||
		si_chipid(bus->sih) == BCM4359_CHIP_ID) {
		return FALSE;
	} else {
		return TRUE;
	}
}

/**
 * Called once for each hardware (dongle) instance that this DHD manages.
 *
 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
 *
 * 'tcm' is the *host* virtual address at which tcm is mapped.
 */
int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
	volatile char *regs, volatile char *tcm, void *pci_dev)
{
	dhd_bus_t *bus = NULL;
	int ret = BCME_OK;

	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

	/* do/while(0) gives a single structured bail-out point on any failure */
	do {
		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
			ret = BCME_NORESOURCE;
			break;
		}

		bus->regs = regs;
		bus->tcm = tcm;
		bus->osh = osh;
		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
		bus->dev = (struct pci_dev *)pci_dev;

		dll_init(&bus->flowring_active_list);
#ifdef IDLE_TX_FLOW_MGMT
		bus->active_list_last_process_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */

		/* Attach pcie shared structure */
		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
			ret = BCME_NORESOURCE;
			break;
		}

		/* dhd_common_init(osh); */

		if (dhdpcie_dongle_attach(bus)) {
			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
			ret = BCME_NOTREADY;
			break;
		}

		/* software resources */
		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
			ret = BCME_NORESOURCE;
			break;
		}
		bus->dhd->busstate = DHD_BUS_DOWN;
		bus->db1_for_mb = TRUE;
		bus->dhd->hang_report = TRUE;
		bus->use_mailbox = FALSE;
		bus->use_d0_inform = FALSE;
		bus->intr_enabled = FALSE;
		bus->flr_force_fail = FALSE;
		/* update the dma indices if set through module parameter. */
		if (dma_ring_indices != 0) {
			dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
		}
		/* update h2d phase support if set through module parameter */
		bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
		/* update force trap on bad phase if set through module parameter */
		bus->dhd->force_dongletrap_on_bad_h2d_phase =
			force_trap_bad_h2d_phase ? TRUE : FALSE;
#ifdef IDLE_TX_FLOW_MGMT
		bus->enable_idle_flowring_mgmt = FALSE;
#endif /* IDLE_TX_FLOW_MGMT */
		bus->irq_registered = FALSE;

#ifdef DHD_MSI_SUPPORT
		bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
			PCIE_MSI : PCIE_INTX;
#else
		bus->d2h_intr_method = PCIE_INTX;
#endif /* DHD_MSI_SUPPORT */

		DHD_TRACE(("%s: EXIT SUCCESS\n",
			__FUNCTION__));
#ifdef DHD_FW_COREDUMP
		g_dhd_bus = bus;
#endif // endif
		*bus_ptr = bus;
		return ret;
	} while (0);

	/* Failure path: free whatever was allocated before the break.
	 * NOTE(review): resources acquired by dhdpcie_dongle_attach() (e.g. sih)
	 * are presumably released elsewhere on this path — confirm against
	 * dhdpcie_bus_release_dongle().
	 */
	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));

	if (bus && bus->pcie_sh) {
		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
	}

	if (bus) {
		MFREE(osh, bus, sizeof(dhd_bus_t));
	}

	return ret;
}

/* TRUE when the chip does not need a CLM blob download (4369 only here). */
bool
dhd_bus_skip_clm(dhd_pub_t *dhdp)
{
	switch (dhd_bus_chip_id(dhdp)) {
		case BCM4369_CHIP_ID:
			return TRUE;
		default:
			return FALSE;
	}
}

/* Chip id from the SI handle. */
uint
dhd_bus_chip(struct dhd_bus *bus)
{
	ASSERT(bus->sih != NULL);
	return bus->sih->chip;
}

/* Chip revision from the SI handle. */
uint
dhd_bus_chiprev(struct dhd_bus *bus)
{
	ASSERT(bus);
	ASSERT(bus->sih != NULL);
	return bus->sih->chiprev;
}

/* Public dhd_pub_t handle for this bus. */
void *
dhd_bus_pub(struct dhd_bus *bus)
{
	return bus->dhd;
}

/* Opaque SI handle for this bus. */
void *
dhd_bus_sih(struct dhd_bus *bus)
{
	return (void *)bus->sih;
}

/* Tx queue associated with this bus. */
void *
dhd_bus_txq(struct dhd_bus *bus)
{
	return &bus->txq;
}

/** Get Chip ID version */
uint dhd_bus_chip_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chip;
}

/** Get Chip Rev ID version */
uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chiprev;
}

/** Get Chip Pkg ID version */
uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chippkg;
}

/** Read and clear intstatus.
 This should be called with interrupts disabled or inside isr */
uint32
dhdpcie_bus_intstatus(dhd_bus_t *bus)
{
	uint32 intstatus = 0;
#ifndef DHD_READ_INTSTATUS_IN_DPC
	uint32 intmask = 0;
#endif /* DHD_READ_INTSTATUS_IN_DPC */

	/* After D3 ACK the device is in low power; don't touch its registers */
	if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
		DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
		return intstatus;
	}
	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
		(bus->sih->buscorerev == 2)) {
		/* Old buscores expose intstatus via PCI config space */
		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
		intstatus &= I_MB;
	} else {
		/* this is a PCIE core register..not a config register... */
		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);

#ifndef DHD_READ_INTSTATUS_IN_DPC
		/* this is a PCIE core register..not a config register... */
		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
		intstatus &= intmask;
#endif /* DHD_READ_INTSTATUS_IN_DPC */
		/* Is device removed. intstatus & intmask read 0xffffffff */
		if (intstatus == (uint32)-1) {
			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
			bus->is_linkdown = TRUE;
			dhd_pcie_debug_info_dump(bus->dhd);
			return intstatus;
		}

		/*
		 * The fourth argument to si_corereg is the "mask" fields of the register to update
		 * and the fifth field is the "value" to update. Now if we are interested in only
		 * few fields of the "mask" bit map, we should not be writing back what we read
		 * By doing so, we might clear/ack interrupts that are not handled yet.
		 */
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
			intstatus);

		intstatus &= bus->def_intmask;
	}

	return intstatus;
}

/**
 * Name:  dhdpcie_bus_isr
 * Parameters:
 * 1: IN int irq   -- interrupt vector
 * 2: IN void *arg -- handle to private data structure
 * Return value:
 * Status (TRUE or FALSE)
 *
 * Description:
 * Interrupt Service routine checks for the status register,
 * disable interrupt and queue DPC if mail box interrupts are raised.
 */
int32
dhdpcie_bus_isr(dhd_bus_t *bus)
{
	uint32 intstatus = 0;

	do {
		DHD_TRACE(("%s: Enter\n", __FUNCTION__));
		/* verify argument */
		if (!bus) {
			DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
			break;
		}

		if (bus->dhd->dongle_reset) {
			DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
			break;
		}

		if (bus->dhd->busstate == DHD_BUS_DOWN) {
			DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
			break;
		}

		/* avoid processing of interrupts until msgbuf prot is inited */
		if (!bus->intr_enabled) {
			DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
			break;
		}

		if (PCIECTO_ENAB(bus)) {
			/* read pci_intstatus */
			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);

			if (intstatus & PCI_CTO_INT_MASK) {
				/* reset backplane and cto,
				 * then access through pcie is recovered.
				 */
				dhdpcie_cto_error_recovery(bus);
				return TRUE;
			}
		}

		if (bus->d2h_intr_method == PCIE_MSI) {
			/* For MSI, as intstatus is cleared by firmware, no need to read */
			/* NOTE(review): the skip_intstatus_read label below is compiled
			 * only when DHD_READ_INTSTATUS_IN_DPC is NOT defined, while this
			 * goto is unconditional — verify the build configuration never
			 * combines MSI with DHD_READ_INTSTATUS_IN_DPC.
			 */
			goto skip_intstatus_read;
		}

#ifndef DHD_READ_INTSTATUS_IN_DPC
		intstatus = dhdpcie_bus_intstatus(bus);

		/* Check if the interrupt is ours or not */
		if (intstatus == 0) {
			/* in EFI since we poll for interrupt, this message will flood the logs
			 * so disable this for EFI
			 */
			DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
			break;
		}

		/* save the intstatus */
		/* read interrupt status register!! Status bits will be cleared in DPC !! */
		bus->intstatus = intstatus;

		/* return error for 0xFFFFFFFF */
		if (intstatus == (uint32)-1) {
			DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
				__FUNCTION__, intstatus));
			dhdpcie_disable_irq_nosync(bus);
			break;
		}

skip_intstatus_read:
		/* Overall operation:
		 *    - Mask further interrupts
		 *    - Read/ack intstatus
		 *    - Take action based on bits and state
		 *    - Reenable interrupts (as per state)
		 */

		/* Count the interrupt call */
		bus->intrcount++;
#endif /* DHD_READ_INTSTATUS_IN_DPC */

		bus->ipend = TRUE;

		bus->isr_intr_disable_count++;

		/* For Linux, Macos etc (otherthan NDIS) instead of disabling
		 * dongle interrupt by clearing the IntMask, disable directly
		 * interrupt from the host side, so that host will not recieve
		 * any interrupts at all, even though dongle raises interrupts
		 */
		dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */

		bus->intdis = TRUE;

#if defined(PCIE_ISR_THREAD)

		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK(bus->dhd);
		while (dhd_bus_dpc(bus));
		DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
		bus->dpc_sched = TRUE;
		dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
#endif /* defined(PCIE_ISR_THREAD) */

		DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
		return TRUE;

	} while (0);

	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
	return FALSE;
}

/* Program the PM_CSR power state (D0..D3hot) in PCI config space, honoring
 * the mandatory transition delays, then read back to verify. Returns BCME_OK
 * on success, BCME_ERROR on an invalid transition or verification failure.
 */
int
dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
{
	uint32 cur_state = 0;
	uint32 pm_csr = 0;
	osl_t *osh = bus->osh;

	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;

	if (cur_state == state) {
		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
		return BCME_OK;
	}

	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
		return BCME_ERROR;

	/* Validate the state transition
	 * if already in a lower power state, return error
	 */
	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
		cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
		cur_state > state) {
		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
		return BCME_ERROR;
	}

	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
	pm_csr |= state;

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);

	/* need to wait for the specified mandatory pcie power transition delay time */
	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
		cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
		OSL_DELAY(DHDPCIE_PM_D3_DELAY);
	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
		cur_state == PCIECFGREG_PM_CSR_STATE_D2)
		OSL_DELAY(DHDPCIE_PM_D2_DELAY);

	/* read back the power state and verify */
	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
	if (cur_state != state) {
		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
			__FUNCTION__, cur_state));
		return BCME_ERROR;
	} else {
		DHD_ERROR(("%s: power transition to %u success \n",
			__FUNCTION__, cur_state));
	}

	return BCME_OK;
}

/* Poll the vendor-ID config register until Broadcom is read back, proving the
 * device is accessible on the bus. BCME_OK when responsive, BCME_ERROR after
 * DHDPCIE_CONFIG_CHECK_RETRY_COUNT attempts.
 */
int
dhdpcie_config_check(dhd_bus_t *bus)
{
	uint32 i, val;
	int ret = BCME_ERROR;

	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
		if ((val & 0xFFFF) == VENDOR_BROADCOM) {
			ret = BCME_OK;
			break;
		}
		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
	}

	return ret;
}

/* Restore the PCI config space saved by dhdpcie_config_save(): header
 * (command register last among the header words), optionally PM_CSR, the MSI
 * capability, PCIe device/link control-status, L1 PM substates and the BAR
 * window registers. Returns BCME_ERROR if the device is not responding.
 */
int
dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
{
	uint32 i;
	osl_t *osh = bus->osh;

	if (BCME_OK != dhdpcie_config_check(bus)) {
		return BCME_ERROR;
	}

	/* Skip the first words up to PCI_CFG_REV; header[1] (command) rewritten after */
	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
	}
	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);

	if (restore_pmcsr)
		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
			sizeof(uint32), bus->saved_config.pmcsr);

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
		bus->saved_config.msi_addr0);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
		sizeof(uint32), bus->saved_config.msi_addr1);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
		sizeof(uint32), bus->saved_config.msi_data);

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
		sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
		sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
		sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
		sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
		sizeof(uint32), bus->saved_config.l1pm0);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
		sizeof(uint32), bus->saved_config.l1pm1);

	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
		bus->saved_config.bar0_win);
	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, sizeof(uint32),
		bus->saved_config.bar1_win);

	return BCME_OK;
}

/* Snapshot the PCI config space (header, PM_CSR, MSI capability, PCIe
 * device/link control-status, L1 PM substates, BAR windows) into
 * bus->saved_config for later restore. BCME_ERROR if the device is not
 * responding to config reads.
 */
int
dhdpcie_config_save(dhd_bus_t *bus)
{
	uint32 i;
	osl_t *osh = bus->osh;

	if (BCME_OK != dhdpcie_config_check(bus)) {
		return BCME_ERROR;
	}

	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
	}

	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));

	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
		sizeof(uint32));
	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
		sizeof(uint32));
	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
		sizeof(uint32));
	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
		sizeof(uint32));

	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
		PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));

	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
		sizeof(uint32));
	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
		sizeof(uint32));

	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
		sizeof(uint32));
	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
		sizeof(uint32));

	return BCME_OK;
}

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
dhd_pub_t *link_recovery = NULL;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

/* Derive the interrupt-related register offsets and default interrupt mask
 * from the bus core revision; must run before interrupts are enabled.
 */
static void
dhdpcie_bus_intr_init(dhd_bus_t *bus)
{
	uint buscorerev = bus->sih->buscorerev;
	bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
	bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
	bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
	bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
	if (buscorerev < 64) {
		bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
	}
}

/* Reset the dongle via the chipcommon watchdog; newer buscores (>= 66) reset
 * only PCIe function 0, older ones reset all functions.
 */
static void
dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
{
	uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
		(WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
	pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
}

/* Reset the dongle, preferring FLR; fall back to backplane or chipcommon
 * watchdog reset when the chip is not FLR capable.
 */
void
dhdpcie_dongle_reset(dhd_bus_t *bus)
{
	/* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
	if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
#ifdef DHD_USE_BP_RESET
		/* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */
		dhd_bus_perform_bp_reset(bus);
#else
		/* Legacy chipcommon watchdog reset */
		dhdpcie_cc_watchdog_reset(bus);
#endif /* DHD_USE_BP_RESET */
	}
}

/* Probe-time dongle bring-up: verify config space, attach the SI handle,
 * optionally reset the dongle, locate the ARM core and size the dongle RAM.
 * Returns 0 on success, -1 on failure (note: declared bool, used as int).
 */
static bool
dhdpcie_dongle_attach(dhd_bus_t *bus)
{
	osl_t *osh = bus->osh;
	volatile void *regsva = (volatile void*)bus->regs;
	uint16 devid;
	uint32 val;
	sbpcieregs_t *sbpcieregs;
	bool dongle_isolation;

	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
	link_recovery = bus->dhd;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

	bus->alp_only = TRUE;
	bus->sih = NULL;

	/* Checking PCIe bus status with reading configuration space */
	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
	if ((val & 0xFFFF) != VENDOR_BROADCOM) {
		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
		goto fail;
	}
	devid = (val >> 16) & 0xFFFF;
	bus->cl_devid = devid;

	/* Set bar0 window to si_enum_base */
	dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));

	/*
	 * Checking PCI_SPROM_CONTROL register for preventing invalid address access
	 * due to switch address space from PCI_BUS to SI_BUS.
	 */
	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
	if (val == 0xffffffff) {
		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
		goto fail;
	}

	/* si_attach() will provide an SI handle and scan the backplane */
	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
		&bus->vars, &bus->varsz))) {
		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
		goto fail;
	}

	if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
#if defined(BCMFPGA_HW)
		DHD_ERROR(("Disable CTO\n"));
		bus->cto_enable = FALSE;
#else
		DHD_ERROR(("Enable CTO\n"));
		bus->cto_enable = TRUE;
#endif // endif
		dhdpcie_cto_init(bus, bus->cto_enable);
		/*
		 * HW JIRA - CRWLPCIEGEN2-672
		 * Producer Index Feature which is used by F1 gets reset on F0 FLR
		 * fixed in REV68
		 */
		if (bus->sih->buscorerev == 66) {
			dhdpcie_ssreset_dis_enum_rst(bus);
		}

		/* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
		 * dhdpcie_bus_release_dongle() --> si_detach()
		 * dhdpcie_dongle_attach() --> si_attach()
		 */
		bus->pwr_req_ref = 0;
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_nolock(bus);
	}

	/* Olympic EFI requirement - stop driver load if FW is already running
	 * need to do this here before pcie_watchdog_reset, because
	 * pcie_watchdog_reset will put the ARM back into halt state
	 */
	if (!dhdpcie_is_arm_halted(bus)) {
		DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
			__FUNCTION__));
		goto fail;
	}

	BCM_REFERENCE(dongle_isolation);

	/* For inbuilt drivers pcie clk req will be done by RC,
	 * so do not do clkreq from dhd
	 */
	if (dhd_download_fw_on_driverload)
	{
		/* Enable CLKREQ# */
		dhdpcie_clkreq(bus->osh, 1, 1);
	}

	/*
	 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
	 * without checking dongle_isolation flag, but if it is called via some other path
	 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
	 * be called.
	 */
	if (bus->dhd == NULL) {
		/* dhd_attach not yet happened, do watchdog reset */
		dongle_isolation = FALSE;
	} else {
		dongle_isolation = bus->dhd->dongle_isolation;
	}

#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
	/*
	 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
	 * This is required to avoid spurious interrupts to the Host and bring back
	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
	 */
	if (dongle_isolation == FALSE) {
		dhdpcie_dongle_reset(bus);
	}
#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */

	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	sbpcieregs = (sbpcieregs_t*)(bus->regs);

	/* WAR where the BAR1 window may not be sized properly */
	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
	val = R_REG(osh, &sbpcieregs->configdata);
	W_REG(osh, &sbpcieregs->configdata, val);

	/* Get info on the ARM and SOCRAM cores... */
	/* Should really be qualified by device id */
	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
	    (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
		bus->armrev = si_corerev(bus->sih);
	} else {
		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
		goto fail;
	}

	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
		/* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
		 * adjusted.
		 */
		if (!bus->ramsize_adjusted) {
			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
				goto fail;
			}
			switch ((uint16)bus->sih->chip) {
			default:
				/* also populate base address */
				bus->dongle_ram_base = CA7_4365_RAM_BASE;
				bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
				break;
			}
		}
	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
			goto fail;
		}
	} else {
		/* cr4 has a different way to find the RAM size from TCM's */
		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
			goto fail;
		}
		/* also populate base address */
		switch ((uint16)bus->sih->chip) {
		case BCM4339_CHIP_ID:
		case BCM4335_CHIP_ID:
			bus->dongle_ram_base = CR4_4335_RAM_BASE;
			break;
		case BCM4358_CHIP_ID:
		case BCM4354_CHIP_ID:
		case BCM43567_CHIP_ID:
		case BCM43569_CHIP_ID:
		case BCM4350_CHIP_ID:
		case BCM43570_CHIP_ID:
			bus->dongle_ram_base = CR4_4350_RAM_BASE;
			break;
		case BCM4360_CHIP_ID:
			bus->dongle_ram_base = CR4_4360_RAM_BASE;
			break;

		case BCM4364_CHIP_ID:
			bus->dongle_ram_base = CR4_4364_RAM_BASE;
			break;

		CASE_BCM4345_CHIP:
			bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
			break;
		CASE_BCM43602_CHIP:
			bus->dongle_ram_base = CR4_43602_RAM_BASE;
			break;
		case BCM4349_CHIP_GRPID:
			/* RAM based changed from 4349c0(revid=9) onwards */
			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
			break;
		case BCM4347_CHIP_ID:
		case BCM4357_CHIP_ID:
		case BCM4361_CHIP_ID:
			bus->dongle_ram_base = CR4_4347_RAM_BASE;
			break;
		case BCM4362_CHIP_ID:
			bus->dongle_ram_base = CR4_4362_RAM_BASE;
			break;
		case BCM43751_CHIP_ID:
			bus->dongle_ram_base = CR4_43751_RAM_BASE;
			break;
		case BCM4375_CHIP_ID:
		case BCM4369_CHIP_ID:
			bus->dongle_ram_base = CR4_4369_RAM_BASE;
			break;
		default:
			bus->dongle_ram_base = 0;
			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
				__FUNCTION__, bus->dongle_ram_base));
		}
	}
	bus->ramsize = bus->orig_ramsize;
	if (dhd_dongle_memsize)
		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);

	if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
		DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
			__FUNCTION__, bus->ramsize, bus->ramsize));
		goto fail;
	}

	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
		bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));

	bus->srmemsize = si_socram_srmem_size(bus->sih);

	dhdpcie_bus_intr_init(bus);

	/* Set the poll and/or interrupt flags */
	bus->intr = (bool)dhd_intr;
	if ((bus->poll = (bool)dhd_poll))
		bus->pollrate = 1;
#ifdef DHD_DISABLE_ASPM
	dhd_bus_aspm_enable_rc_ep(bus, FALSE);
#endif /* DHD_DISABLE_ASPM */

	bus->idma_enabled = TRUE;
	bus->ifrm_enabled = TRUE;
	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear_nolock(bus);
	}

	bus->force_bt_quiesce = TRUE;

	return 0;

fail:
	if (bus->sih != NULL) {
		if (MULTIBP_ENAB(bus->sih)) {
			dhd_bus_pcie_pwr_req_clear_nolock(bus);
		}
		/* for EFI even if there is an error, load still succeeds
		 * so si_detach should not be called here, it is called during unload
		 */
		si_detach(bus->sih);
		bus->sih = NULL;
	}
	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
	return -1;
}

/* Unmask mailbox interrupts via PCI config space (old buscores only). */
int
dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
{
	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
	return 0;
}

/* Mask all mailbox interrupts via PCI config space (old buscores only). */
int
dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
{
	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
	return 0;
}

/* Non atomic function, caller should hold appropriate lock */
void
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus && bus->sih && !bus->is_linkdown) {
		/* Skip after recieving D3 ACK */
		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
			return;
		}
		/* buscorerev 2/4/6 use config-space masking; newer use core reg */
		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
			(bus->sih->buscorerev == 4)) {
			dhpcie_bus_unmask_interrupt(bus);
		} else {
			si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
				bus->def_intmask, bus->def_intmask);
		}
	}
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

/* Non atomic function, caller should hold appropriate lock */
void
dhdpcie_bus_intr_disable(dhd_bus_t *bus)
{
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus && bus->sih && !bus->is_linkdown) {
		/* Skip after recieving D3 ACK */
		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
			return;
		}
		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
			(bus->sih->buscorerev == 4)) {
			dhpcie_bus_mask_interrupt(bus);
		} else {
			si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
				bus->def_intmask, 0);
		}
	}
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

/*
 * dhdpcie_advertise_bus_cleanup advertises that clean up is under progress
 * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts
 * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for
 * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
 * they will exit from there itself without marking dhd_bus_busy_state as BUSY.
+ */ +void +dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms; + if (dhdp->dhd_watchdog_ms_backup) { + DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n", + __FUNCTION__)); + dhd_os_wd_timer(dhdp, 0); + } + if (dhdp->busstate != DHD_BUS_DOWN) { + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_GENERAL_UNLOCK(dhdp, flags); + } + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_REMOVE; + DHD_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhdpcie_bus_remove_prep(dhd_bus_t *bus) +{ + unsigned long flags; + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhd_os_sdlock(bus->dhd); + + if (bus->sih && !bus->dhd->dongle_isolation) { + if (bus->sih->buscorerev == 66) { + dhd_bus_pcie_pwr_req_reload_war(bus); + } + + /* Has insmod fails after rmmod issue in Brix Android */ + + /* if the pcie link is down, watchdog reset + * should not be done, as it may hang + */ + + if (!bus->is_linkdown) { + dhdpcie_dongle_reset(bus); + } + + bus->dhd->is_pcie_watchdog_reset = TRUE; + } + + dhd_os_sdunlock(bus->dhd); + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +void +dhd_init_bus_lock(dhd_bus_t *bus) +{ + if (!bus->bus_lock) { + 
bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh); + } +} + +void +dhd_deinit_bus_lock(dhd_bus_t *bus) +{ + if (bus->bus_lock) { + dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock); + bus->bus_lock = NULL; + } +} + +/** Detach and free everything */ +void +dhdpcie_bus_release(dhd_bus_t *bus) +{ + bool dongle_isolation = FALSE; + osl_t *osh = NULL; + unsigned long flags_bus; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + + osh = bus->osh; + ASSERT(osh); + + if (bus->dhd) { +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + debugger_close(); +#endif /* DEBUGGER || DHD_DSCOPE */ + dhdpcie_advertise_bus_remove(bus->dhd); + dongle_isolation = bus->dhd->dongle_isolation; + bus->dhd->is_pcie_watchdog_reset = FALSE; + dhdpcie_bus_remove_prep(bus); + + if (bus->intr) { + DHD_BUS_LOCK(bus->bus_lock, flags_bus); + dhdpcie_bus_intr_disable(bus); + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); + dhdpcie_free_irq(bus); + } + dhd_deinit_bus_lock(bus); + /** + * dhdpcie_bus_release_dongle free bus->sih handle, which is needed to + * access Dongle registers. + * dhd_detach will communicate with dongle to delete flowring ..etc. + * So dhdpcie_bus_release_dongle should be called only after the dhd_detach. + */ + dhd_detach(bus->dhd); + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_free(bus->dhd); + bus->dhd = NULL; + } + /* unmap the regs and tcm here!! 
*/ + if (bus->regs) { + dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE); + bus->regs = NULL; + } + if (bus->tcm) { + dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE); + bus->tcm = NULL; + } + + dhdpcie_bus_release_malloc(bus, osh); + /* Detach pcie shared structure */ + if (bus->pcie_sh) { + MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + bus->pcie_sh = NULL; + } + + if (bus->console.buf != NULL) { + MFREE(osh, bus->console.buf, bus->console.bufsize); + } + + /* Finally free bus info */ + MFREE(osh, bus, sizeof(dhd_bus_t)); + + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); +} /* dhdpcie_bus_release */ + +void +dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) { + DHD_TRACE(("%s Exit\n", __FUNCTION__)); + return; + } + + if (bus->sih) { + + if (!dongle_isolation && + (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) { + dhdpcie_dongle_reset(bus); + } + + if (bus->ltrsleep_on_unload) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); + } + + if (bus->sih->buscorerev == 13) + pcie_serdes_iddqdisable(bus->osh, bus->sih, + (sbpcieregs_t *) bus->regs); + + /* For inbuilt drivers pcie clk req will be done by RC, + * so do not do clkreq from dhd + */ + if (dhd_download_fw_on_driverload) + { + /* Disable CLKREQ# */ + dhdpcie_clkreq(bus->osh, 1, 0); + } + + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +uint32 +dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size) +{ + uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size); + return data; +} + +/** 32 bit config write */ +void 
+dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data); +} + +void +dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data); +} + +void +dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_MEMSIZE; + /* Restrict the memsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", + dhd_dongle_memsize, min_size)); + if ((dhd_dongle_memsize > min_size) && + (dhd_dongle_memsize < (int32)bus->orig_ramsize)) + bus->ramsize = dhd_dongle_memsize; +} + +void +dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); + return; + +} + +/** Stop bus module: clear pending frames, disable data flow */ +void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + uint32 status; + unsigned long flags, flags_bus; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus->dhd) + return; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__)); + goto done; + } + + DHD_DISABLE_RUNTIME_PM(bus->dhd); + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + atomic_set(&bus->dhd->block_bus, TRUE); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + DHD_BUS_LOCK(bus->bus_lock, flags_bus); + dhdpcie_bus_intr_disable(bus); + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); + + status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); + + if (!dhd_download_fw_on_driverload) { + dhd_dpc_kill(bus->dhd); + } + +#ifdef 
DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_disable(dhd_bus_to_dev(bus)); + pm_runtime_set_suspended(dhd_bus_to_dev(bus)); + pm_runtime_enable(dhd_bus_to_dev(bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + /* Clear rx control and wake any waiters */ + dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT); + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP); + +done: + return; +} + +/** + * Watchdog timer function. + * @param dhd Represents a specific hardware (dongle) instance that this DHD manages + */ +bool dhd_bus_watchdog(dhd_pub_t *dhd) +{ + unsigned long flags; + dhd_bus_t *bus = dhd->bus; + + DHD_GENERAL_LOCK(dhd, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) || + DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) { + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; + } + DHD_BUS_BUSY_SET_IN_WD(dhd); + DHD_GENERAL_UNLOCK(dhd, flags); + + /* Poll for console output periodically */ + if (dhd->busstate == DHD_BUS_DATA && + dhd->dhd_console_ms != 0 && + bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd->dhd_console_ms) { + bus->console.count -= dhd->dhd_console_ms; + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + /* Make sure backplane clock is on */ + if (dhdpcie_bus_readconsole(bus) < 0) { + dhd->dhd_console_ms = 0; /* On error, stop trying */ + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + } + } + +#ifdef DHD_READ_INTSTATUS_IN_DPC + if (bus->poll) { + bus->ipend = TRUE; + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); /* queue DPC now!! 
*/ + } +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + + DHD_GENERAL_LOCK(dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_WD(dhd); + dhd_os_busbusy_wake(dhd); + DHD_GENERAL_UNLOCK(dhd, flags); + + return TRUE; +} /* dhd_bus_watchdog */ + +#if defined(SUPPORT_MULTIPLE_REVISION) +static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + uint32 chiprev; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[20] = "_4358"; +#else + char chipver_tag[10] = {0, }; +#endif /* SUPPORT_MULTIPLE_CHIPS */ + + chiprev = dhd_bus_chiprev(bus); + if (chiprev == 0) { + DHD_ERROR(("----- CHIP 4358 A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else if (chiprev == 1) { + DHD_ERROR(("----- CHIP 4358 A1 -----\n")); +#if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) + strcat(chipver_tag, "_a1"); +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */ + } else if (chiprev == 3) { + DHD_ERROR(("----- CHIP 4358 A3 -----\n")); +#if defined(SUPPORT_MULTIPLE_CHIPS) + strcat(chipver_tag, "_a3"); +#endif /* SUPPORT_MULTIPLE_CHIPS */ + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev)); + } + + strcat(fw_path, chipver_tag); + +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) + if (chiprev == 1 || chiprev == 3) { + int ret = dhd_check_module_b85a(); + if ((chiprev == 1) && (ret < 0)) { + memset(chipver_tag, 0x00, sizeof(chipver_tag)); + strcat(chipver_tag, "_b85"); + strcat(chipver_tag, "_a1"); + } + } + + DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag)); +#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */ + +#if defined(SUPPORT_MULTIPLE_BOARD_REV) + if (system_rev >= 10) { + DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev)); + strcat(chipver_tag, "_r10"); + } +#endif /* SUPPORT_MULTIPLE_BOARD_REV */ + strcat(nv_path, chipver_tag); + + return 0; +} + +static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + 
uint32 chip_ver; + char chipver_tag[10] = {0, }; +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \ + defined(SUPPORT_BCM4359_MIXED_MODULES) + int module_type = -1; +#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ + + chip_ver = bus->sih->chiprev; + if (chip_ver == 4) { + DHD_ERROR(("----- CHIP 4359 B0 -----\n")); + strncat(chipver_tag, "_b0", strlen("_b0")); + } else if (chip_ver == 5) { + DHD_ERROR(("----- CHIP 4359 B1 -----\n")); + strncat(chipver_tag, "_b1", strlen("_b1")); + } else if (chip_ver == 9) { + DHD_ERROR(("----- CHIP 4359 C0 -----\n")); + strncat(chipver_tag, "_c0", strlen("_c0")); + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver)); + return -1; + } + +#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \ + defined(SUPPORT_BCM4359_MIXED_MODULES) + module_type = dhd_check_module_b90(); + + switch (module_type) { + case BCM4359_MODULE_TYPE_B90B: + strcat(fw_path, chipver_tag); + break; + case BCM4359_MODULE_TYPE_B90S: + default: + /* + * .cid.info file not exist case, + * loading B90S FW force for initial MFG boot up. 
+ */ + if (chip_ver == 5) { + strncat(fw_path, "_b90s", strlen("_b90s")); + } + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + break; + } +#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); +#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ + + return 0; +} + +#if defined(USE_CID_CHECK) + +#define MAX_EXTENSION 20 +#define MODULE_BCM4361_INDEX 3 +#define CHIP_REV_A0 1 +#define CHIP_REV_A1 2 +#define CHIP_REV_B0 3 +#define CHIP_REV_B1 4 +#define CHIP_REV_B2 5 +#define CHIP_REV_C0 6 +#define BOARD_TYPE_EPA 0x080f +#define BOARD_TYPE_IPA 0x0827 +#define BOARD_TYPE_IPA_OLD 0x081a +#define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA" +#define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA" +#define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1" +#define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0" +#define MAX_VID_LEN 8 +#define MAX_VNAME_LEN 30 +#define CIS_TUPLE_HDR_LEN 2 +#if defined(BCM4361_CHIP) +#define CIS_TUPLE_START_ADDRESS 0x18011110 +#define CIS_TUPLE_END_ADDRESS 0x18011167 +#elif defined(BCM4375_CHIP) +#define CIS_TUPLE_START_ADDRESS 0x18011120 +#define CIS_TUPLE_END_ADDRESS 0x18011177 +#endif /* defined(BCM4361_CHIP) */ +#define CIS_TUPLE_MAX_COUNT (CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\ + + 1) / sizeof(uint32) +#define CIS_TUPLE_TAG_START 0x80 +#define CIS_TUPLE_TAG_VENDOR 0x81 +#define CIS_TUPLE_TAG_BOARDTYPE 0x1b +#define CIS_TUPLE_TAG_LENGTH 1 +#define NVRAM_FEM_MURATA "_murata" +#define CID_FEM_MURATA "_mur_" + +typedef struct cis_tuple_format { + uint8 id; + uint8 len; /* total length of tag and data */ + uint8 tag; + uint8 data[1]; +} cis_tuple_format_t; + +typedef struct { + char cid_ext[MAX_EXTENSION]; + char nvram_ext[MAX_EXTENSION]; + char fw_ext[MAX_EXTENSION]; +} naming_info_t; + +naming_info_t bcm4361_naming_table[] = { + { {""}, {""}, {""} }, + { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} }, + 
{ {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} }, + { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} }, + { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} }, + { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} }, + { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} }, + { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} }, + { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} }, + { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} }, + { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} }, + { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} }, + { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} }, + { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} }, + { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} }, + { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} }, + { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} }, + { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} }, + { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} }, + { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} }, + { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} }, + { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} }, + { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, + { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */ + { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} }, + { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} }, + { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} }, + { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} }, + { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} }, + { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} }, + { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} }, + { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} }, + { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} }, + { {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} }, + { {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} } +}; + +#define MODULE_BCM4375_INDEX 3 + +naming_info_t bcm4375_naming_table[] = { + { {""}, {""}, {""} }, + { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} }, + { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} }, + { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} }, + { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} }, + { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} }, + { {"e43_cs51"}, {"_CS00_semco_b1"}, {"_b1"} }, + { {"e43_cs61"}, {"_CS00_skyworks_b1"}, 
{"_b1"} }, + { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} }, + { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} }, + { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} }, + { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} }, + { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} } +}; + +static naming_info_t * +dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type) +{ + int index_found = 0, i = 0; + + if (module_type && strlen(module_type) > 0) { + for (i = 1; i < table_size; i++) { + if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) { + index_found = i; + break; + } + } + } + + DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found)); + + return &table[index_found]; +} + +static naming_info_t * +dhd_find_naming_info_by_cid(naming_info_t table[], int table_size, + char *cid_info) +{ + int index_found = 0, i = 0; + char *ptr; + + /* truncate extension */ + for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) { + ptr = bcmstrstr(ptr, "_"); + if (ptr) { + ptr++; + } + } + + for (i = 1; i < table_size && ptr; i++) { + if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) { + index_found = i; + break; + } + } + + DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found)); + + return &table[index_found]; +} + +static int +dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype, + unsigned char *vid, int *vid_length) +{ + int boardtype_backplane_addr[] = { + 0x18010324, /* OTP Control 1 */ + 0x18012618, /* PMU min resource mask */ + }; + int boardtype_backplane_data[] = { + 0x00fa0000, + 0x0e4fffff /* Keep on ARMHTAVAIL */ + }; + int int_val = 0, i = 0; + cis_tuple_format_t *tuple; + int totlen, len; + uint32 raw_data[CIS_TUPLE_MAX_COUNT]; + + for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) { + /* Write new OTP and PMU configuration */ + if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int), + &boardtype_backplane_data[i], FALSE) != BCME_OK) { + DHD_ERROR(("invalid size/addr combination\n")); + 
return BCME_ERROR; + } + + if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int), + &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("invalid size/addr combination\n")); + return BCME_ERROR; + } + + DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n", + __FUNCTION__, boardtype_backplane_addr[i], int_val)); + } + + /* read tuple raw data */ + for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) { + if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32), + sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) { + break; + } + } + + totlen = i * sizeof(uint32); + tuple = (cis_tuple_format_t *)raw_data; + + /* check the first tuple has tag 'start' */ + if (tuple->id != CIS_TUPLE_TAG_START) { + return BCME_ERROR; + } + + *vid_length = *boardtype = 0; + + /* find tagged parameter */ + while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) && + (*vid_length == 0 || *boardtype == 0)) { + len = tuple->len; + + if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) && + (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) { + /* found VID */ + memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); + *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH; + prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); + } + else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) && + (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) { + /* found boardtype */ + *boardtype = (int)tuple->data[0]; + prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); + } + + tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN)); + totlen -= (len + CIS_TUPLE_HDR_LEN); + } + + if (*vid_length <= 0 || *boardtype <= 0) { + DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n", + *vid_length, *boardtype)); + return BCME_ERROR; + } + + return BCME_OK; + +} + +static naming_info_t * +dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size, + dhd_bus_t *bus, bool *is_murata_fem) +{ + int board_type = 0, chip_rev = 0, vid_length = 0; + 
unsigned char vid[MAX_VID_LEN]; + naming_info_t *info = &table[0]; + char *cid_info = NULL; + + if (!bus || !bus->sih) { + DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus)); + return NULL; + } + chip_rev = bus->sih->chiprev; + + if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length) + != BCME_OK) { + DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__)); + return NULL; + } + + DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev)); + +#if defined(BCM4361_CHIP) + /* A0 chipset has exception only */ + if (chip_rev == CHIP_REV_A0) { + if (board_type == BOARD_TYPE_EPA) { + info = dhd_find_naming_info(table, table_size, + DEFAULT_CIDINFO_FOR_EPA); + } else if ((board_type == BOARD_TYPE_IPA) || + (board_type == BOARD_TYPE_IPA_OLD)) { + info = dhd_find_naming_info(table, table_size, + DEFAULT_CIDINFO_FOR_IPA); + } + } else { + cid_info = dhd_get_cid_info(vid, vid_length); + if (cid_info) { + info = dhd_find_naming_info_by_cid(table, table_size, cid_info); + if (strstr(cid_info, CID_FEM_MURATA)) { + *is_murata_fem = TRUE; + } + } + } +#else + cid_info = dhd_get_cid_info(vid, vid_length); + if (cid_info) { + info = dhd_find_naming_info_by_cid(table, table_size, cid_info); + if (strstr(cid_info, CID_FEM_MURATA)) { + *is_murata_fem = TRUE; + } + } +#endif /* BCM4361_CHIP */ + + return info; +} +#endif /* USE_CID_CHECK */ + +static int +concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + int ret = BCME_OK; +#if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK) + char module_type[MAX_VNAME_LEN]; + naming_info_t *info = NULL; + bool is_murata_fem = FALSE; + + memset(module_type, 0, sizeof(module_type)); + + if (dhd_check_module_bcm(module_type, + MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) { + info = dhd_find_naming_info(bcm4361_naming_table, + ARRAYSIZE(bcm4361_naming_table), module_type); + } else { + /* in case of .cid.info doesn't exists */ + info = 
dhd_find_naming_info_by_chip_rev(bcm4361_naming_table, + ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem); + } + + if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) { + is_murata_fem = FALSE; + } + + if (info) { + if (is_murata_fem) { + strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA)); + } + strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext)); + strncat(fw_path, info->fw_ext, strlen(info->fw_ext)); + } else { + DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__)); + ret = BCME_ERROR; + } +#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */ + char chipver_tag[10] = {0, }; + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); +#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */ + + return ret; +} + +static int +concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + int ret = BCME_OK; +#if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK) + char module_type[MAX_VNAME_LEN]; + naming_info_t *info = NULL; + bool is_murata_fem = FALSE; + + memset(module_type, 0, sizeof(module_type)); + + if (dhd_check_module_bcm(module_type, + MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) { + info = dhd_find_naming_info(bcm4375_naming_table, + ARRAYSIZE(bcm4375_naming_table), module_type); + } else { + /* in case of .cid.info doesn't exists */ + info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table, + ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem); + } + + if (info) { + strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext)); + strncat(fw_path, info->fw_ext, strlen(info->fw_ext)); + } else { + DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__)); + ret = BCME_ERROR; + } +#else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */ + char chipver_tag[10] = {0, }; + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); +#endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */ + + return ret; +} + +int 
+concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + int res = 0; + + if (!bus || !bus->sih) { + DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); + return -1; + } + + if (!fw_path || !nv_path) { + DHD_ERROR(("fw_path or nv_path is null.\n")); + return res; + } + + switch (si_chipid(bus->sih)) { + + case BCM43569_CHIP_ID: + case BCM4358_CHIP_ID: + res = concate_revision_bcm4358(bus, fw_path, nv_path); + break; + case BCM4355_CHIP_ID: + case BCM4359_CHIP_ID: + res = concate_revision_bcm4359(bus, fw_path, nv_path); + break; + case BCM4361_CHIP_ID: + case BCM4347_CHIP_ID: + res = concate_revision_bcm4361(bus, fw_path, nv_path); + break; + case BCM4375_CHIP_ID: + res = concate_revision_bcm4375(bus, fw_path, nv_path); + break; + default: + DHD_ERROR(("REVISION SPECIFIC feature is not required\n")); + return res; + } + + return res; +} +#endif /* SUPPORT_MULTIPLE_REVISION */ + +uint16 +dhd_get_chipid(dhd_pub_t *dhd) +{ + dhd_bus_t *bus = dhd->bus; + + if (bus && bus->sih) + return (uint16)si_chipid(bus->sih); + else + return 0; +} + +/** + * Loads firmware given by caller supplied path and nvram image into PCIe dongle. + * + * BCM_REQUEST_FW specific : + * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing + * firmware and nvm for that chip. If the download fails, retries download with a different nvm file + * + * BCMEMBEDIMAGE specific: + * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header + * file will be used instead. 
+ * + * @return BCME_OK on success + */ +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; + +#if defined(SUPPORT_MULTIPLE_REVISION) + if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) { + DHD_ERROR(("%s: fail to concatnate revison \n", + __FUNCTION__)); + return BCME_BADARG; + } +#endif /* SUPPORT_MULTIPLE_REVISION */ + +#if defined(DHD_BLOB_EXISTENCE_CHECK) + dhd_set_blob_support(bus->dhd, bus->fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + + DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + dhdpcie_dump_resource(bus); + + ret = dhdpcie_download_firmware(bus, osh); + + return ret; +} + +void +dhd_set_path_params(struct dhd_bus *bus) +{ + /* External conf takes precedence if specified */ + dhd_conf_preinit(bus->dhd); + + if (bus->dhd->clm_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path); + } + dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); + if (bus->dhd->conf_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path); + } +#ifdef CONFIG_PATH_AUTO_SELECT + dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); +#endif + + dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); + + dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); + dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); + dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); + + printf("Final fw_path=%s\n", bus->fw_path); + printf("Final nv_path=%s\n", bus->nv_path); + printf("Final clm_path=%s\n", bus->dhd->clm_path); + printf("Final conf_path=%s\n", bus->dhd->conf_path); + +} + +void +dhd_set_bus_params(struct dhd_bus *bus) +{ + if (bus->dhd->conf->dhd_poll >= 0) { + bus->poll = 
bus->dhd->conf->dhd_poll; + if (!bus->pollrate) + bus->pollrate = 1; + printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll); + } +} + +/** + * Loads firmware given by 'bus->fw_path' into PCIe dongle. + * + * BCM_REQUEST_FW specific : + * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing + * firmware and nvm for that chip. If the download fails, retries download with a different nvm file + * + * BCMEMBEDIMAGE specific: + * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header + * file will be used instead. + * + * @return BCME_OK on success + */ +static int +dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) +{ + int ret = 0; +#if defined(BCM_REQUEST_FW) + uint chipid = bus->sih->chip; + uint revid = bus->sih->chiprev; + char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */ + char nv_path[64]; /* path to nvram vars file */ + bus->fw_path = fw_path; + bus->nv_path = nv_path; + switch (chipid) { + case BCM43570_CHIP_ID: + bcmstrncat(fw_path, "43570", 5); + switch (revid) { + case 0: + bcmstrncat(fw_path, "a0", 2); + break; + case 2: + bcmstrncat(fw_path, "a2", 2); + break; + default: + DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__, + revid)); + break; + } + break; + default: + DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__, + chipid)); + return 0; + } + /* load board specific nvram file */ + snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path); + /* load firmware */ + snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path); +#endif /* BCM_REQUEST_FW */ + + DHD_OS_WAKE_LOCK(bus->dhd); + + dhd_set_path_params(bus); + dhd_set_bus_params(bus); + + ret = _dhdpcie_download_firmware(bus); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} /* dhdpcie_download_firmware */ + +#define DHD_MEMORY_SET_PATTERN 0xAA + +/** + * Downloads a file containing firmware into dongle memory. 
In case of a .bea file, the DHD + * is updated with the event logging partitions within that file as well. + * + * @param pfw_path Path to .bin or .bea file + */ +static int +dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = BCME_ERROR; + int offset = 0; +#if defined(DHD_FW_MEM_CORRUPTION) + uint8 *p_org_fw = NULL; + uint32 org_fw_size = 0; + uint32 fw_write_offset = 0; +#endif /* DHD_FW_MEM_CORRUPTION */ + int len = 0; + bool store_reset; + char *imgbuf = NULL; + uint8 *memblock = NULL, *memptr = NULL; + uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct + int offset_end = bus->ramsize; + uint32 file_size = 0, read_len = 0; + + DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); + + /* Should succeed in opening image if it is actually given through registry + * entry or in module param. + */ + imgbuf = dhd_os_open_image1(bus->dhd, pfw_path); + if (imgbuf == NULL) { + printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path); + goto err; + } + + file_size = dhd_os_get_image_size(imgbuf); + if (!file_size) { + DHD_ERROR(("%s: get file size fails ! 
\n", __FUNCTION__)); + goto err; + } + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + bcmerror = BCME_NOMEM; + goto err; + } + if (dhd_msg_level & DHD_TRACE_VAL) { + memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memptr_tmp == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) { + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + } + +#if defined(DHD_FW_MEM_CORRUPTION) + if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { + org_fw_size = file_size; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + p_org_fw = (uint8*)DHD_OS_PREALLOC(bus->dhd, + DHD_PREALLOC_MEMDUMP_RAM, org_fw_size); +#else + p_org_fw = (uint8*)VMALLOC(bus->dhd->osh, org_fw_size); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + if (p_org_fw == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n", + __FUNCTION__, org_fw_size)); + bcmerror = BCME_NOMEM; + goto err; + } else { + memset(p_org_fw, 0, org_fw_size); + } + } +#endif /* DHD_FW_MEM_CORRUPTION */ + + /* check if CR4/CA7 */ + store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)); + /* Download image with MEMBLOCK size */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + + read_len += len; + if (read_len > file_size) { + DHD_ERROR(("%s: WARNING! 
reading beyond EOF, len=%d; read_len=%u;" + " file_size=%u truncating len to %d \n", __FUNCTION__, + len, read_len, file_size, (len - (read_len - file_size)))); + len -= (read_len - file_size); + } + + /* if address is 0, store the reset instruction to be written in 0 */ + if (store_reset) { + ASSERT(offset == 0); + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + offset_end += offset; + store_reset = FALSE; + } + + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + if (dhd_msg_level & DHD_TRACE_VAL) { + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + if (memcmp(memptr_tmp, memptr, len)) { + DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__)); + goto err; + } else + DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); + } + offset += MEMBLOCK; +#if defined(DHD_FW_MEM_CORRUPTION) + if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { + memcpy((p_org_fw + fw_write_offset), memptr, len); + fw_write_offset += len; + } +#endif /* DHD_FW_MEM_CORRUPTION */ + + if (offset >= offset_end) { + DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n", + __FUNCTION__, offset, offset_end)); + bcmerror = BCME_ERROR; + goto err; + } + + if (read_len >= file_size) { + break; + } + } +#ifdef DHD_FW_MEM_CORRUPTION + /* Read and compare the downloaded code */ + if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { + unsigned char *p_readback_buf = NULL; + uint32 compared_len; + uint32 remaining_len = 0; + + compared_len = 0; + p_readback_buf = MALLOC(bus->dhd->osh, MEMBLOCK); + if (p_readback_buf == NULL) { + 
DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n", + __FUNCTION__, MEMBLOCK)); + bcmerror = BCME_NOMEM; + goto compare_err; + } + /* Read image to verify downloaded contents. */ + offset = bus->dongle_ram_base; + + while (compared_len < org_fw_size) { + memset(p_readback_buf, DHD_MEMORY_SET_PATTERN, MEMBLOCK); + remaining_len = org_fw_size - compared_len; + + if (remaining_len >= MEMBLOCK) { + len = MEMBLOCK; + } else { + len = remaining_len; + } + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, + (uint8 *)p_readback_buf, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto compare_err; + } + + if (memcmp((p_org_fw + compared_len), p_readback_buf, len) != 0) { + DHD_ERROR(("%s: Downloaded image is corrupted. offset %d\n", + __FUNCTION__, compared_len)); + bcmerror = BCME_ERROR; + goto compare_err; + } + + compared_len += len; + offset += len; + } + DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); + +compare_err: + if (p_readback_buf) { + MFREE(bus->dhd->osh, p_readback_buf, MEMBLOCK); + } + } +#endif /* DHD_FW_MEM_CORRUPTION */ + +err: +#if defined(DHD_FW_MEM_CORRUPTION) + if (p_org_fw) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(bus->dhd, p_org_fw, org_fw_size); +#else + VMFREE(bus->dhd->osh, p_org_fw, org_fw_size); +#endif // endif + } +#endif /* DHD_FW_MEM_CORRUPTION */ + if (memblock) { + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + if (dhd_msg_level & DHD_TRACE_VAL) { + if (memptr_tmp) + MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); + } + } + + if (imgbuf) { + dhd_os_close_image1(bus->dhd, imgbuf); + } + + return bcmerror; +} /* dhdpcie_download_code_file */ + +static int +dhdpcie_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = BCME_ERROR; + uint len; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + 
bool nvram_uefi_exists = FALSE; + bool local_alloc = FALSE; + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* First try UEFI */ + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len); + + /* If UEFI empty, then read from file system */ + if ((len <= 0) || (memblock == NULL)) { + + if (nvram_file_exists) { + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len); + if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) { + goto err; + } + } + else { + /* For SROM OTP no external file or UEFI required */ + bcmerror = BCME_OK; + } + } else { + nvram_uefi_exists = TRUE; + } + + DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len)); + + if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) { + bufp = (char *) memblock; + + { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + } + } + + DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len)); + + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } + +err: + if (memblock) { + if (local_alloc) { + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + } else { + dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE); + } + } + + return bcmerror; +} + +static int +dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len) +{ + int bcmerror = BCME_ERROR; + char *imgbuf = NULL; + + if (buf == NULL || len == 0) + goto err; + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + // opens and seeks to correct file offset: + imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path); + if (imgbuf == NULL) { + DHD_ERROR(("%s: Failed to open firmware 
file\n", __FUNCTION__)); + goto err; + } + + /* Read it */ + if (len != dhd_os_get_image_block(buf, len, imgbuf)) { + DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len)); + goto err; + } + + bcmerror = BCME_OK; + } + +err: + if (imgbuf) + dhd_os_close_image1(bus->dhd, imgbuf); + + return bcmerror; +} + +/* The ramsize can be changed in the dongle image, for example 4365 chip share the sysmem + * with BMC and we can adjust how many sysmem belong to CA7 during dongle compilation. + * So in DHD we need to detect this case and update the correct dongle RAMSIZE as well. + */ +static void +dhdpcie_ramsize_adj(struct dhd_bus *bus) +{ + int i, search_len = 0; + uint8 *memptr = NULL; + uint8 *ramsizeptr = NULL; + uint ramsizelen; + uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST}; + hnd_ramsize_ptr_t ramsize_info; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Adjust dongle RAMSIZE already called. */ + if (bus->ramsize_adjusted) { + return; + } + + /* success or failure, we don't want to be here + * more than once. 
+ */ + bus->ramsize_adjusted = TRUE; + + /* Not handle if user restrict dongle ram size enabled */ + if (dhd_dongle_memsize) { + DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__, + dhd_dongle_memsize)); + return; + } + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { + DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__)); + return; + } + + /* Get maximum RAMSIZE info search length */ + for (i = 0; ; i++) { + if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) + break; + + if (search_len < (int)ramsize_ptr_ptr[i]) + search_len = (int)ramsize_ptr_ptr[i]; + } + + if (!search_len) + return; + + search_len += sizeof(hnd_ramsize_ptr_t); + + memptr = MALLOC(bus->dhd->osh, search_len); + if (memptr == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len)); + return; + } + + /* External image takes precedence if specified */ + if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) { + goto err; + } + else { + ramsizeptr = memptr; + ramsizelen = search_len; + } + + if (ramsizeptr) { + /* Check Magic */ + for (i = 0; ; i++) { + if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) + break; + + if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen) + continue; + + memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i], + sizeof(hnd_ramsize_ptr_t)); + + if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) { + bus->orig_ramsize = LTOH32(ramsize_info.ram_size); + bus->ramsize = LTOH32(ramsize_info.ram_size); + DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__, + bus->ramsize)); + break; + } + } + } + +err: + if (memptr) + MFREE(bus->dhd->osh, memptr, search_len); + + return; +} /* dhdpcie_ramsize_adj */ + +/** + * Downloads firmware file given by 'bus->fw_path' into PCIe dongle + * + * BCMEMBEDIMAGE specific: + * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header + * file will 
be used instead. + * + */ +static int +_dhdpcie_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { + DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__)); + return 0; + } + /* Adjust ram size */ + dhdpcie_ramsize_adj(bus); + + /* Keep arm in reset */ + if (dhdpcie_bus_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdpcie_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__, + __LINE__)); + goto err; + } else { + embed = FALSE; + dlok = TRUE; + } + } + + BCM_REFERENCE(embed); + if (!dlok) { + DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__)); + goto err; + } + + /* EXAMPLE: nvram_array */ + /* If a valid nvram_arry is specified as above, it can be passed down to dongle */ + /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */ + + /* External nvram takes precedence if specified */ + if (dhdpcie_download_nvram(bus)) { + DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__)); + goto err; + } + + /* Take arm out of reset */ + if (dhdpcie_bus_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} /* _dhdpcie_download_firmware */ + +static int +dhdpcie_bus_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + uint readlen = 0; + uint i = 0; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return -1; 
+ + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize)); + } + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf, + idx, c->last)); + + /* Read the console buffer data to a local buffer */ + /* optimize and read only the portion of the buffer needed, but + * important to handle wrap-around. + */ + addr = ltoh32(c->log.buf); + + /* wrap around case - write ptr < read ptr */ + if (idx < c->last) { + /* from read ptr to end of buffer */ + readlen = c->bufsize - c->last; + if ((rv = dhdpcie_bus_membytes(bus, FALSE, + addr + c->last, c->buf, readlen)) < 0) { + DHD_ERROR(("conlog: read error[1] ! \n")); + return rv; + } + /* from beginning of buffer to write ptr */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, + addr, c->buf + readlen, + idx)) < 0) { + DHD_ERROR(("conlog: read error[2] ! \n")); + return rv; + } + readlen += idx; + } else { + /* non-wraparound case, write ptr > read ptr */ + readlen = (uint)idx - c->last; + if ((rv = dhdpcie_bus_membytes(bus, FALSE, + addr + c->last, c->buf, readlen)) < 0) { + DHD_ERROR(("conlog: read error[3] ! 
\n")); + return rv; + } + } + /* update read ptr */ + c->last = idx; + + /* now output the read data from the local buffer to the host console */ + while (i < readlen) { + for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) { + ch = c->buf[i]; + ++i; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + DHD_FWLOG(("CONSOLE: %s\n", line)); + } + } + + return BCME_OK; + +} /* dhdpcie_bus_readconsole */ + +void +dhd_bus_dump_console_buffer(dhd_bus_t *bus) +{ + uint32 n, i; + uint32 addr; + char *console_buffer = NULL; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + int rv; + + DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__)); + return; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { + goto exit; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) { + goto exit; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) { + goto exit; + } + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { + goto exit; + } + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) { + goto exit; + } + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; 
+ if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. The macro + * will truncate a lot of the printfs + */ + + DHD_FWLOG(("CONSOLE: %s\n", line)); + } + } + +exit: + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + return; +} + +/** + * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file. + * + * @return BCME_OK on success + */ +static int +dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + pciedev_shared_t *local_pciedev_shared = bus->pcie_sh; + struct bcmstrbuf strbuf; + unsigned long flags; + bool dongle_trap_occured = FALSE; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (DHD_NOCHECKDIED_ON()) { + return 0; + } + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. 
+ */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + if ((bcmerror = dhdpcie_readshared(bus)) < 0) { + goto done; + } + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr); + + if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (bus->pcie_sh->assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (bus->pcie_sh->assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line); + } + + if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) { + trap_t *tr = &bus->dhd->last_trap_info; + dongle_trap_occured = TRUE; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) { + bus->dhd->dongle_trap_occured = TRUE; + goto done; + } + dhd_bus_dump_trap_info(bus, &strbuf); + } + } + + if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) { + DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + + /* wake up IOCTL wait event */ + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); + + dhd_bus_dump_console_buffer(bus); + dhd_prot_debug_info_print(bus->dhd); + +#if defined(DHD_FW_COREDUMP) + /* save core dump or write to a file */ + if (bus->dhd->memdump_enabled) { +#ifdef DHD_SSSR_DUMP + if (bus->dhd->sssr_inited) { + dhdpcie_sssr_dump(bus->dhd); + } +#endif /* DHD_SSSR_DUMP */ + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + + /* set the trap occured flag only after all the memdump, + * logdump and sssr dump collection has been scheduled + */ + if (dongle_trap_occured) { 
+ bus->dhd->dongle_trap_occured = TRUE; + } + + dhd_schedule_reset(bus->dhd); + + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +done: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + + return bcmerror; +} /* dhdpcie_checkdied */ + +/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */ +void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf) +{ + int ret = 0; + int size; /* Full mem size */ + int start; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *databuf = buf; + + if (bus == NULL) { + return; + } + + start = bus->dongle_ram_base; + read_size = 4; + /* check for dead bus */ + { + uint test_word = 0; + ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size); + /* if read error or bus timeout */ + if (ret || (test_word == 0xFFFFFFFF)) { + return; + } + } + + /* Get full mem size */ + size = bus->ramsize; + /* Read mem content */ + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) { + return; + } + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + bus->dhd->soc_ram = buf; + bus->dhd->soc_ram_length = bus->ramsize; + return; +} + +#if defined(DHD_FW_COREDUMP) +static int +dhdpcie_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start = bus->dongle_ram_base; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + +#ifdef EXYNOS_PCIE_DEBUG + exynos_pcie_register_dump(1); +#endif /* EXYNOS_PCIE_DEBUG */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0) + return 
BCME_ERROR; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + /* Get full mem size */ + size = bus->ramsize; + buf = dhd_get_fwdump_buf(bus->dhd, size); + if (!buf) { + DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); + return BCME_ERROR; + } + + /* Read mem content */ + DHD_TRACE_HW4(("Dump dongle memory\n")); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) + { + DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); +#ifdef DHD_DEBUG_UART + bus->dhd->memdump_success = FALSE; +#endif /* DHD_DEBUG_UART */ + return BCME_ERROR; + } + DHD_TRACE((".")); + + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } +#ifdef DHD_DEBUG_UART + bus->dhd->memdump_success = TRUE; +#endif /* DHD_DEBUG_UART */ + + dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + /* buf, actually soc_ram free handled in dhd_{free,clear} */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(bus)); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + return ret; +} + +int +dhd_bus_mem_dump(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + int ret = BCME_ERROR; + + if (dhdp->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s bus is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + return BCME_ERROR; + } + + DHD_OS_WAKE_LOCK(dhdp); + ret = dhdpcie_mem_dump(bus); + DHD_OS_WAKE_UNLOCK(dhdp); + return ret; +} + +int +dhd_dongle_mem_dump(void) +{ + if (!g_dhd_bus) { + DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__)); + return -ENODEV; + } + + dhd_bus_dump_console_buffer(g_dhd_bus); + dhd_prot_debug_info_print(g_dhd_bus->dhd); + + g_dhd_bus->dhd->memdump_enabled = 
DUMP_MEMFILE_BUGON; + g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS; + + dhd_bus_mem_dump(g_dhd_bus->dhd); + return 0; +} +EXPORT_SYMBOL(dhd_dongle_mem_dump); +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t *bus) +{ +#if defined(DHD_FW_COREDUMP) + DHD_OS_WAKE_LOCK(bus->dhd); + dhd_bus_mem_dump(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + return 0; +#else + return -1; +#endif // endif +} + +/** + * Transfers bytes from host to dongle using pio mode. + * Parameter 'address' is a backplane address. + */ +static int +dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size) +{ + uint dsize; + int detect_endian_flag = 0x01; + bool little_endian; + + if (write && bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + /* Detect endianness. */ + little_endian = *(char *)&detect_endian_flag; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + + /* Determine initial transfer parameters */ +#ifdef DHD_SUPPORT_64BIT + dsize = sizeof(uint64); +#else /* !DHD_SUPPORT_64BIT */ + dsize = sizeof(uint32); +#endif /* DHD_SUPPORT_64BIT */ + + /* Do the transfer(s) */ + DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n", + __FUNCTION__, (write ? 
"write" : "read"), size, address)); + if (write) { + while (size) { +#ifdef DHD_SUPPORT_64BIT + if (size >= sizeof(uint64) && little_endian && !(address % 8)) { + dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data)); + } +#else /* !DHD_SUPPORT_64BIT */ + if (size >= sizeof(uint32) && little_endian && !(address % 4)) { + dhdpcie_bus_wtcm32(bus, address, *((uint32*)data)); + } +#endif /* DHD_SUPPORT_64BIT */ + else { + dsize = sizeof(uint8); + dhdpcie_bus_wtcm8(bus, address, *data); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + } + } + } else { + while (size) { +#ifdef DHD_SUPPORT_64BIT + if (size >= sizeof(uint64) && little_endian && !(address % 8)) + { + *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address); + } +#else /* !DHD_SUPPORT_64BIT */ + if (size >= sizeof(uint32) && little_endian && !(address % 4)) + { + *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address); + } +#endif /* DHD_SUPPORT_64BIT */ + else { + dsize = sizeof(uint8); + *data = dhdpcie_bus_rtcm8(bus, address); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize) > 0) { + data += dsize; + address += dsize; + } + } + } + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + return BCME_OK; +} /* dhdpcie_bus_membytes */ + +/** + * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue + * to the (non flow controlled) flow ring. 
+ */ +int BCMFASTPATH +dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) +{ + flow_ring_node_t *flow_ring_node; + int ret = BCME_OK; +#ifdef DHD_LOSSLESS_ROAMING + dhd_pub_t *dhdp = bus->dhd; +#endif // endif + DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id)); + + /* ASSERT on flow_id */ + if (flow_id >= bus->max_submission_rings) { + DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, + flow_id, bus->max_submission_rings)); + return 0; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id); + + if (flow_ring_node->prot_info == NULL) { + DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__)); + return BCME_NOTREADY; + } + +#ifdef DHD_LOSSLESS_ROAMING + if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) { + DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n", + __FUNCTION__, flow_ring_node->flow_info.tid)); + return BCME_OK; + } +#endif /* DHD_LOSSLESS_ROAMING */ + + { + unsigned long flags; + void *txp = NULL; + flow_queue_t *queue; +#ifdef DHD_LOSSLESS_ROAMING + struct ether_header *eh; + uint8 *pktdata; +#endif /* DHD_LOSSLESS_ROAMING */ + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + return BCME_NOTREADY; + } + + while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTORPHAN(txp, bus->dhd->conf->tsq); + + /* + * Modifying the packet length caused P2P cert failures. + * Specifically on test cases where a packet of size 52 bytes + * was injected, the sniffer capture showed 62 bytes because of + * which the cert tests failed. So making the below change + * only Router specific. 
+ */ + +#ifdef DHDTCPACK_SUPPRESS + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) { + ret = dhd_tcpack_check_xmit(bus->dhd, txp); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n", + __FUNCTION__)); + } + } +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_LOSSLESS_ROAMING + pktdata = (uint8 *)PKTDATA(OSH_NULL, txp); + eh = (struct ether_header *) pktdata; + if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + uint8 prio = (uint8)PKTPRIO(txp); + /* Restore to original priority for 802.1X packet */ + if (prio == PRIO_8021D_NC) { + PKTSETPRIO(txp, dhdp->prio_8021x); + } + } +#endif /* DHD_LOSSLESS_ROAMING */ + /* Attempt to transfer packet over flow ring */ + ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex); + if (ret != BCME_OK) { /* may not have resources in flow ring */ + DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret)); + dhd_prot_txdata_write_flush(bus->dhd, flow_id); + /* reinsert at head */ + dhd_flow_queue_reinsert(bus->dhd, queue, txp); + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* If we are able to requeue back, return success */ + return BCME_OK; + } + } + + dhd_prot_txdata_write_flush(bus->dhd, flow_id); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + + return ret; +} /* dhd_bus_schedule_queue */ + +/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. 
*/ +int BCMFASTPATH +dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) +{ + uint16 flowid; +#ifdef IDLE_TX_FLOW_MGMT + uint8 node_status; +#endif /* IDLE_TX_FLOW_MGMT */ + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + int ret = BCME_OK; + void *txp_pend = NULL; + + if (!bus->dhd->flowid_allocator) { + DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); + goto toss; + } + + flowid = DHD_PKT_GET_FLOWID(txp); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + + DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active)); + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + if ((flowid >= bus->dhd->num_flow_rings) || +#ifdef IDLE_TX_FLOW_MGMT + (!flow_ring_node->active)) +#else + (!flow_ring_node->active) || + (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) || + (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) +#endif /* IDLE_TX_FLOW_MGMT */ + { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + ret = BCME_ERROR; + goto toss; + } + +#ifdef IDLE_TX_FLOW_MGMT + node_status = flow_ring_node->status; + + /* handle diffrent status states here!! */ + switch (node_status) + { + case FLOW_RING_STATUS_OPEN: + + if (bus->enable_idle_flowring_mgmt) { + /* Move the node to the head of active list */ + dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node); + } + break; + + case FLOW_RING_STATUS_SUSPENDED: + DHD_INFO(("Need to Initiate TX Flow resume\n")); + /* Issue resume_ring request */ + dhd_bus_flow_ring_resume_request(bus, + flow_ring_node); + break; + + case FLOW_RING_STATUS_CREATE_PENDING: + case FLOW_RING_STATUS_RESUME_PENDING: + /* Dont do anything here!! */ + DHD_INFO(("Waiting for Flow create/resume! 
status is %u\n", + node_status)); + break; + + case FLOW_RING_STATUS_DELETE_PENDING: + default: + DHD_ERROR(("Dropping packet!! flowid %u status is %u\n", + flowid, node_status)); + /* error here!! */ + ret = BCME_ERROR; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + goto toss; + } + /* Now queue the packet */ +#endif /* IDLE_TX_FLOW_MGMT */ + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) + txp_pend = txp; + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status) { + DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + if (txp_pend) { + txp = txp_pend; + goto toss; + } + return BCME_OK; + } + ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + /* If we have anything pending, try to push into q */ + if (txp_pend) { + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + txp = txp_pend; + goto toss; + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + + return ret; + +toss: + DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); + PKTCFREE(bus->dhd->osh, txp, TRUE); + return ret; +} /* dhd_bus_txdata */ + +void +dhd_bus_stop_queue(struct dhd_bus *bus) +{ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); +} + +void +dhd_bus_start_queue(struct dhd_bus *bus) +{ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); +} + +/* Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhd->bus; + uint32 addr, val; + int rv; + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + return 
BCME_NOTREADY; + } + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* generate an interrupt to dongle to indicate that it needs to process cons command */ + dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT); +done: + return rv; +} /* dhd_bus_console_in */ + +/** + * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is + * contained in 'pkt'. Processes rx frame, forwards up the layer to netif. + */ +void BCMFASTPATH +dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count) +{ + dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0); +} + +/** 'offset' is a backplane address */ +void +dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) +{ + W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data); +} + +uint8 +dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) +{ + volatile uint8 data; + data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset)); + return data; +} + +void +dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) +{ + W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data); +} +void +dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) +{ + W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data); +} +#ifdef DHD_SUPPORT_64BIT +void +dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) +{ + W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data); +} +#endif /* DHD_SUPPORT_64BIT 
*/ + +uint16 +dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) +{ + volatile uint16 data; + data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset)); + return data; +} + +uint32 +dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) +{ + volatile uint32 data; + data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset)); + return data; +} + +#ifdef DHD_SUPPORT_64BIT +uint64 +dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) +{ + volatile uint64 data; + data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset)); + return data; +} +#endif /* DHD_SUPPORT_64BIT */ + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid) +{ + uint64 long_data; + ulong addr; /* dongle address */ + + DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + switch (type) { + case D2H_DMA_SCRATCH_BUF: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer); + long_data = HTOL64(*(uint64 *)data); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case D2H_DMA_SCRATCH_BUF_LEN : + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len); + dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case H2D_DMA_INDX_WR_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case H2D_DMA_INDX_RD_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = 
DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case D2H_DMA_INDX_WR_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case D2H_DMA_INDX_RD_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case H2D_IFRM_INDX_WR_BUF: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case RING_ITEM_LEN : + addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items); + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_MAX_ITEMS : + addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item); + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_BUF_ADDR : + long_data = HTOL64(*(uint64 *)data); + addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + + case RING_WR_UPD : + addr = bus->ring_sh[ringid].ring_state_w; + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_RD_UPD : + addr = bus->ring_sh[ringid].ring_state_r; + dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); + break; + + case D2H_MB_DATA: + addr = bus->d2h_mb_data_ptr_addr; + 
dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + break; + + case H2D_MB_DATA: + addr = bus->h2d_mb_data_ptr_addr; + dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + break; + + case HOST_API_VERSION: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap); + dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); + break; + + case DNGL_TO_HOST_TRAP_ADDR: + long_data = HTOL64(*(uint64 *)data); + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len); + DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data))); + break; + + case HOST_SCB_ADDR: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr); +#ifdef DHD_SUPPORT_64BIT + dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data)); +#else /* !DHD_SUPPORT_64BIT */ + dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data)); +#endif /* DHD_SUPPORT_64BIT */ + DHD_INFO(("Wrote host_scb_addr:0x%x\n", + (uint32) HTOL32(*(uint32 *)data))); + break; + + default: + break; + } + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} /* dhd_bus_cmn_writeshared */ + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) +{ + ulong addr; /* dongle address */ + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + switch (type) { + case RING_WR_UPD : + addr = bus->ring_sh[ringid].ring_state_w; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case RING_RD_UPD : + addr = bus->ring_sh[ringid].ring_state_r; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case TOTAL_LFRAG_PACKET_CNT : + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt); + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case H2D_MB_DATA: + addr = bus->h2d_mb_data_ptr_addr; + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); + break; + + case 
D2H_MB_DATA: + addr = bus->d2h_mb_data_ptr_addr; + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); + break; + + case MAX_HOST_RXBUFS : + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs); + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); + break; + + case HOST_SCB_ADDR: + addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size); + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); + break; + + default : + break; + } + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus) +{ + return ((pciedev_shared_t*)bus->pcie_sh)->flags; +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ +} + +/** + * @param params input buffer, NULL for 'set' operation. + * @param plen length of 'params' buffer, 0 for 'set' operation. + * @param arg output buffer + */ +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = BCME_UNSUPPORTED; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + if (!name || len < 0) + return BCME_BADARG; + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + if (!(set || (arg && len))) + return BCME_BADARG; + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + if (!(!set || (!params && !plen))) + return BCME_BADARG; + + DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? 
"set" : "get"), len, plen)); + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) { + goto exit; + } + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + /* In DEVRESET_QUIESCE/DEVRESET_ON, + * this includes dongle re-attach which initialize pwr_req_ref count to 0 and + * causes pwr_req_ref count miss-match in pwr req clear function and hang. + * In this case, bypass pwr req clear. 
+ */ + if (bcmerror == BCME_DNGL_DEVRESET) { + bcmerror = BCME_OK; + } else { + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + } + return bcmerror; +} /* dhd_bus_iovar_op */ + +#ifdef BCM_BUZZZ +#include + +int +dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log, + const int num_counters) +{ + int bytes = 0; + uint32 ctr; + uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX]; + uint32 delta[BCM_BUZZZ_COUNTERS_MAX]; + + /* Compute elapsed counter values per counter event type */ + for (ctr = 0U; ctr < num_counters; ctr++) { + prev[ctr] = core[ctr]; + curr[ctr] = *log++; + core[ctr] = curr[ctr]; /* saved for next log */ + + if (curr[ctr] < prev[ctr]) + delta[ctr] = curr[ctr] + (~0U - prev[ctr]); + else + delta[ctr] = (curr[ctr] - prev[ctr]); + + bytes += sprintf(p + bytes, "%12u ", delta[ctr]); + } + + return bytes; +} + +typedef union cm3_cnts { /* export this in bcm_buzzz.h */ + uint32 u32; + uint8 u8[4]; + struct { + uint8 cpicnt; + uint8 exccnt; + uint8 sleepcnt; + uint8 lsucnt; + }; +} cm3_cnts_t; + +int +dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log) +{ + int bytes = 0; + + uint32 cyccnt, instrcnt; + cm3_cnts_t cm3_cnts; + uint8 foldcnt; + + { /* 32bit cyccnt */ + uint32 curr, prev, delta; + prev = core[0]; curr = *log++; core[0] = curr; + if (curr < prev) + delta = curr + (~0U - prev); + else + delta = (curr - prev); + + bytes += sprintf(p + bytes, "%12u ", delta); + cyccnt = delta; + } + + { /* Extract the 4 cnts: cpi, exc, sleep and lsu */ + int i; + uint8 max8 = ~0; + cm3_cnts_t curr, prev, delta; + prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32; + for (i = 0; i < 4; i++) { + if (curr.u8[i] < prev.u8[i]) + delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]); + else + delta.u8[i] = (curr.u8[i] - prev.u8[i]); + bytes += sprintf(p + bytes, "%4u ", delta.u8[i]); + } + cm3_cnts.u32 = delta.u32; + } + + { /* Extract the foldcnt from arg0 */ + uint8 curr, prev, delta, max8 = ~0; + bcm_buzzz_arg0_t 
arg0; arg0.u32 = *log; + prev = core[2]; curr = arg0.klog.cnt; core[2] = curr; + if (curr < prev) + delta = curr + (max8 - prev); + else + delta = (curr - prev); + bytes += sprintf(p + bytes, "%4u ", delta); + foldcnt = delta; + } + + instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2] + + cm3_cnts.u8[3]) + foldcnt; + if (instrcnt > 0xFFFFFF00) + bytes += sprintf(p + bytes, "[%10s] ", "~"); + else + bytes += sprintf(p + bytes, "[%10u] ", instrcnt); + return bytes; +} + +int +dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz) +{ + int bytes = 0; + bcm_buzzz_arg0_t arg0; + static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS; + + if (buzzz->counters == 6) { + bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log); + log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */ + } else { + bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters); + log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */ + } + + /* Dump the logged arguments using the registered formats */ + arg0.u32 = *log++; + + switch (arg0.klog.args) { + case 0: + bytes += sprintf(p + bytes, fmt[arg0.klog.id]); + break; + case 1: + { + uint32 arg1 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1); + break; + } + case 2: + { + uint32 arg1, arg2; + arg1 = *log++; arg2 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2); + break; + } + case 3: + { + uint32 arg1, arg2, arg3; + arg1 = *log++; arg2 = *log++; arg3 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3); + break; + } + case 4: + { + uint32 arg1, arg2, arg3, arg4; + arg1 = *log++; arg2 = *log++; + arg3 = *log++; arg4 = *log++; + bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4); + break; + } + default: + printf("%s: Maximum one argument supported\n", __FUNCTION__); + break; + } + + bytes += sprintf(p + bytes, "\n"); + + return bytes; +} + +void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p) +{ + int i; + uint32 total, part1, part2, 
log_sz, core[BCM_BUZZZ_COUNTERS_MAX]; + void * log; + + for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) { + core[i] = 0; + } + + log_sz = buzzz_p->log_sz; + + part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz; + + if (buzzz_p->wrap == TRUE) { + part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz; + total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz; + } else { + part2 = 0U; + total = buzzz_p->count; + } + + if (total == 0U) { + printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total); + return; + } else { + printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__, + total, part2, part1); + } + + if (part2) { /* with wrap */ + log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log)); + while (part2--) { /* from cur to end : part2 */ + p[0] = '\0'; + dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); + printf("%s", p); + log = (void*)((size_t)log + buzzz_p->log_sz); + } + } + + log = (void*)buffer_p; + while (part1--) { + p[0] = '\0'; + dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); + printf("%s", p); + log = (void*)((size_t)log + buzzz_p->log_sz); + } + + printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__); +} + +int dhd_buzzz_dump_dngl(dhd_bus_t *bus) +{ + bcm_buzzz_t * buzzz_p = NULL; + void * buffer_p = NULL; + char * page_p = NULL; + pciedev_shared_t *sh; + int ret = 0; + + if (bus->dhd->busstate != DHD_BUS_DATA) { + return BCME_UNSUPPORTED; + } + if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) { + printf("%s: Page memory allocation failure\n", __FUNCTION__); + goto done; + } + if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) { + printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__); + goto done; + } + + ret = dhdpcie_readshared(bus); + if (ret < 0) { + DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__)); + goto done; + } + + sh = bus->pcie_sh; + + DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr)); + + if 
(sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */ + + dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr, + (uint8 *)buzzz_p, sizeof(bcm_buzzz_t)); + + printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> " + "count<%u> status<%u> wrap<%u>\n" + "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n", + (int)sh->buzz_dbg_ptr, + (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end, + buzzz_p->count, buzzz_p->status, buzzz_p->wrap, + buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group, + buzzz_p->buffer_sz, buzzz_p->log_sz); + + if (buzzz_p->count == 0) { + printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__); + goto done; + } + + /* Allocate memory for trace buffer and format strings */ + buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz); + if (buffer_p == NULL) { + printf("%s: Buffer memory allocation failure\n", __FUNCTION__); + goto done; + } + + /* Fetch the trace. format strings are exported via bcm_buzzz.h */ + dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */ + (uint8 *)buffer_p, buzzz_p->buffer_sz); + + /* Process and display the trace using formatted output */ + + { + int ctr; + for (ctr = 0; ctr < buzzz_p->counters; ctr++) { + printf(" ", buzzz_p->eventid[ctr]); + } + printf("\n"); + } + + dhd_buzzz_dump(buzzz_p, buffer_p, page_p); + + printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__); + + MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL; + } + +done: + + if (page_p) MFREE(bus->dhd->osh, page_p, 4096); + if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t)); + if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); + + return BCME_OK; +} +#endif /* BCM_BUZZZ */ + +#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \ + ((sih)->buscoretype == PCIE2_CORE_ID)) + +#define PCIE_FLR_CAPAB_BIT 28 +#define PCIE_FUNCTION_LEVEL_RESET_BIT 15 + +/* Change delays for only QT HW, FPGA and silicon uses same delay */ +#ifdef 
BCMQT_HW +#define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u +#define DHD_SSRESET_STATUS_RETRY_DELAY 10000u +#else +#define DHD_FUNCTION_LEVEL_RESET_DELAY 55u /* 55 msec delay */ +#define DHD_SSRESET_STATUS_RETRY_DELAY 40u +#endif // endif +#define DHD_SSRESET_STATUS_RETRIES 50u + +int +dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail) +{ + bool flr_capab; + uint val; + int retry = 0; + + DHD_ERROR(("******** Perform FLR ********\n")); + + /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */ + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val)); + flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT); + DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n", + PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab)); + if (!flr_capab) { + DHD_ERROR(("Chip does not support FLR\n")); + return BCME_UNSUPPORTED; + } + + /* Save pcie config space */ + DHD_ERROR(("Save Pcie Config Space\n")); + DHD_PCIE_CONFIG_SAVE(bus); + + /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */ + DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", + PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT); + DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); + + /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */ + DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY)); + OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u); + + if (force_fail) { + DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n", + PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: 
reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, + val)); + val = val | (1 << PCIE_SSRESET_DISABLE_BIT); + DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, + val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val); + + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, + val)); + } + + /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */ + DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", + PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT); + DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); + + /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */ + DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)" + "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); + do { + val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); + DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", + PCIE_CFG_SUBSYSTEM_CONTROL, val)); + val = val & (1 << PCIE_SSRESET_STATUS_BIT); + OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY); + } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES)); + + if (val) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", + PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT)); + /* User has to fire the IOVAR again, if force_fail is needed */ + if (force_fail) { + bus->flr_force_fail = FALSE; + DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__)); + } + return BCME_ERROR; + } + + /* Restore pcie config space */ + DHD_ERROR(("Restore 
Pcie Config Space\n")); + DHD_PCIE_CONFIG_RESTORE(bus); + + DHD_ERROR(("******** FLR Succedeed ********\n")); + + return BCME_OK; +} + +#ifdef DHD_USE_BP_RESET +#define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */ + +#define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */ +#define DHD_BP_RESET_STATUS_RETRIES 50u + +#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10 +#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21 +int +dhd_bus_perform_bp_reset(struct dhd_bus *bus) +{ + uint val; + int retry = 0; + uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev); + int ret = BCME_OK; + bool cond; + + DHD_ERROR(("******** Perform BP reset ********\n")); + + /* Disable ASPM */ + DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", + PCIECFGREG_LINK_STATUS_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + val = val & (~PCIE_ASPM_ENAB); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val); + + /* wait for delay usec */ + DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY)); + OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY); + + /* Set bit 10 of PCIECFGREG_SPROM_CTRL */ + DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n", + PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); + val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); + OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val); + + /* Wait till bit backplane reset is ASSERTED i,e + * bit 10 of PCIECFGREG_SPROM_CTRL is cleared. 
+ * Only after this, poll for 21st bit of DAR reg 0xAE0 is valid + * else DAR register will read previous old value + */ + DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of " + "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n", + PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL)); + do { + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); + cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT); + OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); + } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); + + if (cond) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", + PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT)); + ret = BCME_ERROR; + goto aspm_enab; + } + + /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */ + DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of " + "dar_clk_ctrl_status_reg(0x%x) is cleared\n", + PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg)); + do { + val = si_corereg(bus->sih, bus->sih->buscoreidx, + dar_clk_ctrl_status_reg, 0, 0); + DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n", + dar_clk_ctrl_status_reg, val)); + cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT); + OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); + } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); + + if (cond) { + DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", + dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT)); + ret = BCME_ERROR; + } + +aspm_enab: + /* Enable ASPM */ + DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", + PCIECFGREG_LINK_STATUS_CTRL)); + val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); + DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); + val = val | (PCIE_ASPM_L1_ENAB); + DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", 
		PCIECFGREG_LINK_STATUS_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);

	DHD_ERROR(("******** BP reset Succedeed ********\n"));

	return ret;
}
#endif /* DHD_USE_BP_RESET */

/*
 * dhd_bus_devreset() - power the WLAN dongle off or back on over PCIe.
 *
 * @dhdp: public DHD context (bus is reached via dhdp->bus).
 * @flag: TRUE  -> power OFF path (quiesce, free IRQ, release dongle,
 *                 disable the PCIe device, then tear down protocol state);
 *        FALSE -> power ON path (re-enable device, re-allocate resources,
 *                 re-attach dongle, re-request IRQ, restart the bus).
 *
 * Returns 0 on success or a BCME_* error code. On any error the bus state
 * is forced to DHD_BUS_DOWN under the general lock (see 'done' label).
 *
 * NOTE(review): the OFF path deliberately calls dhd_prot_reset()/dhd_clear()
 * only AFTER dhdpcie_bus_disable_device() (Bus Master Enable cleared) —
 * see the in-body comment about System MMU panics; do not reorder.
 */
int
dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
{
	dhd_bus_t *bus = dhdp->bus;
	int bcmerror = 0;
	unsigned long flags;
	unsigned long flags_bus;
#ifdef CONFIG_ARCH_MSM
	/* MSM hosts retry the PCIe root-clock enable a bounded number of times */
	int retry = POWERUP_MAX_RETRY;
#endif /* CONFIG_ARCH_MSM */

	if (flag == TRUE) { /* Turn off WLAN */
		/* Removing Power */
		DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));

		bus->dhd->up = FALSE;

		/* wait for other contexts to finish -- if required a call
		 * to OSL_DELAY for 1s can be added to give other contexts
		 * a chance to finish
		 */
		dhdpcie_advertise_bus_cleanup(bus->dhd);

		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			/* Bus was still up: full quiesce before removing power */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			atomic_set(&bus->dhd->block_bus, TRUE);
			dhd_flush_rx_tx_wq(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef BCMPCIE_OOB_HOST_WAKE
			/* Clean up any pending host wake IRQ */
			dhd_bus_oob_intr_set(bus->dhd, FALSE);
			dhd_bus_oob_intr_unregister(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
			dhd_os_wd_timer(dhdp, 0);
			dhd_bus_stop(bus, TRUE);
			if (bus->intr) {
				/* Disable dongle interrupts under the bus lock,
				 * then release the host-side IRQ line.
				 */
				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
				dhdpcie_bus_intr_disable(bus);
				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
				dhdpcie_free_irq(bus);
			}
			dhd_deinit_bus_lock(bus);
			dhd_bus_release_dongle(bus);
			dhdpcie_bus_free_resource(bus);
			bcmerror = dhdpcie_bus_disable_device(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
					__FUNCTION__, bcmerror));
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
				atomic_set(&bus->dhd->block_bus, FALSE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
			}
			/* Clean up protocol data after Bus Master Enable bit clear
			 * so that host can safely unmap DMA and remove the allocated buffers
			 * from the PKTID MAP. Some Application Processors with a
			 * System MMU trigger a Kernel panic when they detect an attempt at
			 * DMA-unmapped memory access from the devices which use the
			 * System MMU. Therefore, a Kernel panic can happen since it is
			 * possible that the dongle can access DMA-unmapped memory after
			 * calling the dhd_prot_reset().
			 * For this reason, the dhd_prot_reset() and dhd_clear() functions
			 * should be located after the dhdpcie_bus_disable_device().
			 */
			dhd_prot_reset(dhdp);
			dhd_clear(dhdp);
#ifdef CONFIG_ARCH_MSM
			bcmerror = dhdpcie_bus_clock_stop(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: host clock stop failed: %d\n",
					__FUNCTION__, bcmerror));
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
				atomic_set(&bus->dhd->block_bus, FALSE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
				goto done;
			}
#endif /* CONFIG_ARCH_MSM */
			DHD_GENERAL_LOCK(bus->dhd, flags);
			bus->dhd->busstate = DHD_BUS_DOWN;
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			atomic_set(&bus->dhd->block_bus, FALSE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
		} else {
			/* Bus already DOWN: lighter teardown (no dhd_bus_stop) */
			if (bus->intr) {
				dhdpcie_free_irq(bus);
			}
#ifdef BCMPCIE_OOB_HOST_WAKE
			/* Clean up any pending host wake IRQ */
			dhd_bus_oob_intr_set(bus->dhd, FALSE);
			dhd_bus_oob_intr_unregister(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
			dhd_dpc_kill(bus->dhd);
			if (!bus->no_bus_init) {
				dhd_bus_release_dongle(bus);
				dhdpcie_bus_free_resource(bus);
				bcmerror = dhdpcie_bus_disable_device(bus);
				if (bcmerror) {
					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
						__FUNCTION__, bcmerror));
				}

				/* Clean up protocol data after Bus Master Enable bit clear
				 * so that host can safely unmap DMA and remove the allocated
				 * buffers from the PKTID MAP. Some Application Processors
				 * with a System MMU trigger a Kernel panic when they detect
				 * an attempt at DMA-unmapped memory access from the devices
				 * which use the System MMU.
				 * Therefore, a Kernel panic can happen since it is possible
				 * that the dongle can access DMA-unmapped memory after calling
				 * the dhd_prot_reset().
				 * For this reason, the dhd_prot_reset() and dhd_clear() functions
				 * should be located after the dhdpcie_bus_disable_device().
				 */
				dhd_prot_reset(dhdp);
				dhd_clear(dhdp);
			} else {
				bus->no_bus_init = FALSE;
			}
#ifdef CONFIG_ARCH_MSM
			bcmerror = dhdpcie_bus_clock_stop(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: host clock stop failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}
#endif /* CONFIG_ARCH_MSM */
		}

		bus->dhd->dongle_reset = TRUE;
		DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));

	} else { /* Turn on WLAN */
		if (bus->dhd->busstate == DHD_BUS_DOWN) {
			/* Powering On */
			DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
			/* Retry clock start; 10 ms back-off between attempts */
			while (--retry) {
				bcmerror = dhdpcie_bus_clock_start(bus);
				if (!bcmerror) {
					DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
						__FUNCTION__));
					break;
				} else {
					OSL_SLEEP(10);
				}
			}

			if (bcmerror && !retry) {
				DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}
#endif /* CONFIG_ARCH_MSM */
			bus->is_linkdown = 0;
			bcmerror = dhdpcie_bus_enable_device(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: host configuration restore failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bcmerror = dhdpcie_bus_alloc_resource(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bcmerror = dhdpcie_bus_dongle_attach(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bcmerror = dhd_bus_request_irq(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			/* Clear reset flag BEFORE dhd_bus_start() so IOVARs issued
			 * during FW download are not rejected by the reset guard.
			 */
			bus->dhd->dongle_reset = FALSE;

			bcmerror = dhd_bus_start(dhdp);
			if (bcmerror) {
				DHD_ERROR(("%s: dhd_bus_start: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bus->dhd->up = TRUE;
			/* Re-enabling watchdog which is disabled in dhdpcie_advertise_bus_cleanup */
			if (bus->dhd->dhd_watchdog_ms_backup) {
				DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
					__FUNCTION__));
				dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
			}
			DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
		} else {
			/* Power-on requested while bus is not DOWN: nothing to do */
			DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
			goto done;
		}
	}

done:
	if (bcmerror) {
		/* Any failure leaves the bus marked DOWN so callers can retry */
		DHD_GENERAL_LOCK(bus->dhd, flags);
		bus->dhd->busstate = DHD_BUS_DOWN;
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
	}
	return bcmerror;
}

/* si_backplane_access() manages a shared resource - BAR0 mapping, hence its
 * calls shall be serialized. This wrapper function provides such serialization
 * and shall be used everywhere instead of direct call of si_backplane_access()
 *
 * Linux DHD driver calls si_backplane_access() from 3 three contexts: tasklet
 * (that may call dhdpcie_sssr_dump() from dhdpcie_sssr_dump()), iovar
 * ("sbreg", "membyres", etc.) and procfs (used by GDB proxy). To avoid race
 * conditions calls of si_backplane_access() shall be serialized. Presence of
 * tasklet context implies that serialization shall be based on spinlock. Hence
 * Linux implementation of dhd_pcie_backplane_access_[un]lock() is
 * spinlock-based.
 *
 * Other platforms may add their own implementations of
 * dhd_pcie_backplane_access_[un]lock() as needed (e.g.
if serialization is not + * needed implementation might be empty) + */ +static uint +serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read) +{ + uint ret; + dhd_pcie_backplane_access_lock(bus->dhd); + ret = si_backplane_access(bus->sih, addr, size, val, read); + dhd_pcie_backplane_access_unlock(bus->dhd); + return ret; +} + +static int +dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd) +{ + int h2d_support, d2h_support; + + d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0; + h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0; + return (d2h_support | (h2d_support << 1)); + +} +int +dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val) +{ + int bcmerror = 0; + /* Can change it only during initialization/FW download */ + if (dhd->busstate == DHD_BUS_DOWN) { + if ((int_val > 3) || (int_val < 0)) { + DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n")); + bcmerror = BCME_BADARG; + } else { + dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE; + dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE; + dhd->dma_ring_upd_overwrite = TRUE; + } + } else { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + } + + return bcmerror; + +} + +/** + * IOVAR handler of the DHD bus layer (in this case, the PCIe bus). + * + * @param actionid e.g. 
 * IOV_SVAL(IOV_PCIEREG)
 * @param params input buffer
 * @param plen length in [bytes] of input buffer 'params'
 * @param arg output buffer
 * @param len length in [bytes] of output buffer 'arg'
 *
 * Returns BCME_OK or a BCME_* error. Up to three int32 values are copied
 * out of 'params' (int_val/int_val2/int_val3); while the dongle is held in
 * reset only the DEVRESET get/set IOVARs are allowed through.
 */
static int
dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
	void *params, int plen, void *arg, int len, int val_size)
{
	int bcmerror = 0;
	int32 int_val = 0;
	int32 int_val2 = 0;
	int32 int_val3 = 0;
	bool bool_val = 0;

	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
		__FUNCTION__, actionid, name, params, plen, arg, len, val_size));

	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
		goto exit;

	/* Unpack up to three little-endian int32 arguments from 'params' */
	if (plen >= (int)sizeof(int_val))
		bcopy(params, &int_val, sizeof(int_val));

	if (plen >= (int)sizeof(int_val) * 2)
		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));

	if (plen >= (int)sizeof(int_val) * 3)
		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));

	bool_val = (int_val != 0) ? TRUE : FALSE;

	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
		actionid == IOV_GVAL(IOV_DEVRESET))) {
		bcmerror = BCME_NOTREADY;
		goto exit;
	}

	switch (actionid) {

	case IOV_SVAL(IOV_VARS):
		bcmerror = dhdpcie_downloadvars(bus, arg, len);
		break;
	case IOV_SVAL(IOV_PCIE_LPBK):
		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
		break;

	case IOV_SVAL(IOV_PCIE_DMAXFER): {
		/* Optional 4th..6th int32 params: d11 loopback mode, wait flag,
		 * and target core number for the DMA loopback test.
		 */
		int int_val4 = 0;
		int wait = 0;
		int core_num = 0;
		if (plen >= (int)sizeof(int_val) * 4) {
			bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
				&int_val4, sizeof(int_val4));
		}
		if (plen >= (int)sizeof(int_val) * 5) {
			bcopy((void*)((uintptr)params + 4 * sizeof(int_val)),
				&wait, sizeof(wait));
		}
		if (plen >= (int)sizeof(core_num) * 6) {
			bcopy((void*)((uintptr)params + 5 * sizeof(core_num)),
				&core_num, sizeof(core_num));
		}
		bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3,
			int_val4, core_num, wait);
		if (wait && bcmerror >= 0) {
			/* get the status of the dma transfer */
			int_val4 = dhdmsgbuf_dmaxfer_status(bus->dhd);
			bcopy(&int_val4, params, sizeof(int_val));
		}
		break;
	}

	case IOV_GVAL(IOV_PCIE_DMAXFER): {
		int dma_status = 0;
		dma_status = dhdmsgbuf_dmaxfer_status(bus->dhd);
		bcopy(&dma_status, arg, val_size);
		bcmerror = BCME_OK;
		break;
	}

	case IOV_GVAL(IOV_PCIE_SUSPEND):
		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_PCIE_SUSPEND):
		if (bool_val) { /* Suspend */
			int ret;
			unsigned long flags;

			/*
			 * If some other context is busy, wait until they are done,
			 * before starting suspend
			 */
			ret = dhd_os_busbusy_wait_condition(bus->dhd,
				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
			if (ret == 0) {
				DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
				return BCME_BUSY;
			}

			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			dhdpcie_bus_suspend(bus, TRUE, TRUE);
#else
			dhdpcie_bus_suspend(bus, TRUE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
			dhd_os_busbusy_wake(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
		} else { /* Resume */
			unsigned long flags;
			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);

			dhdpcie_bus_suspend(bus, FALSE);

			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
			dhd_os_busbusy_wake(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
		}
		break;

	case IOV_GVAL(IOV_MEMSIZE):
		int_val = (int32)bus->ramsize;
		bcopy(&int_val, arg, val_size);
		break;

	/* Debug related. Dumps core registers or one of the dongle memory */
	case IOV_GVAL(IOV_DUMP_DONGLE):
	{
		dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
		dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
		uint32 *p = ddo->val;
		const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */

		if (plen < sizeof(ddi) || len < sizeof(ddo)) {
			bcmerror = BCME_BADARG;
			break;
		}

		switch (ddi.type) {
		case DUMP_DONGLE_COREREG:
			ddo->n_bytes = 0;

			if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
				break; // beyond last core: core enumeration ended
			}

			ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
			ddo->address += ddi.offset; // BP address at which this dump starts

			ddo->id = si_coreid(bus->sih);
			ddo->rev = si_corerev(bus->sih);

			/* Copy 32-bit registers until the core's register window
			 * or the caller's output buffer is exhausted.
			 */
			while (ddi.offset < max_offset &&
				sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
				*p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
				ddi.offset += sizeof(uint32);
				ddo->n_bytes += sizeof(uint32);
			}
			break;
		default:
			// TODO: implement d11 SHM/TPL dumping
			bcmerror = BCME_BADARG;
			break;
		}
		break;
	}

	/* Debug related. Returns a string with dongle capabilities */
	case IOV_GVAL(IOV_DNGL_CAPS):
	{
		/* NOTE(review): if len == 0 the write to arg[len - 1] below is an
		 * out-of-bounds store — presumably bcm_iovar_lencheck() above
		 * guarantees a non-zero buffer; verify against that helper.
		 */
		strncpy(arg, bus->dhd->fw_capabilities,
			MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
		((char*)arg)[len - 1] = '\0';
		break;
	}

#if defined(DEBUGGER) || defined(DHD_DSCOPE)
	case IOV_SVAL(IOV_GDB_SERVER):
		/* debugger_*() functions may sleep, so cannot hold spinlock */
		DHD_PERIM_UNLOCK(bus->dhd);
		if (int_val > 0) {
			debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
		} else {
			debugger_close();
		}
		DHD_PERIM_LOCK(bus->dhd);
		break;
#endif /* DEBUGGER || DHD_DSCOPE */

#ifdef BCM_BUZZZ
	/* Dump dongle side buzzz trace to console */
	case IOV_GVAL(IOV_BUZZZ_DUMP):
		bcmerror = dhd_buzzz_dump_dngl(bus);
		break;
#endif /* BCM_BUZZZ */

	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
		break;

	case IOV_GVAL(IOV_RAMSIZE):
		int_val = (int32)bus->ramsize;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_RAMSIZE):
		bus->ramsize = int_val;
		bus->orig_ramsize = int_val;
		break;

	case IOV_GVAL(IOV_RAMSTART):
		int_val = (int32)bus->dongle_ram_base;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_CC_NVMSHADOW):
	{
		struct bcmstrbuf dump_b;

		bcm_binit(&dump_b, arg, len);
		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
		break;
	}

	case IOV_GVAL(IOV_SLEEP_ALLOWED):
		bool_val = bus->sleep_allowed;
		bcopy(&bool_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_SLEEP_ALLOWED):
		bus->sleep_allowed = bool_val;
		break;

	case IOV_GVAL(IOV_DONGLEISOLATION):
		int_val = bus->dhd->dongle_isolation;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DONGLEISOLATION):
		bus->dhd->dongle_isolation = bool_val;
		break;

	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
		int_val = bus->ltrsleep_on_unload;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
		bus->ltrsleep_on_unload = bool_val;
		break;

	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
	{
		struct bcmstrbuf dump_b;
		bcm_binit(&dump_b, arg, len);
		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
		break;
	}
	case IOV_GVAL(IOV_DMA_RINGINDICES):
	{
		int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}
	case IOV_SVAL(IOV_DMA_RINGINDICES):
		bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
		break;

	case IOV_GVAL(IOV_METADATA_DBG):
		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
		bcopy(&int_val, arg, val_size);
		break;
	case IOV_SVAL(IOV_METADATA_DBG):
		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
		break;

	case IOV_GVAL(IOV_RX_METADATALEN):
		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_RX_METADATALEN):
		if (int_val > 64) {
			bcmerror = BCME_BUFTOOLONG;
			break;
		}
		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
		break;

	case IOV_SVAL(IOV_TXP_THRESHOLD):
		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
		break;

	case IOV_GVAL(IOV_TXP_THRESHOLD):
		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DB1_FOR_MB):
		if (int_val)
			bus->db1_for_mb = TRUE;
		else
			bus->db1_for_mb = FALSE;
		break;

	case IOV_GVAL(IOV_DB1_FOR_MB):
		if (bus->db1_for_mb)
			int_val = 1;
		else
			int_val = 0;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_TX_METADATALEN):
		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_TX_METADATALEN):
		if (int_val > 64) {
			bcmerror = BCME_BUFTOOLONG;
			break;
		}
		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
		break;

	case IOV_SVAL(IOV_DEVRESET):
		switch (int_val) {
		case DHD_BUS_DEVRESET_ON:
			bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
			break;
		case DHD_BUS_DEVRESET_OFF:
			bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
			break;
		case DHD_BUS_DEVRESET_FLR:
			bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
			break;
		case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
			bus->flr_force_fail = TRUE;
			break;
		default:
			DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
			break;
		}
		break;
	case IOV_SVAL(IOV_FORCE_FW_TRAP):
		if (bus->dhd->busstate == DHD_BUS_DATA)
			dhdpcie_fw_trap(bus);
		else {
			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
			bcmerror = BCME_NOTUP;
		}
		break;
	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
		int_val = bus->dhd->flow_prio_map_type;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_TXBOUND):
		int_val = (int32)dhd_txbound;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_TXBOUND):
		dhd_txbound = (uint)int_val;
		break;

	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
		dhdpcie_send_mb_data(bus, (uint)int_val);
		break;

	case IOV_SVAL(IOV_INFORINGS):
		dhd_prot_init_info_rings(bus->dhd);
		break;

	case IOV_SVAL(IOV_H2D_PHASE):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
				__FUNCTION__));
			bcmerror = BCME_NOTDOWN;
			break;
		}
		if (int_val)
			bus->dhd->h2d_phase_supported = TRUE;
		else
			bus->dhd->h2d_phase_supported = FALSE;
		break;

	case IOV_GVAL(IOV_H2D_PHASE):
		int_val = (int32) bus->dhd->h2d_phase_supported;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
				__FUNCTION__));
			bcmerror = BCME_NOTDOWN;
			break;
		}
		if (int_val)
			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
		else
			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
		break;

	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
				__FUNCTION__));
			bcmerror = BCME_NOTDOWN;
			break;
		}
		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
		break;

	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_RXBOUND):
		int_val = (int32)dhd_rxbound;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_RXBOUND):
		dhd_rxbound = (uint)int_val;
		break;

	case IOV_GVAL(IOV_TRAPDATA):
	{
		struct bcmstrbuf dump_b;
		bcm_binit(&dump_b, arg, len);
		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
		break;
	}

	case IOV_GVAL(IOV_TRAPDATA_RAW):
	{
		struct bcmstrbuf dump_b;
		bcm_binit(&dump_b, arg, len);
		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
		break;
	}
	case IOV_SVAL(IOV_HANGREPORT):
		bus->dhd->hang_report = bool_val;
		DHD_ERROR(("%s: Set hang_report as %d\n",
			__FUNCTION__, bus->dhd->hang_report));
		break;

	case IOV_GVAL(IOV_HANGREPORT):
		int_val = (int32)bus->dhd->hang_report;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CTO_PREVENTION):
	{
		uint32 pcie_lnkst;

		/* CTO (completion timeout) prevention needs PCIe core rev >= 19,
		 * and is additionally unsupported on rev 19 at Gen1 link speed.
		 */
		if (bus->sih->buscorerev < 19) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		/* Indirect config-space read: select PCI_LINK_STATUS via
		 * configaddr, then read it back through configdata.
		 */
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);

		pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, configdata), 0, 0);

		if ((bus->sih->buscorerev == 19) &&
			(((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
			PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		bus->cto_enable = bool_val;
		dhdpcie_cto_init(bus, bus->cto_enable);
		DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
			__FUNCTION__, bus->cto_enable));
	}
	break;

	case IOV_GVAL(IOV_CTO_PREVENTION):
		if (bus->sih->buscorerev < 19) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		int_val = (int32)bus->cto_enable;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CTO_THRESHOLD):
	{
		if (bus->sih->buscorerev < 19) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		bus->cto_threshold = (uint32)int_val;
	}
	break;

	case IOV_GVAL(IOV_CTO_THRESHOLD):
		if (bus->sih->buscorerev < 19) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		if (bus->cto_threshold)
			int_val = (int32)bus->cto_threshold;
		else
			int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;

		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_PCIE_WD_RESET):
		if (bool_val) {
			/* Legacy chipcommon watchdog reset */
			dhdpcie_cc_watchdog_reset(bus);
		}
		break;

	case IOV_GVAL(IOV_IDMA_ENABLE):
		int_val = bus->idma_enabled;
		bcopy(&int_val, arg, val_size);
		break;
	case IOV_SVAL(IOV_IDMA_ENABLE):
		bus->idma_enabled = (bool)int_val;
		break;
	case IOV_GVAL(IOV_IFRM_ENABLE):
		int_val = bus->ifrm_enabled;
		bcopy(&int_val, arg, val_size);
		break;
	case IOV_SVAL(IOV_IFRM_ENABLE):
		bus->ifrm_enabled = (bool)int_val;
		break;
	case IOV_GVAL(IOV_CLEAR_RING):
		bcopy(&int_val, arg, val_size);
		dhd_flow_rings_flush(bus->dhd, 0);
		break;
	case IOV_GVAL(IOV_DAR_ENABLE):
		int_val = bus->dar_enabled;
		bcopy(&int_val, arg, val_size);
		break;
	case IOV_SVAL(IOV_DAR_ENABLE):
		bus->dar_enabled = (bool)int_val;
		break;
	case IOV_GVAL(IOV_HSCBSIZE):
		dhd_get_hscb_info(bus->dhd->prot, NULL, (uint32 *)arg);
		break;
	case IOV_GVAL(IOV_HSCBBYTES):
		dhd_get_hscb_buff(bus->dhd->prot, int_val, int_val2, (void*)arg);
		break;

	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}

exit:
	return bcmerror;
} /* dhdpcie_bus_doiovar */

/** Transfers bytes from host to dongle using pio mode */
static int
dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
{
	if (bus->dhd ==
NULL) { + DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); + return 0; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); + return 0; + } + if (bus->dhd->busstate != DHD_BUS_DATA) { + DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__)); + return 0; + } + dhdmsgbuf_lpbk_req(bus->dhd, len); + return 0; +} + +/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */ +void +dhd_bus_hostready(struct dhd_bus *bus) +{ + if (!bus->dhd->d2h_hostrdy_supported) { + return; + } + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__, + dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32)))); + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678); + bus->hostready_count ++; + DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count)); +} + +/* Clear INTSTATUS */ +void +dhdpcie_bus_clear_intstatus(struct dhd_bus *bus) +{ + uint32 intstatus = 0; + if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || + (bus->sih->buscorerev == 2)) { + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); + } else { + /* this is a PCIE core register..not a config register... 
		 */
		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
			intstatus);
	}
}

/*
 * dhdpcie_bus_suspend() - suspend (state=TRUE) or resume (state=FALSE) the
 * PCIe bus via the D3-INFORM / D3-ACK mailbox handshake with the dongle.
 *
 * Suspend: stop the watchdog and net queues, send H2D_HOST_D3_INFORM, wait
 * for the D3 ACK, then (if no wakelock is held) put the host PCIe device to
 * sleep. A held wakelock or D3-ACK timeout rolls the bus back to DATA state.
 * Resume: wake the host PCIe device, invalidate the BAR0 window, optionally
 * send D0-INFORM / ring hostready, restart queues and re-enable interrupts.
 *
 * Returns BCME_OK / BCME_ERROR / -EIO / -EBUSY / -ETIMEDOUT.
 */
int
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
#else
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
{
	int timeleft;
	int rc = 0;
	unsigned long flags, flags_bus;
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	int d3_read_retry = 0;
	uint32 d2h_mb_data = 0;
	uint32 zero = 0;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	printf("%s: state=%d\n", __FUNCTION__, state);
	if (bus->dhd == NULL) {
		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (dhd_query_bus_erros(bus->dhd)) {
		return BCME_ERROR;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
		DHD_ERROR(("%s: not in a readystate\n", __FUNCTION__));
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		return BCME_ERROR;
	}
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
	if (bus->dhd->dongle_reset) {
		DHD_ERROR(("Dongle is in reset state.\n"));
		return -EIO;
	}

	/* Check whether we are already in the requested state.
	 * state=TRUE means Suspend
	 * state=FALSE means Resume
	 */
	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
		return BCME_OK;
	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
		DHD_ERROR(("Bus is already in RESUME state.\n"));
		return BCME_OK;
	}

	if (state) {
		int idle_retry = 0;
		int active;

		if (bus->is_linkdown) {
			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
				__FUNCTION__, state));
			return BCME_ERROR;
		}

		/* Suspend */
		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));

		/* Park the watchdog; the backup value is restored on resume */
		bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
		if (bus->dhd->dhd_watchdog_ms_backup) {
			DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
				__FUNCTION__));
			dhd_os_wd_timer(bus->dhd, 0);
		}

		DHD_GENERAL_LOCK(bus->dhd, flags);
		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
			DHD_ERROR(("Tx Request is not ended\n"));
			bus->dhd->busstate = DHD_BUS_DATA;
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
			return -EBUSY;
		}

		bus->last_suspend_start_time = OSL_SYSUPTIME_US();

		/* stop all interface network queue. */
		dhd_bus_stop_queue(bus);
		DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
		if (byint) {
			DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
			/* Clear wait_for_d3_ack before sending D3_INFORM */
			bus->wait_for_d3_ack = 0;
			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);

			timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
			DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
		} else {
			/* Clear wait_for_d3_ack before sending D3_INFORM */
			bus->wait_for_d3_ack = 0;
			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
			/* Poll the mailbox instead of waiting for an interrupt */
			while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
				dhdpcie_handle_mb_data(bus);
				usleep_range(1000, 1500);
				d3_read_retry++;
			}
		}
#else
		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);

		/* Clear wait_for_d3_ack before sending D3_INFORM */
		bus->wait_for_d3_ack = 0;
		/*
		 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
		 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
		 * inside atomic context, so that no more DBs will be
		 * rung after sending D3_INFORM
		 */
		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);

		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */

		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);

#ifdef DHD_RECOVER_TIMEOUT
		if (bus->wait_for_d3_ack == 0) {
			/* If wait_for_d3_ack was not updated because D2H MB was not received */
			uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
				bus->pcie_mailbox_int, 0, 0);
			int host_irq_disabled = dhdpcie_irq_disabled(bus);
			if ((intstatus) && (intstatus != (uint32)-1) &&
				(timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
					" host_irq_disabled=%d\n",
					__FUNCTION__, intstatus, host_irq_disabled));
				dhd_pcie_intr_count_dump(bus->dhd);
				dhd_print_tasklet_status(bus->dhd);
				dhd_prot_process_ctrlbuf(bus->dhd);
				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
				/* Clear Interrupts */
				dhdpcie_bus_clear_intstatus(bus);
			}
		} /* bus->wait_for_d3_ack was 0 */
#endif /* DHD_RECOVER_TIMEOUT */

		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

		/* To allow threads that got pre-empted to complete.
		 */
		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
			OSL_SLEEP(1);
			idle_retry++;
		}

		if (bus->wait_for_d3_ack) {
			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));

			/* Got D3 Ack. Suspend the bus */
			if (active) {
				/* A wakelock is still held: abort the suspend and
				 * bring the dongle back to D0.
				 */
				DHD_ERROR(("%s():Suspend failed because of wakelock"
					"restoring Dongle to D0\n", __FUNCTION__));

				if (bus->dhd->dhd_watchdog_ms_backup) {
					DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
						__FUNCTION__));
					dhd_os_wd_timer(bus->dhd,
						bus->dhd->dhd_watchdog_ms_backup);
				}

				/*
				 * Dongle still thinks that it has to be in D3 state until
				 * it gets a D0 Inform, but we are backing off from suspend.
				 * Ensure that Dongle is brought back to D0.
				 *
				 * Bringing back Dongle from D3 Ack state to D0 state is a
				 * 2 step process. Dongle would want to know that D0 Inform
				 * would be sent as a MB interrupt to bring it out of D3 Ack
				 * state to D0 state. So we have to send both this message.
				 */

				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
				bus->wait_for_d3_ack = 0;

				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
				bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
				/* Enable back the intmask which was cleared in DPC
				 * after getting D3_ACK.
				 */
				bus->resume_intr_enable_count++;

				/* For Linux, Macos etc (otherthan NDIS) enable back the dongle
				 * interrupts using intmask and host interrupts
				 * which were disabled in the dhdpcie_bus_isr()->
				 * dhd_bus_handle_d3_ack().
				 */
				/* Enable back interrupt using Intmask!! */
				dhdpcie_bus_intr_enable(bus);
				/* Enable back interrupt from Host side!! */
				dhdpcie_enable_irq(bus);

				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

				if (bus->use_d0_inform) {
					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
					dhdpcie_send_mb_data(bus,
						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
				}
				/* ring doorbell 1 (hostready) */
				dhd_bus_hostready(bus);

				DHD_GENERAL_LOCK(bus->dhd, flags);
				bus->dhd->busstate = DHD_BUS_DATA;
				/* resume all interface network queue. */
				dhd_bus_start_queue(bus);
				DHD_GENERAL_UNLOCK(bus->dhd, flags);
				rc = BCME_ERROR;
			} else {
				/* Actual Suspend after no wakelock */
				/* At this time bus->bus_low_power_state will be
				 * made to DHD_BUS_D3_ACK_RECIEVED after recieving D3_ACK
				 * in dhd_bus_handle_d3_ack()
				 */
				if (bus->use_d0_inform &&
					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
				}

#if defined(BCMPCIE_OOB_HOST_WAKE)
				dhdpcie_oob_intr_set(bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */

				DHD_GENERAL_LOCK(bus->dhd, flags);
				/* The Host cannot process interrupts now so disable the same.
				 * No need to disable the dongle INTR using intmask, as we are
				 * already calling disabling INTRs from DPC context after
				 * getting D3_ACK in dhd_bus_handle_d3_ack.
				 * Code may not look symmetric between Suspend and
				 * Resume paths but this is done to close down the timing window
				 * between DPC and suspend context and bus->bus_low_power_state
				 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
				 */
				bus->dhd->d3ackcnt_timeout = 0;
				bus->dhd->busstate = DHD_BUS_SUSPEND;
				DHD_GENERAL_UNLOCK(bus->dhd, flags);
				DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
					"BaseAddress1(0x%x)=0x%x\n", __FUNCTION__,
					PCIECFGREG_BASEADDR0,
					dhd_pcie_config_read(bus->osh,
						PCIECFGREG_BASEADDR0, sizeof(uint32)),
					PCIECFGREG_BASEADDR1,
					dhd_pcie_config_read(bus->osh,
						PCIECFGREG_BASEADDR1, sizeof(uint32))));
				dhdpcie_dump_resource(bus);
				/* Handle Host Suspend */
				rc = dhdpcie_pci_suspend_resume(bus, state);
				if (!rc) {
					bus->last_suspend_end_time = OSL_SYSUPTIME_US();
				}
			}
		} else if (timeleft == 0) { /* D3 ACK Timeout */
			bus->dhd->d3ack_timeout_occured = TRUE;
			/* If the D3 Ack has timeout */
			bus->dhd->d3ackcnt_timeout++;
			DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
				__FUNCTION__, bus->dhd->d3ackcnt_timeout));
			DHD_BUS_LOCK(bus->bus_lock, flags_bus);
			bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
			DHD_GENERAL_LOCK(bus->dhd, flags);
			bus->dhd->busstate = DHD_BUS_DATA;
			/* resume all interface network queue. */
			dhd_bus_start_queue(bus);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
			if (!bus->dhd->dongle_trap_occured) {
				uint32 intstatus = 0;

				/* Check if PCIe bus status is valid */
				intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
					bus->pcie_mailbox_int, 0, 0);
				if (intstatus == (uint32)-1) {
					/* Invalidate PCIe bus status */
					bus->is_linkdown = 1;
				}

				dhd_bus_dump_console_buffer(bus);
				dhd_prot_debug_info_print(bus->dhd);
#ifdef DHD_FW_COREDUMP
				if (bus->dhd->memdump_enabled) {
					/* write core dump to file */
					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
					dhdpcie_mem_dump(bus);
				}
#endif /* DHD_FW_COREDUMP */

				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
					__FUNCTION__));
				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
			}
#if defined(DHD_ERPOM)
			dhd_schedule_reset(bus->dhd);
#endif // endif
			rc = -ETIMEDOUT;
		}
	} else {
		/* Resume */
		DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
		bus->last_resume_start_time = OSL_SYSUPTIME_US();

		/**
		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
		 * si_backplane_access(function to read/write backplane)
		 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
		 * window being accessed is different from the window
		 * being pointed by second_bar0win.
		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
		 * invalidating second_bar0win after resume updates
		 * PCIE2_BAR0_CORE2_WIN with right window.
		 */
		si_invalidate_second_bar0win(bus->sih);
#if defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
		rc = dhdpcie_pci_suspend_resume(bus, state);
		DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n",
			__FUNCTION__, PCIECFGREG_BASEADDR0,
			dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
			PCIECFGREG_BASEADDR1,
			dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
		dhdpcie_dump_resource(bus);

		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
		/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
		bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

		if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
			if (bus->use_d0_inform) {
				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
			}
			/* ring doorbell 1 (hostready) */
			dhd_bus_hostready(bus);
		}
		DHD_GENERAL_LOCK(bus->dhd, flags);
		bus->dhd->busstate = DHD_BUS_DATA;
		/* resume all interface network queue. */
		dhd_bus_start_queue(bus);

		/* TODO: for NDIS also we need to use enable_irq in future */
		bus->resume_intr_enable_count++;

		/* For Linux, Macos etc (otherthan NDIS) enable back the dongle interrupts
		 * using intmask and host interrupts
		 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
		 */
		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
		dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!!
*/ + + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (bus->dhd->dhd_watchdog_ms_backup) { + DHD_ERROR(("%s: Enabling wdtick after resume\n", + __FUNCTION__)); + dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup); + } + + bus->last_resume_end_time = OSL_SYSUPTIME_US(); + /* pro-actively update TCM rd index for EDL ring */ + DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd); + } + return rc; +} + +uint32 +dhdpcie_force_alp(struct dhd_bus *bus, bool enable) +{ + ASSERT(bus && bus->sih); + if (enable) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0); + } + return 0; +} + +/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */ +uint32 +dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time) +{ + uint reg_val; + + ASSERT(bus && bus->sih); + + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + 0x1004); + reg_val = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16); + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, + reg_val); + + return 0; +} + +static uint32 +dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk) +{ + uint16 chipid = si_chipid(bus->sih); + if ((chipid == BCM4375_CHIP_ID || + chipid == BCM4362_CHIP_ID || + chipid == BCM43751_CHIP_ID || + chipid == BCM4377_CHIP_ID) && + (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) { + len += 8; + } + DHD_ERROR(("%s: len %d\n", __FUNCTION__, len)); + return len; +} + +/** Transfers bytes from host to dongle and to host again using DMA */ +static int +dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, + uint32 len, uint32 srcdelay, uint32 destdelay, + uint32 d11_lpbk, uint32 core_num, uint32 wait) +{ + int ret = 0; + + if 
(bus->dhd == NULL) { + DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + if (bus->dhd->busstate != DHD_BUS_DATA) { + DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Bounds-check the loopback transfer length (upper bound just under 4MB) */ + if (len < 5 || len > 4194296) { + DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__)); + return BCME_ERROR; + } + + len = dhd_apply_d11_war_length(bus, len, d11_lpbk); + + bus->dmaxfer_complete = FALSE; + ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, + d11_lpbk, core_num); + if (ret != BCME_OK || !wait) { + DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__, + ret, wait)); + } else { + ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete); + if (ret < 0) + ret = BCME_NOTREADY; + } + + return ret; + +} + +static int +dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) +{ + int bcmerror = 0; + volatile uint32 *cr4_regs; + + if (!bus->sih) { + DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__)); + return BCME_ERROR; + } + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). + */ + if (enter) { + /* Make sure BAR1 maps to backplane address 0 */ + dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000); + bus->alp_only = TRUE; + + /* some chips (e.g. 43602) have two ARM cores, the CR4 receives the firmware. 
*/ + cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + + if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + /* reset last 4 bytes of RAM address. to be used for shared area */ + dhdpcie_init_shared_addr(bus); + } else if (cr4_regs == NULL) { /* no CR4 present on chip */ + si_core_disable(bus->sih, 0); + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + + /* Clear the top bit of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4, + (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_ERROR; + goto fail; + } + } + } else { + /* For CR4, + * Halt ARM + * Remove ARM reset + * Read RAM base address [0x18_0000] + * [next] Download firmware + * [done at else] Populate the reset vector + * [done at else] Remove ARM halt + */ + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + if (BCM43602_CHIP(bus->sih->chip)) { + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); + } + /* reset last 4 bytes of RAM address. 
to be used for shared area */ + dhdpcie_init_shared_addr(bus); + } + } else { + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + /* write random numbers to sysmem for the purpose of + * randomizing heap address space. + */ + if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) { + DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n", + __FUNCTION__)); + goto fail; + } + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + /* now remove reset and halt and continue to run CA7 */ + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* Enable remap before ARM reset but after vars. 
+ * No backplane access in remap mode + */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + if (BCM43602_CHIP(bus->sih->chip)) { + /* Firmware crashes on SOCSRAM access when core is in reset */ + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + } + + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + /* write a random number to TCM for the purpose of + * randomizing heap address space. 
+ */ + if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) { + DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n", + __FUNCTION__)); + goto fail; + } + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_ERROR; + goto fail; + } + } + + /* now remove reset and halt and continue to run CR4 */ + } + + si_core_reset(bus->sih, 0, 0); + + /* Allow HT Clock now that the ARM is running. */ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to PCIE core */ + si_setcore(bus->sih, PCIE2_CORE_ID, 0); + + return bcmerror; +} /* dhdpcie_bus_download_state */ + +static int +dhdpcie_bus_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize, phys_size; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? 
ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + varaddr += bus->dongle_ram_base; + + if (bus->vars) { + + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + /* Write the vars list */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize); + + /* Implement read back and verify later */ +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) { + MFREE(bus->dhd->osh, vbuffer, varsize); + return BCME_NOMEM; + } + + /* Upload image to verify downloaded contents. */ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__, + phys_size, bus->ramsize)); + DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__, + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. 
+ */ + if (bcmerror) { + varsizew = 0; + bus->nvram_csm = varsizew; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + bus->nvram_csm = varsizew; + varsizew = htol32(varsizew); + } + + DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} /* dhdpcie_bus_write_vars */ + +int +dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Basic sanity checks */ + if (bus->dhd->up) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? 
len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); + +#ifdef DHD_USE_SINGLE_NVRAM_FILE + if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { + char *sp = NULL; + char *ep = NULL; + int i; + char tag[2][8] = {"ccode=", "regrev="}; + + /* Find ccode and regrev info */ + for (i = 0; i < 2; i++) { + sp = strnstr(bus->vars, tag[i], bus->varsz); + if (!sp) { + DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n", + __FUNCTION__, bus->nv_path)); + bcmerror = BCME_ERROR; + goto err; + } + sp = strchr(sp, '='); + ep = strchr(sp, '\0'); + /* We assumed that string length of both ccode and + * regrev values should not exceed WLC_CNTRY_BUF_SZ + */ + if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) { + sp++; + while (*sp != '\0') { + DHD_INFO(("%s: parse '%s', current sp = '%c'\n", + __FUNCTION__, tag[i], *sp)); + *sp++ = '0'; + } + } else { + DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n", + __FUNCTION__, tag[i])); + bcmerror = BCME_ERROR; + goto err; + } + } + } +#endif /* DHD_USE_SINGLE_NVRAM_FILE */ + +err: + return bcmerror; +} + +/* loop through the capability list and see if the pcie capabilty exists */ +uint8 +dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id) +{ + uint8 cap_id; + uint8 cap_ptr = 0; + uint8 byte_val; + + /* check for Header type 0 */ + byte_val = read_pci_cfg_byte(PCI_CFG_HDR); + if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) { + DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__)); + goto end; + } + + /* check if the capability pointer field exists */ + byte_val = read_pci_cfg_byte(PCI_CFG_STAT); + if (!(byte_val & PCI_CAPPTR_PRESENT)) { + DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__)); + goto end; + } + + cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR); + /* check if the capability pointer is 0x00 */ + if (cap_ptr == 0x00) { + DHD_ERROR(("%s : PCI 
CAP pointer is 0x00.\n", __FUNCTION__)); + goto end; + } + + /* loop through the capability list and see if the PCIe capability exists */ + + cap_id = read_pci_cfg_byte(cap_ptr); + + while (cap_id != req_cap_id) { + cap_ptr = read_pci_cfg_byte((cap_ptr + 1)); + if (cap_ptr == 0x00) break; + cap_id = read_pci_cfg_byte(cap_ptr); + } + +end: + return cap_ptr; +} + +/* Enable/disable PME assertion in the PCI Power Management CSR; + * always write-1-to-clear the sticky PME_Status bit first. + */ +void +dhdpcie_pme_active(osl_t *osh, bool enable) +{ + uint8 cap_ptr; + uint32 pme_csr; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return; + } + + pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32)); + DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr)); + + pme_csr |= PME_CSR_PME_STAT; + if (enable) { + pme_csr |= PME_CSR_PME_EN; + } else { + pme_csr &= ~PME_CSR_PME_EN; + } + + OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr); +} + +bool +dhdpcie_pme_cap(osl_t *osh) +{ + uint8 cap_ptr; + uint32 pme_cap; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return FALSE; + } + + pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32)); + + DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap)); + + return ((pme_cap & PME_CAP_PM_STATES) != 0); +} + +uint32 +dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val) +{ + + uint8 pcie_cap; + uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ + uint32 reg_val; + + pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); + + if (!pcie_cap) { + DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); + return 0; + } + + lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; + + /* set operation */ + if (mask) { + /* read */ + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + + /* modify */ + reg_val &= 
~mask; + reg_val |= (mask & val); + + /* write */ + OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); + } + return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); +} + +uint8 +dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val) +{ + uint8 pcie_cap; + uint32 reg_val; + uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ + + pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); + + if (!pcie_cap) { + DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); + return 0; + } + + lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; + + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + /* set operation */ + if (mask) { + if (val) + reg_val |= PCIE_CLKREQ_ENAB; + else + reg_val &= ~PCIE_CLKREQ_ENAB; + OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + } + if (reg_val & PCIE_CLKREQ_ENAB) + return 1; + else + return 0; +} + +void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n"); + bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" + "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" + "dpc_return_busdown_count=%lu\n", + dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count, + dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count, + dhd->bus->dpc_return_busdown_count); +#ifdef BCMPCIE_OOB_HOST_WAKE + bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu" + " oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n", + dhd->bus->oob_intr_count, dhd->bus->oob_intr_enable_count, + dhd->bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(dhd->bus), + dhd->bus->last_oob_irq_time); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + bcm_bprintf(strbuf, "\ncurrent_time=%llu isr_entry_time=%llu isr_exit_time=%llu\n" + "dpc_entry_time=%llu 
last_process_ctrlbuf_time=%llu " + "last_process_flowring_time=%llu last_process_txcpl_time=%llu\n" + "last_process_rxcpl_time=%llu last_process_infocpl_time=%llu " + "dpc_exit_time=%llu resched_dpc_time=%llu\n", + OSL_SYSUPTIME_US(), dhd->bus->isr_entry_time, dhd->bus->isr_exit_time, + dhd->bus->dpc_entry_time, dhd->bus->last_process_ctrlbuf_time, + dhd->bus->last_process_flowring_time, dhd->bus->last_process_txcpl_time, + dhd->bus->last_process_rxcpl_time, dhd->bus->last_process_infocpl_time, + dhd->bus->dpc_exit_time, dhd->bus->resched_dpc_time); + + bcm_bprintf(strbuf, "\nlast_suspend_start_time=%llu last_suspend_end_time=%llu" + " last_resume_start_time=%llu last_resume_end_time=%llu\n", + dhd->bus->last_suspend_start_time, dhd->bus->last_suspend_end_time, + dhd->bus->last_resume_start_time, dhd->bus->last_resume_end_time); + +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) + bcm_bprintf(strbuf, "logtrace_thread_entry_time=%llu " + "logtrace_thread_sem_down_time=%llu " + "logtrace_thread_flush_time=%llu " + "logtrace_thread_unexpected_break_time=%llu " + " logtrace_thread_complete_time=%llu\n", + dhd->logtrace_thr_ts.entry_time, dhd->logtrace_thr_ts.sem_down_time, + dhd->logtrace_thr_ts.flush_time, dhd->logtrace_thr_ts.unexpected_break_time, + dhd->logtrace_thr_ts.complete_time); +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 d2h_db0 = 0; + uint32 d2h_mb_data = 0; + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_int, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_mask, 0, 0); + d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", + 
intstatus, intmask, d2h_db0); + bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", + d2h_mb_data, dhd->bus->def_intmask); +} +/** Add bus dump output to a buffer */ +void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + uint16 flowid; + int ix = 0; + flow_ring_node_t *flow_ring_node; + flow_info_t *flow_info; +#ifdef TX_STATUS_LATENCY_STATS + uint8 ifindex; + if_flow_lkup_t *if_flow_lkup; + dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS]; +#endif /* TX_STATUS_LATENCY_STATS */ + + if (dhdp->busstate != DHD_BUS_DATA) + return; + +#ifdef TX_STATUS_LATENCY_STATS + memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency)); +#endif /* TX_STATUS_LATENCY_STATS */ +#ifdef DHD_WAKE_STATUS + bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n", + bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake, + dhdp->bus->wake_counts.rcwake); +#ifdef DHD_WAKE_RX_STATUS + bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n", + dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast, + dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp); + bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n", + dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6, + dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other); + bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n", + dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na, + dhdp->bus->wake_counts.rx_icmpv6_ns); +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + for (flowid = 0; flowid < WLC_E_LAST; flowid++) + if (dhdp->bus->wake_counts.rc_event[flowid] != 0) + bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid), + dhdp->bus->wake_counts.rc_event[flowid]); + bcm_bprintf(strbuf, "\n"); +#endif /* DHD_WAKE_EVENT_STATUS */ +#endif /* DHD_WAKE_STATUS */ + + dhd_prot_print_info(dhdp, strbuf); + dhd_dump_intr_registers(dhdp, strbuf); + 
dhd_dump_intr_counters(dhdp, strbuf); + bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", + dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); + bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); + bcm_bprintf(strbuf, + "%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ", + "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen", + " Overflows", " RD", " WR"); + +#ifdef TX_STATUS_LATENCY_STATS + /* Average Tx status/Completion Latency in micro secs */ + bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us"); +#endif /* TX_STATUS_LATENCY_STATS */ + + bcm_bprintf(strbuf, "\n"); + + for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + if (!flow_ring_node->active) + continue; + + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, + "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++, + flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, + MAC2STRDBG(flow_info->da), + DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)), + DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); + dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf, + "%4d %4d "); + +#ifdef TX_STATUS_LATENCY_STATS + bcm_bprintf(strbuf, "%16d %16d ", + flow_info->num_tx_pkts, + flow_info->num_tx_status ? 
+ DIV_U64_BY_U64(flow_info->cum_tx_status_latency, + flow_info->num_tx_status) : 0); + + ifindex = flow_info->ifindex; + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex < DHD_MAX_IFS) { + if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status; + if_tx_status_latency[ifindex].cum_tx_status_latency += + flow_info->cum_tx_status_latency; + } else { + DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n", + __FUNCTION__, ifindex, flowid)); + } +#endif /* TX_STATUS_LATENCY_STATS */ + bcm_bprintf(strbuf, "\n"); + } + + bcm_bprintf(strbuf, "\n"); + ix = 0; + bcm_bprintf(strbuf, "%4s %4s %2s %10s %7s %6s %5s %5s %10s %7s %7s %7s \n", + "Num:", "Flow", "If", " ACKED", "D11SPRS", "WLSPRS", "TSDWL", + "NOACK", "SPRS_ACKED", "EXPIRED", "DROPPED", "FWFREED"); + for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + if (!flow_ring_node->active) + continue; + + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, "%4d %4d %2d ", + ix++, flow_ring_node->flowid, flow_info->ifindex); + bcm_bprintf(strbuf, + "%5d %7d %6d %5d %5d %10d %7d %7d %7d\n", + "NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA"); + } + +#ifdef TX_STATUS_LATENCY_STATS + bcm_bprintf(strbuf, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus"); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + for (ix = 0; ix < DHD_MAX_IFS; ix++) { + if (!if_flow_lkup[ix].status) { + continue; + } + bcm_bprintf(strbuf, "%2d %16d %16d\n", + ix, + if_tx_status_latency[ix].num_tx_status ? 
+ DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency, + if_tx_status_latency[ix].num_tx_status): 0, + if_tx_status_latency[ix].num_tx_status); + } +#endif /* TX_STATUS_LATENCY_STATS */ + bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); + bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); + bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); + if (dhdp->d2h_hostrdy_supported) { + bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count); + } + bcm_bprintf(strbuf, "d2h_intr_method -> %s\n", + dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"); +} + +/** + * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their + * flow queue to their flow ring. + */ +static void +dhd_update_txflowrings(dhd_pub_t *dhd) +{ + unsigned long flags; + dll_t *item, *next; + flow_ring_node_t *flow_ring_node; + struct dhd_bus *bus = dhd->bus; + + /* Hold flowring_list_lock to ensure no race condition while accessing the List */ + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + for (item = dll_head_p(&bus->flowring_active_list); + (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item)); + item = next) { + if (dhd->hang_was_sent) { + break; + } + + next = dll_next_p(item); + flow_ring_node = dhd_constlist_to_flowring(item); + + /* Ensure that flow_ring_node in the list is Not Null */ + ASSERT(flow_ring_node != NULL); + + /* Ensure that the flowring node has valid contents */ + ASSERT(flow_ring_node->prot_info != NULL); + + dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info); + } + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); +} + +/** Mailbox ringbell Function */ +static void +dhd_bus_gen_devmb_intr(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + DHD_ERROR(("%s: mailbox communication not 
supported\n", __FUNCTION__)); + return; + } + if (bus->db1_for_mb) { + /* this is a pcie core register, not the config register */ + DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__)); + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), + ~0, 0x12345678); + } else { + DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__)); + /* NOTE(review): PCISBMbx is written twice here — presumably intentional, confirm against HW errata */ + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + } +} + +/* Upon receiving a mailbox interrupt, + * if H2D_FW_TRAP bit is set in mailbox location + * device traps + */ +static void +dhdpcie_fw_trap(dhd_bus_t *bus) +{ + /* Send the mailbox data and generate mailbox intr. */ + dhdpcie_send_mb_data(bus, H2D_FW_TRAP); + /* For FWs that cannot interpret H2D_FW_TRAP */ + (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0); +} + +/** mailbox doorbell ring function */ +void +dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) +{ + /* Skip after sending D3_INFORM */ + if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, + PCIE_INTB, PCIE_INTB); + } else { + /* this is a pcie core register, not the config register */ + DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__)); + if (IDMA_ACTIVE(bus->dhd)) { + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus), + ~0, value); + } else { + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, + 
dhd_bus_db0_addr_get(bus), ~0, 0x12345678); + } + } +} + +/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */ +void +dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake) +{ + /* this is a pcie core register, not the config register */ + /* Skip after sending D3_INFORM */ + if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + DHD_INFO(("writing a door bell 2 to the device\n")); + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus), + ~0, value); +} + +/* Fast-path doorbell: writes the pre-resolved mailbox register address directly */ +void +dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) +{ + /* Skip after sending D3_INFORM */ + if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); +} + +void +dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake) +{ + /* Skip after sending D3_INFORM */ + if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + + if (DAR_PWRREQ(bus)) { + dhd_bus_pcie_pwr_req(bus); + } + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value); +} + +static void +dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value) +{ + uint32 w; + /* Skip after sending D3_INFORM */ + if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { + DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", + __FUNCTION__, bus->bus_low_power_state)); + return; + } + w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB; + 
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w); +} + +dhd_mb_ring_t +dhd_bus_get_mbintr_fn(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + bus->pcie_mailbox_int); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhd_bus_ringbell_oldpcie; + } + } else { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + dhd_bus_db0_addr_get(bus)); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhdpcie_bus_ringbell_fast; + } + } + return dhd_bus_ringbell; +} + +dhd_mb_ring_2_t +dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus) +{ + bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + dhd_bus_db0_addr_2_get(bus)); + if (bus->pcie_mb_intr_2_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhdpcie_bus_ringbell_2_fast; + } + return dhd_bus_ringbell_2; +} + +bool BCMFASTPATH +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched = FALSE; /* Flag indicating resched wanted */ + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bus->dpc_entry_time = OSL_SYSUPTIME_US(); + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS + * to avoid IOCTL Resumed On timeout when ioctl is waiting for response + * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS + * and if we return from here, then IOCTL response will never be handled + */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + bus->dpc_return_busdown_count++; + return 0; + } + DHD_BUS_BUSY_SET_IN_DPC(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef DHD_READ_INTSTATUS_IN_DPC + if (bus->ipend) { + bus->ipend = FALSE; + bus->intstatus = 
dhdpcie_bus_intstatus(bus); + /* Check if the interrupt is ours or not */ + if (bus->intstatus == 0) { + goto INTR_ON; + } + bus->intrcount++; + } +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + + resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus); + if (!resched) { + bus->intstatus = 0; +#ifdef DHD_READ_INTSTATUS_IN_DPC +INTR_ON: +#endif /* DHD_READ_INTSTATUS_IN_DPC */ + bus->dpc_intr_enable_count++; + /* For Linux, Macos etc (otherthan NDIS) enable back the host interrupts + * which has been disabled in the dhdpcie_bus_isr() + */ + dhdpcie_enable_irq(bus); /* Enable back interrupt!! */ + bus->dpc_exit_time = OSL_SYSUPTIME_US(); + } else { + bus->resched_dpc_time = OSL_SYSUPTIME_US(); + } + + bus->dpc_sched = resched; + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return resched; + +} + +int +dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) +{ + uint32 cur_h2d_mb_data = 0; + + DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) { + DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n", + h2d_mb_data)); + /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. 
*/ + { + if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) { + DHD_ERROR(("failure sending the H2D Mailbox message " + "to firmware\n")); + goto fail; + } + } + goto done; + } + + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); + + if (cur_h2d_mb_data != 0) { + uint32 i = 0; + DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data)); + while ((i++ < 100) && cur_h2d_mb_data) { + OSL_DELAY(10); + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); + } + if (i >= 100) { + DHD_ERROR(("%s : waited 1ms for the dngl " + "to ack the previous mb transaction\n", __FUNCTION__)); + DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n", + __FUNCTION__, cur_h2d_mb_data)); + } + } + + dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0); + dhd_bus_gen_devmb_intr(bus); + +done: + if (h2d_mb_data == H2D_HOST_D3_INFORM) { + DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__)); + bus->d3_inform_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__)); + bus->d0_inform_in_use_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__)); + bus->d0_inform_cnt++; + } + return BCME_OK; +fail: + return BCME_ERROR; +} + +static void +dhd_bus_handle_d3_ack(dhd_bus_t *bus) +{ + unsigned long flags_bus; + DHD_BUS_LOCK(bus->bus_lock, flags_bus); + bus->suspend_intr_disable_count++; + /* Disable dongle Interrupts Immediately after D3 */ + + /* For Linux, Macos etc (otherthan NDIS) along with disabling + * dongle interrupt by clearing the IntMask, disable directly + * interrupt from the host side as well. Also clear the intstatus + * if it is set to avoid unnecessary intrrupts after D3 ACK. + */ + dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! 
*/ + dhdpcie_bus_clear_intstatus(bus); + dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */ + + /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */ + bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED; + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); +} +void +dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data) +{ + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data)); + + if (d2h_mb_data & D2H_DEV_FWHALT) { + DHD_ERROR(("FW trap has happened\n")); + dhdpcie_checkdied(bus, NULL, 0); + dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO); + goto exit; + } + if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { + bool ds_acked = FALSE; + BCM_REFERENCE(ds_acked); + if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { + DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n")); + bus->dhd->busstate = DHD_BUS_DOWN; + goto exit; + } + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n")); + { + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n")); + } + } + if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n")); + } + if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) { + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: D0 ACK\n")); + } + if (d2h_mb_data & D2H_DEV_D3_ACK) { + /* what should we do */ + DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n")); + if (!bus->wait_for_d3_ack) { + dhd_bus_handle_d3_ack(bus); + } + } + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +static void +dhdpcie_handle_mb_data(dhd_bus_t *bus) +{ + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) { + DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n", + __FUNCTION__, 
d2h_mb_data)); + goto exit; + } + + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); + + DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data)); + if (d2h_mb_data & D2H_DEV_FWHALT) { + DHD_ERROR(("FW trap has happened\n")); + dhdpcie_checkdied(bus, NULL, 0); + /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */ + goto exit; + } + if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { + /* what should we do */ + DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__)); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__)); + } + if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { + /* what should we do */ + DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__)); + } + if (d2h_mb_data & D2H_DEV_D3_ACK) { + /* what should we do */ + DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__)); + if (!bus->wait_for_d3_ack) { + dhd_bus_handle_d3_ack(bus); + } + } + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +static void +dhdpcie_read_handle_mb_data(dhd_bus_t *bus) +{ + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return; + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (!d2h_mb_data) { + goto exit; + } + + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); + + dhd_bus_handle_mb_data(bus, d2h_mb_data); + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } +} + +static bool +dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) +{ + bool resched = FALSE; + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + /* Msg stream interrupt */ + if (intstatus & I_BIT1) { + resched = dhdpci_bus_read_frames(bus); + } 
else if (intstatus & I_BIT0) { + /* do nothing for Now */ + } + } else { + if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) + bus->api.handle_mb_data(bus); + + if ((bus->dhd->busstate == DHD_BUS_SUSPEND) || (bus->use_mailbox && + (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE))) { + DHD_ERROR(("%s: Bus is in power save state. " + "Skip processing rest of ring buffers.\n", __FUNCTION__)); + goto exit; + } + + /* Validate intstatus only for INTX case */ + if ((bus->d2h_intr_method == PCIE_MSI) || + ((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) { +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) { + resched = dhdpci_bus_read_frames(bus); + pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); + pm_runtime_put_autosuspend(dhd_bus_to_dev(bus)); + } +#else + resched = dhdpci_bus_read_frames(bus); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + } + } + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + return resched; +} + +#if defined(DHD_H2D_LOG_TIME_SYNC) +static void +dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus) +{ + unsigned long time_elapsed; + + /* Poll for timeout value periodically */ + if ((bus->dhd->busstate == DHD_BUS_DATA) && + (bus->dhd->dhd_rte_time_sync_ms != 0) && + (bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) { + time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count; + /* Compare time is milli seconds */ + if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) { + /* + * Its fine, if it has crossed the timeout value. No need to adjust the + * elapsed time + */ + bus->dhd_rte_time_sync_count += time_elapsed; + + /* Schedule deffered work. Work function will send IOVAR. 
*/ + dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd); + } + } +} +#endif /* DHD_H2D_LOG_TIME_SYNC */ + +static bool +dhdpci_bus_read_frames(dhd_bus_t *bus) +{ + bool more = FALSE; + + /* First check if there a FW trap */ + if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) && + (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) { + dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT); + return FALSE; + } + + /* There may be frames in both ctrl buf and data buf; check ctrl buf first */ + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + + dhd_prot_process_ctrlbuf(bus->dhd); + bus->last_process_ctrlbuf_time = OSL_SYSUPTIME_US(); + /* Unlock to give chance for resp to be handled */ + DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + + /* Do not process rest of ring buf once bus enters low power state */ + if (!bus->use_mailbox && (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE)) { + DHD_ERROR(("%s: Bus is in power save state. " + "Skip processing rest of ring buffers.\n", __FUNCTION__)); + return FALSE; + } + + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + /* update the flow ring cpls */ + dhd_update_txflowrings(bus->dhd); + bus->last_process_flowring_time = OSL_SYSUPTIME_US(); + + /* With heavy TX traffic, we could get a lot of TxStatus + * so add bound + */ + more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound); + bus->last_process_txcpl_time = OSL_SYSUPTIME_US(); + + /* With heavy RX traffic, this routine potentially could spend some time + * processing RX frames without RX bound + */ + more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound); + bus->last_process_rxcpl_time = OSL_SYSUPTIME_US(); + + /* Process info ring completion messages */ +#ifdef EWP_EDL + if (!bus->dhd->dongle_edl_support) +#endif // endif + { + more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND); + bus->last_process_infocpl_time = OSL_SYSUPTIME_US(); + } +#ifdef EWP_EDL + else { + more |= 
dhd_prot_process_msgbuf_edl(bus->dhd); + bus->last_process_edl_time = OSL_SYSUPTIME_US(); + } +#endif /* EWP_EDL */ + +#ifdef IDLE_TX_FLOW_MGMT + if (bus->enable_idle_flowring_mgmt) { + /* Look for idle flow rings */ + dhd_bus_check_idle_scan(bus); + } +#endif /* IDLE_TX_FLOW_MGMT */ + + /* don't talk to the dongle if fw is about to be reloaded */ + if (bus->dhd->hang_was_sent) { + more = FALSE; + } + DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + +#if defined(DHD_H2D_LOG_TIME_SYNC) + dhdpci_bus_rte_log_time_sync_poll(bus); +#endif /* DHD_H2D_LOG_TIME_SYNC */ + return more; +} + +bool +dhdpcie_tcm_valid(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv; + uint32 shaddr = 0; + pciedev_shared_t sh; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n", + __FUNCTION__, addr)); + return FALSE; + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); + return FALSE; + } + + /* Compare any field in pciedev_shared_t */ + if (sh.console_addr != bus->pcie_sh->console_addr) { + DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n")); + return FALSE; + } + + return TRUE; +} + +static void +dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version) +{ + snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)", + firmware_api_version, host_api_version); + return; +} + +static bool +dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version) +{ + bool retcode = FALSE; + + DHD_INFO(("firmware api revision 
%d, host api revision %d\n", + firmware_api_version, host_api_version)); + + switch (firmware_api_version) { + case PCIE_SHARED_VERSION_7: + case PCIE_SHARED_VERSION_6: + case PCIE_SHARED_VERSION_5: + retcode = TRUE; + break; + default: + if (firmware_api_version <= host_api_version) + retcode = TRUE; + } + return retcode; +} + +static int +dhdpcie_readshared(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv, dma_indx_wr_buf, dma_indx_rd_buf; + uint32 shaddr = 0; + pciedev_shared_t *sh = bus->pcie_sh; + dhd_timeout_t tmo; + bool idma_en = FALSE; + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + /* start a timer for 5 seconds */ + dhd_timeout_start(&tmo, MAX_READ_TIMEOUT); + + while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) { + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + } + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n", + __FUNCTION__, addr)); + DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed)); +#ifdef DEBUG_DNGL_INIT_FAIL + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE; + dhdpcie_mem_dump(bus); +#endif /* DEBUG_DNGL_INIT_FAIL */ + return BCME_ERROR; + } else { + bus->shared_addr = (ulong)addr; + DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec " + "before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed)); + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv)); + return rv; + } + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + 
sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + sh->dma_rxoffset = ltoh32(sh->dma_rxoffset); + sh->rings_info_ptr = ltoh32(sh->rings_info_ptr); + sh->flags2 = ltoh32(sh->flags2); + + /* load bus console address */ + bus->console_addr = sh->console_addr; + + /* Read the dma rx offset */ + bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset; + dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset); + + DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset)); + + bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK; + if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION))) + { + DHD_ERROR(("%s: pcie_shared version %d in dhd " + "is older than pciedev_shared version %d in dongle\n", + __FUNCTION__, PCIE_SHARED_VERSION, + bus->api.fw_rev)); + return BCME_ERROR; + } + dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION); + + bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? + sizeof(uint16) : sizeof(uint32); + DHD_INFO(("%s: Dongle advertizes %d size indices\n", + __FUNCTION__, bus->rw_index_sz)); + +#ifdef IDLE_TX_FLOW_MGMT + if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) { + DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n", + __FUNCTION__)); + bus->enable_idle_flowring_mgmt = TRUE; + } +#endif /* IDLE_TX_FLOW_MGMT */ + + if (IDMA_CAPABLE(bus)) { + if (bus->sih->buscorerev == 23) { + } else { + idma_en = TRUE; + } + } + + if (idma_en) { + bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE; + bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE; + } + + bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; + + bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? 
TRUE : FALSE; + + /* Does the FW support DMA'ing r/w indices */ + if (sh->flags & PCIE_SHARED_DMA_INDEX) { + if (!bus->dhd->dma_ring_upd_overwrite) { + { + if (!IFRM_ENAB(bus->dhd)) { + bus->dhd->dma_h2d_ring_upd_support = TRUE; + } + bus->dhd->dma_d2h_ring_upd_support = TRUE; + } + } + + if (bus->dhd->dma_d2h_ring_upd_support) + bus->dhd->d2h_sync_mode = 0; + + DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n", + __FUNCTION__, + (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0), + (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0))); + } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) { + DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n", + __FUNCTION__)); + return BCME_UNSUPPORTED; + } else { + bus->dhd->dma_h2d_ring_upd_support = FALSE; + bus->dhd->dma_d2h_ring_upd_support = FALSE; + } + + /* Does the firmware support fast delete ring? */ + if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) { + DHD_INFO(("%s: Firmware supports fast delete ring\n", + __FUNCTION__)); + bus->dhd->fast_delete_ring_support = TRUE; + } else { + DHD_INFO(("%s: Firmware does not support fast delete ring\n", + __FUNCTION__)); + bus->dhd->fast_delete_ring_support = FALSE; + } + + /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */ + { + ring_info_t ring_info; + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr, + (uint8 *)&ring_info, sizeof(ring_info_t))) < 0) + return rv; + + bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr); + bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr); + + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { + bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); + bus->max_submission_rings = ltoh16(ring_info.max_submission_queues); + bus->max_completion_rings = ltoh16(ring_info.max_completion_rings); + bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings; + bus->api.handle_mb_data = dhdpcie_read_handle_mb_data; + bus->use_mailbox = 
sh->flags & PCIE_SHARED_USE_MAILBOX; + } + else { + bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); + bus->max_submission_rings = bus->max_tx_flowrings; + bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS; + bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS; + bus->api.handle_mb_data = dhdpcie_handle_mb_data; + bus->use_mailbox = TRUE; + } + if (bus->max_completion_rings == 0) { + DHD_ERROR(("dongle completion rings are invalid %d\n", + bus->max_completion_rings)); + return BCME_ERROR; + } + if (bus->max_submission_rings == 0) { + DHD_ERROR(("dongle submission rings are invalid %d\n", + bus->max_submission_rings)); + return BCME_ERROR; + } + if (bus->max_tx_flowrings == 0) { + DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings)); + return BCME_ERROR; + } + + /* If both FW and Host support DMA'ing indices, allocate memory and notify FW + * The max_sub_queues is read from FW initialized ring_info + */ + if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_WR_BUF, bus->max_submission_rings); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_RD_BUF, bus->max_completion_rings); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices" + "Host will use w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_h2d_ring_upd_support = FALSE; + bus->dhd->idma_enable = FALSE; + } + } + + if (bus->dhd->dma_d2h_ring_upd_support) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_WR_BUF, bus->max_completion_rings); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_RD_BUF, bus->max_submission_rings); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices" + "Host will use 
w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_d2h_ring_upd_support = FALSE; + } + } + + if (IFRM_ENAB(bus->dhd)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings); + + if (dma_indx_wr_buf != BCME_OK) { + DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n", + __FUNCTION__)); + bus->dhd->ifrm_enable = FALSE; + } + } + + /* read ringmem and ringstate ptrs from shared area and store in host variables */ + dhd_fillup_ring_sharedptr_info(bus, &ring_info); + if (dhd_msg_level & DHD_INFO_VAL) { + bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); + } + DHD_INFO(("%s: ring_info\n", __FUNCTION__)); + + DHD_ERROR(("%s: max H2D queues %d\n", + __FUNCTION__, ltoh16(ring_info.max_tx_flowrings))); + + DHD_INFO(("mail box address\n")); + DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->h2d_mb_data_ptr_addr)); + DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->d2h_mb_data_ptr_addr)); + } + + DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", + __FUNCTION__, bus->dhd->d2h_sync_mode)); + + bus->dhd->d2h_hostrdy_supported = + ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT); + + bus->dhd->ext_trap_data_supported = + ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA); + + if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0) + bus->dhd->pcie_txs_metadata_enable = 0; + + bus->dhd->hscb_enable = + (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB; + +#ifdef EWP_EDL + if (host_edl_support) { + bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE; + DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support)); + } +#endif /* EWP_EDL */ + + bus->dhd->debug_buf_dest_support = + (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE; + DHD_ERROR(("FW supports debug buf dest ? %s \n", + bus->dhd->debug_buf_dest_support ? 
"Y" : "N")); + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + return BCME_OK; +} /* dhdpcie_readshared */ + +/** Read ring mem and ring state ptr info from shared memory area in device memory */ +static void +dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) +{ + uint16 i = 0; + uint16 j = 0; + uint32 tcm_memloc; + uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr; + uint16 max_tx_flowrings = bus->max_tx_flowrings; + + /* Ring mem ptr info */ + /* Alloated in the order + H2D_MSGRING_CONTROL_SUBMIT 0 + H2D_MSGRING_RXPOST_SUBMIT 1 + D2H_MSGRING_CONTROL_COMPLETE 2 + D2H_MSGRING_TX_COMPLETE 3 + D2H_MSGRING_RX_COMPLETE 4 + */ + + { + /* ringmemptr holds start of the mem block address space */ + tcm_memloc = ltoh32(ring_info->ringmem_ptr); + + /* Find out ringmem ptr for each ring common ring */ + for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) { + bus->ring_sh[i].ring_mem_addr = tcm_memloc; + /* Update mem block */ + tcm_memloc = tcm_memloc + sizeof(ring_mem_t); + DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__, + i, bus->ring_sh[i].ring_mem_addr)); + } + } + + /* Ring state mem ptr info */ + { + d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr); + d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr); + h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr); + h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr); + + /* Store h2d common ring write/read pointers */ + for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store d2h common ring write/read pointers */ + for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) { + 
bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + + /* update mem block */ + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store txflow ring write/read pointers */ + if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { + max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS; + } else { + /* Account for Debug info h2d ring located after the last tx flow ring */ + max_tx_flowrings = max_tx_flowrings + 1; + } + for (j = 0; j < max_tx_flowrings; i++, j++) + { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n", + __FUNCTION__, i, + bus->ring_sh[i].ring_state_w, + bus->ring_sh[i].ring_state_r)); + } + /* store wr/rd pointers for debug info completion ring */ + bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } +} /* dhd_fillup_ring_sharedptr_info */ + +/** + * Initialize bus module: prepare for communication with the dongle. Called after downloading + * firmware into the dongle. 
+ */ +int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + if (bus->sih->buscorerev == 66) { + dhd_bus_pcie_pwr_req_clear_reload_war(bus); + } + + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req(bus); + } + + /* Make sure we're talking to the core. */ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + /* before opening up bus for data transfer, check if shared are is intact */ + ret = dhdpcie_readshared(bus); + if (ret < 0) { + DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__)); + goto exit; + } + + /* Make sure we're talking to the core. */ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + dhd_init_bus_lock(bus); + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; + dhdp->dhd_bus_busy_state = 0; + + /* D11 status via PCIe completion header */ + if ((ret = dhdpcie_init_d11status(bus)) < 0) { + goto exit; + } + + if (!dhd_download_fw_on_driverload) + dhd_dpc_enable(bus->dhd); + /* Enable the interrupt after device is up */ + dhdpcie_bus_intr_enable(bus); + + bus->intr_enabled = TRUE; + + /* bcmsdh_intr_unmask(bus->sdh); */ + bus->idletime = 0; + + /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */ + if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { + bus->use_d0_inform = TRUE; + } else { + bus->use_d0_inform = FALSE; + } + +exit: + if (MULTIBP_ENAB(bus->sih)) { + dhd_bus_pcie_pwr_req_clear(bus); + } + return ret; +} + +static void +dhdpcie_init_shared_addr(dhd_bus_t *bus) +{ + uint32 addr = 0; + uint32 val = 0; + addr = bus->dongle_ram_base + bus->ramsize - 4; + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); +} + +bool +dhdpcie_chipmatch(uint16 vendor, uint16 device) +{ + if (vendor != PCI_VENDOR_ID_BROADCOM) { + 
DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, + vendor, device)); + return (-ENODEV); + } + + if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) || + (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) || + (device == BCM43569_CHIP_ID)) { + return 0; + } + + if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) || + (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) { + return 0; + } + + if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) || + (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) { + return 0; + } + + if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) || + (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) { + return 0; + } + + if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) || + (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) { + return 0; + } + + if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) || + (device == BCM43452_D11AC5G_ID)) { + return 0; + } + + if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) || + (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) { + return 0; + } + + if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) || + (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) { + return 0; + } + + if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) || + (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) { + return 0; + } + + if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) || + (device == BCM4358_D11AC5G_ID)) { + return 0; + } + + if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) || + (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) { + return 0; + } + + if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) || + (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) { + return 0; + } + + if ((device == 
BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) || + (device == BCM4359_D11AC5G_ID)) { + return 0; + } + + if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) || + (device == BCM43596_D11AC5G_ID)) { + return 0; + } + + if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) || + (device == BCM43597_D11AC5G_ID)) { + return 0; + } + + if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) || + (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) { + return 0; + } + + if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) || + (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) { + return 0; + } + if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) || + (device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) { + return 0; + } + if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) || + (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) { + return 0; + } + if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) || + (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) { + return 0; + } + + if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) || + (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) { + return 0; + } + + if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) || + (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) || + (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) { + return 0; + } + + if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) || + (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) { + return 0; + } + + if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) || + (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) { + return 0; + } + + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); + return (-ENODEV); +} /* dhdpcie_chipmatch */ + +/** + * Name: 
dhdpcie_cc_nvmshadow + * + * Description: + * A shadow of OTP/SPROM exists in ChipCommon Region + * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF). + * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size + * can also be read from ChipCommon Registers. + */ +static int +dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) +{ + uint16 dump_offset = 0; + uint32 dump_size = 0, otp_size = 0, sprom_size = 0; + + /* Table for 65nm OTP Size (in bits) */ + int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024}; + + volatile uint16 *nvm_shadow; + + uint cur_coreid; + uint chipc_corerev; + chipcregs_t *chipcregs; + + /* Save the current core */ + cur_coreid = si_coreid(bus->sih); + /* Switch to ChipC */ + chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0); + ASSERT(chipcregs != NULL); + + chipc_corerev = si_corerev(bus->sih); + + /* Check ChipcommonCore Rev */ + if (chipc_corerev < 44) { + DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev)); + return BCME_UNSUPPORTED; + } + + /* Check ChipID */ + if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) && + ((uint16)bus->sih->chip != BCM4355_CHIP_ID) && + ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) { + DHD_ERROR(("%s: cc_nvmdump cmd. 
supported for Olympic chips" + "4350/4345/4355/4364 only\n", __FUNCTION__)); + return BCME_UNSUPPORTED; + } + + /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */ + if (chipcregs->sromcontrol & SRC_PRESENT) { + /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */ + sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK) + >> SRC_SIZE_SHIFT))) * 1024; + bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size); + } + + if (chipcregs->sromcontrol & SRC_OTPPRESENT) { + bcm_bprintf(b, "\nOTP Present"); + + if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT) + == OTPL_WRAP_TYPE_40NM) { + /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */ + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. + */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) + >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } else { + otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE) + >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } + } else { + /* This part is untested since newer chips have 40nm OTP */ + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. + */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK) + >> OTPL_ROW_SIZE_SHIFT]; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } else { + otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE) + >> CC_CAP_OTPSIZE_SHIFT]; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n", + __FUNCTION__)); + } + } + } + + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. 
+ */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && + ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) { + DHD_ERROR(("%s: SPROM and OTP could not be found " + "sromcontrol = %x, otplayout = %x \n", + __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout)); + return BCME_NOTFOUND; + } + } else { + if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && + ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) { + DHD_ERROR(("%s: SPROM and OTP could not be found " + "sromcontrol = %x, capablities = %x \n", + __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities)); + return BCME_NOTFOUND; + } + } + + /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */ + if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) && + (chipcregs->sromcontrol & SRC_OTPPRESENT)) { + + bcm_bprintf(b, "OTP Strap selected.\n" + "\nOTP Shadow in ChipCommon:\n"); + + dump_size = otp_size / 16 ; /* 16bit words */ + + } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) && + (chipcregs->sromcontrol & SRC_PRESENT)) { + + bcm_bprintf(b, "SPROM Strap selected\n" + "\nSPROM Shadow in ChipCommon:\n"); + + /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */ + /* dump_size in 16bit words */ + dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16; + } else { + DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n", + __FUNCTION__)); + return BCME_NOTFOUND; + } + + if (bus->regs == NULL) { + DHD_ERROR(("ChipCommon Regs. not initialized\n")); + return BCME_NOTREADY; + } else { + bcm_bprintf(b, "\n OffSet:"); + + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. 
+ */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + /* Chip common can read only 8kbits, + * for ccrev >= 49 otp size is around 12 kbits so use GCI core + */ + nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0); + } else { + /* Point to the SPROM/OTP shadow in ChipCommon */ + nvm_shadow = chipcregs->sromotp; + } + + if (nvm_shadow == NULL) { + DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__)); + return BCME_NOTFOUND; + } + + /* + * Read 16 bits / iteration. + * dump_size & dump_offset in 16-bit words + */ + while (dump_offset < dump_size) { + if (dump_offset % 2 == 0) + /* Print the offset in the shadow space in Bytes */ + bcm_bprintf(b, "\n 0x%04x", dump_offset * 2); + + bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset)); + dump_offset += 0x1; + } + } + + /* Switch back to the original core */ + si_setcore(bus->sih, cur_coreid, 0); + + return BCME_OK; +} /* dhdpcie_cc_nvmshadow */ + +/** Flow rings are dynamically created and destroyed */ +void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) +{ + void *pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node; + unsigned long flags; + + queue = &flow_ring_node->queue; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. 
+ */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + + /* clean up BUS level info */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + /* Reinitialise flowring's queue */ + dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD); + flow_ring_node->status = FLOW_RING_STATUS_CLOSED; + flow_ring_node->active = FALSE; + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Hold flowring_list_lock to ensure no race condition while accessing the List */ + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_delete(&flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + /* Release the flowring object back into the pool */ + dhd_prot_flowrings_pool_release(bus->dhd, + flow_ring_node->flowid, flow_ring_node->prot_info); + + /* Free the flowid back to the flowid allocator */ + dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, + flow_ring_node->flowid); +} + +/** + * Allocate a Flow ring buffer, + * Init Ring buffer, send Msg to device about flow ring creation +*/ +int +dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg) +{ + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; + + DHD_INFO(("%s :Flow create\n", __FUNCTION__)); + + /* Send Msg to device about flow ring creation */ + if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK) + return BCME_NOMEM; + + return BCME_OK; +} + +/** Handle response from dongle on a 'flow ring create' request */ +void +dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) +{ + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid 
== flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow create Response failure error status = %d \n", + __FUNCTION__, status)); + /* Call Flow clean up */ + dhd_bus_clean_flow_ring(bus, flow_ring_node); + return; + } + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Now add the Flow ring node into the active list + * Note that this code to add the newly created node to the active + * list was living in dhd_flowid_lookup. But note that after + * adding the node to the active list the contents of node is being + * filled in dhd_prot_flow_ring_create. + * If there is a D2H interrupt after the node gets added to the + * active list and before the node gets populated with values + * from the Bottom half dhd_update_txflowrings would be called. + * which will then try to walk through the active flow ring list, + * pickup the nodes and operate on them. Now note that since + * the function dhd_prot_flow_ring_create is not finished yet + * the contents of flow_ring_node can still be NULL leading to + * crashes. Hence the flow_ring_node should be added to the + * active list only after its truely created, which is after + * receiving the create response message from the Host. 
+ */ + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + return; +} + +int +dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg) +{ + void * pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_INFO(("%s :Flow Delete\n", __FUNCTION__)); + + flow_ring_node = (flow_ring_node_t *)arg; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid)); + return BCME_ERROR; + } + flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING; + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Send Msg to device about flow ring deletion */ + dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +void +dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status) +{ + flow_ring_node_t *flow_ring_node; + + DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow Delete Response failure error status = %d \n", + 
__FUNCTION__, status)); + return; + } + /* Call Flow clean up */ + dhd_bus_clean_flow_ring(bus, flow_ring_node); + + return; + +} + +int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) +{ + void *pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_INFO(("%s :Flow Flush\n", __FUNCTION__)); + + flow_ring_node = (flow_ring_node_t *)arg; + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN + * once flow ring flush response is received for this flowring node. + */ + flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Send Msg to device about flow ring flush */ + dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +void +dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) +{ + flow_ring_node_t *flow_ring_node; + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow flush Response failure error status = %d \n", + __FUNCTION__, status)); + return; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + return; +} + +uint32 +dhd_bus_max_h2d_queues(struct dhd_bus *bus) +{ + return bus->max_submission_rings; +} + +/* To be symmetric with SDIO */ +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + return; +} + +void +dhd_bus_set_linkdown(dhd_pub_t 
*dhdp, bool val) +{ + dhdp->bus->is_linkdown = val; +} + +#ifdef IDLE_TX_FLOW_MGMT +/* resume request */ +int +dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg) +{ + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; + + DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid)); + + flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING; + + /* Send Msg to device about flow ring resume */ + dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +/* add the node back to active flowring */ +void +dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status) +{ + + flow_ring_node_t *flow_ring_node; + + DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Error Status = %d \n", + __FUNCTION__, status)); + return; + } + + DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n", + __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len)); + + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + + dhd_bus_schedule_queue(bus, flowid, FALSE); + return; +} + +/* scan the flow rings in active list for idle time out */ +void +dhd_bus_check_idle_scan(dhd_bus_t *bus) +{ + uint64 time_stamp; /* in millisec */ + uint64 diff; + + time_stamp = OSL_SYSUPTIME(); + diff = time_stamp - bus->active_list_last_process_ts; + + if (diff > IDLE_FLOW_LIST_TIMEOUT) { + dhd_bus_idle_scan(bus); + bus->active_list_last_process_ts = OSL_SYSUPTIME(); + } + + return; +} + +/* scan the nodes in active list till it finds a non idle node */ +void +dhd_bus_idle_scan(dhd_bus_t *bus) +{ + dll_t *item, *prev; + flow_ring_node_t *flow_ring_node; + uint64 time_stamp, diff; + unsigned long flags; + uint16 ringid[MAX_SUSPEND_REQ]; + uint16 count = 0; + + time_stamp = OSL_SYSUPTIME(); + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + for (item = 
dll_tail_p(&bus->flowring_active_list); + !dll_end(&bus->flowring_active_list, item); item = prev) { + prev = dll_prev_p(item); + + flow_ring_node = dhd_constlist_to_flowring(item); + + if (flow_ring_node->flowid == (bus->max_submission_rings - 1)) + continue; + + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + /* Takes care of deleting zombie rings */ + /* delete from the active list */ + DHD_INFO(("deleting flow id %u from active list\n", + flow_ring_node->flowid)); + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + continue; + } + + diff = time_stamp - flow_ring_node->last_active_ts; + + if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) { + DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid)); + /* delete from the active list */ + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED; + ringid[count] = flow_ring_node->flowid; + count++; + if (count == MAX_SUSPEND_REQ) { + /* create a batch message now!! */ + dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); + count = 0; + } + + } else { + + /* No more scanning, break from here! 
*/ + break; + } + } + + if (count) { + dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); + } + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} + +void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + dll_t* list; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + /* check if the node is already at head, otherwise delete it and prepend */ + list = dll_head_p(&bus->flowring_active_list); + if (&flow_ring_node->list != list) { + dll_delete(&flow_ring_node->list); + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + } + + /* update flow ring timestamp */ + flow_ring_node->last_active_ts = OSL_SYSUPTIME(); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} + +void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + /* update flow ring timestamp */ + flow_ring_node->last_active_ts = OSL_SYSUPTIME(); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} +void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + dll_delete(&flow_ring_node->list); +} + +void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} +#endif /* IDLE_TX_FLOW_MGMT */ + +int +dhdpcie_bus_clock_start(struct dhd_bus *bus) +{ + return dhdpcie_start_host_pcieclock(bus); +} + +int +dhdpcie_bus_clock_stop(struct dhd_bus *bus) +{ + return dhdpcie_stop_host_pcieclock(bus); +} + +int 
+dhdpcie_bus_disable_device(struct dhd_bus *bus) +{ + return dhdpcie_disable_device(bus); +} + +int +dhdpcie_bus_enable_device(struct dhd_bus *bus) +{ + return dhdpcie_enable_device(bus); +} + +int +dhdpcie_bus_alloc_resource(struct dhd_bus *bus) +{ + return dhdpcie_alloc_resource(bus); +} + +void +dhdpcie_bus_free_resource(struct dhd_bus *bus) +{ + dhdpcie_free_resource(bus); +} + +int +dhd_bus_request_irq(struct dhd_bus *bus) +{ + return dhdpcie_bus_request_irq(bus); +} + +bool +dhdpcie_bus_dongle_attach(struct dhd_bus *bus) +{ + return dhdpcie_dongle_attach(bus); +} + +int +dhd_bus_release_dongle(struct dhd_bus *bus) +{ + bool dongle_isolation; + osl_t *osh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + osh = bus->osh; + ASSERT(osh); + + if (bus->dhd) { +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + debugger_close(); +#endif /* DEBUGGER || DHD_DSCOPE */ + + dongle_isolation = bus->dhd->dongle_isolation; + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + } + } + + return 0; +} + +void +dhdpcie_cto_init(struct dhd_bus *bus, bool enable) +{ + uint32 val; + + if (enable) { + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, + PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR); + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN); + if (bus->cto_threshold == 0) { + bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT; + } + + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ctoctrl), ~0, + ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) & + PCIE_CTO_TO_THRESHHOLD_MASK) | + ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) & + PCIE_CTO_CLKCHKCNT_MASK) | + PCIE_CTO_ENAB_MASK); + } else { + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0); + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN); + + si_corereg(bus->sih, bus->sih->buscoreidx, 
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0); + } +} + +static void +dhdpcie_cto_error_recovery(struct dhd_bus *bus) +{ + uint32 pci_intmask, err_status, dar_val; + uint8 i = 0; + uint32 val; + + pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4); + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK); + + DHD_OS_WAKE_LOCK(bus->dhd); + + DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref)); + + /* + * DAR still accessible + */ + dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_CLK_CTRL(bus->sih->buscorerev), 0, 0); + DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_CLK_CTRL(bus->sih->buscorerev), dar_val)); + + dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), 0, 0); + DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), dar_val)); + + dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_INTSTAT(bus->sih->buscorerev), 0, 0); + DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_INTSTAT(bus->sih->buscorerev), dar_val)); + + dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_ERRLOG(bus->sih->buscorerev), 0, 0); + DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRLOG(bus->sih->buscorerev), dar_val)); + + dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_ERRADDR(bus->sih->buscorerev), 0, 0); + DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRADDR(bus->sih->buscorerev), dar_val)); + + dar_val = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_PCIMailBoxInt(bus->sih->buscorerev), 0, 0); + DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIMailBoxInt(bus->sih->buscorerev), dar_val)); + + /* reset backplane */ + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST); + + /* clear timeout error */ + while (1) { + err_status = si_corereg(bus->sih, bus->sih->buscoreidx, + DAR_ERRLOG(bus->sih->buscorerev), + 0, 0); + if (err_status & PCIE_CTO_ERR_MASK) { + si_corereg(bus->sih, 
bus->sih->buscoreidx, + DAR_ERRLOG(bus->sih->buscorerev), + ~0, PCIE_CTO_ERR_MASK); + } else { + break; + } + OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000); + i++; + if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) { + DHD_ERROR(("cto recovery fail\n")); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return; + } + } + + /* clear interrupt status */ + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK); + + /* Halt ARM & remove reset */ + /* TBD : we can add ARM Halt here in case */ + + /* reset SPROM_CFG_TO_SB_RST */ + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + + DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n", + PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val)); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST); + + val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); + DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n", + PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val)); + + DHD_OS_WAKE_UNLOCK(bus->dhd); +} + +void +dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus) +{ + uint32 val; + + val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, + val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT)); +} + +#if defined(DBG_PKT_MON) +static int +dhdpcie_init_d11status(struct dhd_bus *bus) +{ + uint32 addr; + uint32 flags2; + int ret = 0; + + if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) { + flags2 = bus->pcie_sh->flags2; + addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2); + flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS; + ret = dhdpcie_bus_membytes(bus, TRUE, addr, + (uint8 *)&flags2, sizeof(flags2)); + if (ret < 0) { + DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n", + __FUNCTION__)); + return ret; + } + bus->pcie_sh->flags2 = flags2; + bus->dhd->d11_tx_status = TRUE; + } + return ret; +} + +#else +static int +dhdpcie_init_d11status(struct dhd_bus *bus) +{ + return 0; +} +#endif 
// endif + +#ifdef BCMPCIE_OOB_HOST_WAKE +int +dhd_bus_oob_intr_register(dhd_pub_t *dhdp) +{ + return dhdpcie_oob_intr_register(dhdp->bus); +} + +void +dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) +{ + dhdpcie_oob_intr_unregister(dhdp->bus); +} + +void +dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) +{ + dhdpcie_oob_intr_set(dhdp->bus, enable); +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +bool +dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus) +{ + return bus->dhd->d2h_hostrdy_supported; +} + +void +dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr) +{ + dhd_bus_t *bus = pub->bus; + uint32 coreoffset = index << 12; + uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset; + uint32 value; + + while (first_addr <= last_addr) { + core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr; + if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + } + DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value)); + first_addr = first_addr + 4; + } +} + +bool +dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + else if (bus->idma_enabled) { + return bus->dhd->idma_enable; + } else { + return FALSE; + } +} + +bool +dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + else if (bus->ifrm_enabled) { + return bus->dhd->ifrm_enable; + } else { + return FALSE; + } +} + +bool +dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) { + return FALSE; + } else if (bus->dar_enabled) { + return bus->dhd->dar_enable; + } else { + return FALSE; + } +} + +void +dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option) +{ + DHD_ERROR(("ENABLING DW:%d\n", dw_option)); + bus->dw_option = dw_option; +} + +void +dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + trap_t *tr = &bus->dhd->last_trap_info; + bcm_bprintf(strbuf, + "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, 
spsr 0x%x, sp 0x%x," + " lp 0x%x, rpc 0x%x" + "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, " + "r10 0x%x, r11 0x%x, r12 0x%x\n\n", + ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr), + ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc), + ltoh32(bus->pcie_sh->trap_addr), + ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), + ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7), + ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10), + ltoh32(tr->r11), ltoh32(tr->r12)); +} + +int +dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read) +{ + int bcmerror = 0; + struct dhd_bus *bus = dhdp->bus; + + if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + + return bcmerror; +} + +int +dhd_get_idletime(dhd_pub_t *dhd) +{ + return dhd->bus->idletime; +} + +#ifdef DHD_SSSR_DUMP + +static INLINE void +dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read) +{ + OSL_DELAY(1); + serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read); + DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read)); + return; +} + +static int +dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, + uint addr_reg, uint data_reg) +{ + uint addr; + uint val = 0; + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + if (!buf) { + DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!fifo_size) { + DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Set the base address offset to 0 */ + addr = addr_reg; + val = 0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + addr = data_reg; + /* Read 4 bytes at once and loop for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++) { + serialized_backplane_access(dhd->bus, addr, sizeof(uint), &val, TRUE); + buf[i] = 
val; + OSL_DELAY(1); + } + return BCME_OK; +} + +static int +dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, + uint addr_reg) +{ + uint addr; + uint val = 0; + int i; + si_t *sih = dhd->bus->sih; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + if (!buf) { + DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!fifo_size) { + DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (addr_reg) { + + if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) && + dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) { + dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, fifo_size); + } else { + /* Check if vasip clk is disabled, if yes enable it */ + addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!val) { + val = 1; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + + addr = addr_reg; + /* Read 4 bytes at once and loop for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++, addr += 4) { + serialized_backplane_access(dhd->bus, addr, sizeof(uint), &val, + TRUE); + buf[i] = val; + OSL_DELAY(1); + } + } + } else { + uint cur_coreid; + uint chipc_corerev; + chipcregs_t *chipcregs; + + /* Save the current core */ + cur_coreid = si_coreid(sih); + + /* Switch to ChipC */ + chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + + chipc_corerev = si_corerev(sih); + + if ((chipc_corerev == 64) || (chipc_corerev == 65)) { + W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0); + + /* Read 4 bytes at once and loop for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++) { + buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data); + OSL_DELAY(1); + } + } + + /* Switch back to the original core */ + si_setcore(sih, cur_coreid, 0); + } + + return BCME_OK; +} + +#if defined(EWP_ETD_PRSRV_LOGS) +void +dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, + uint8 *ext_trap_data, void *event_decode_data) +{ + hnd_ext_trap_hdr_t *hdr = NULL; + 
bcm_tlv_t *tlv; + eventlog_trapdata_info_t *etd_evtlog = NULL; + eventlog_trap_buf_info_t *evtlog_buf_arr = NULL; + uint arr_size = 0; + int i = 0; + int err = 0; + uint32 seqnum = 0; + + if (!ext_trap_data || !event_decode_data || !dhd) + return; + + if (!dhd->concise_dbg_buf) + return; + + /* First word is original trap_data, skip */ + ext_trap_data += sizeof(uint32); + + hdr = (hnd_ext_trap_hdr_t *)ext_trap_data; + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA); + if (tlv) { + etd_evtlog = (eventlog_trapdata_info_t *)tlv->data; + DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; " + "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__, + (etd_evtlog->num_elements), + ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr))); + arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements); + if (!arr_size) { + DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__)); + return; + } + evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size); + if (!evtlog_buf_arr) { + DHD_ERROR(("%s: out of memory !\n", __FUNCTION__)); + return; + } + /* read the eventlog_trap_buf_info_t array from dongle memory */ + err = dhdpcie_bus_membytes(dhd->bus, FALSE, + (ulong)(etd_evtlog->log_arr_addr), + (uint8 *)evtlog_buf_arr, arr_size); + if (err != BCME_OK) { + DHD_ERROR(("%s: Error reading event log array from dongle !\n", + __FUNCTION__)); + goto err; + } + /* ntoh is required only for seq_num, because in the original + * case of event logs from info ring, it is sent from dongle in that way + * so for ETD also dongle follows same convention + */ + seqnum = ntoh32(etd_evtlog->seq_num); + memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN); + for (i = 0; i < (etd_evtlog->num_elements); ++i) { + /* read each individual event log buf from dongle memory */ + err = dhdpcie_bus_membytes(dhd->bus, FALSE, + ((ulong)evtlog_buf_arr[i].buf_addr), + dhd->concise_dbg_buf, (evtlog_buf_arr[i].len)); + if (err != BCME_OK) { + DHD_ERROR(("%s: Error reading event log buffer from dongle 
!\n", + __FUNCTION__)); + goto err; + } + dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf, + event_decode_data, (evtlog_buf_arr[i].len), + FALSE, hton32(seqnum)); + ++seqnum; + } +err: + MFREE(dhd->osh, evtlog_buf_arr, arr_size); + } else { + DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__)); + } +} +#endif /* BCMPCIE && DHD_LOG_DUMP */ + +static int +dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd) +{ + uint addr; + uint val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* conditionally clear bits [11:8] of PowerCtrl */ + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) { + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + return BCME_OK; +} + +static int +dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd) +{ + uint addr; + uint val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* conditionally clear bits [11:8] of PowerCtrl */ + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) { + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + val = 0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + return BCME_OK; +} + +static int +dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd) +{ + uint addr; + uint val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* clear chipcommon intmask */ + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear PMUIntMask0 */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear PMUIntMask1 */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1; + val = 0x0; + dhd_sbreg_op(dhd, addr, 
&val, FALSE); + + /* clear res_req_timer */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear macresreqtimer */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear macresreqtimer1 */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear VasipClkEn */ + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + + return BCME_OK; +} + +static void +dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd) +{ +#define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1) +#define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4) + uint trap_data_mask[MAX_NUM_D11CORES] = + {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK}; + int i; + /* Apply only for 4375 chip */ + if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) { + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i] && + (dhd->dongle_trap_data & trap_data_mask[i])) { + dhd->sssr_d11_outofreset[i] = TRUE; + } else { + dhd->sssr_d11_outofreset[i] = FALSE; + } + DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with " + "trap_data:0x%x-0x%x\n", + __FUNCTION__, i, dhd->sssr_d11_outofreset[i], + dhd->dongle_trap_data, trap_data_mask[i])); + } + } +} + +static int +dhdpcie_d11_check_outofreset(dhd_pub_t *dhd) +{ + int i; + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + /* Check if bit 0 of resetctrl is cleared */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; + if (!addr) { + DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n", + __FUNCTION__, i)); + continue; + } + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & 1)) { + dhd->sssr_d11_outofreset[i] = TRUE; + } else { + 
dhd->sssr_d11_outofreset[i] = FALSE; + } + DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n", + __FUNCTION__, i, dhd->sssr_d11_outofreset[i])); + } + dhdpcie_update_d11_status_from_trapdata(dhd); + + return BCME_OK; +} + +static int +dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd) +{ + int i; + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + /* clear request clk only if itopoobb is non zero */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val != 0) { + /* clear clockcontrolstatus */ + addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus; + val = + dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + } + } + return BCME_OK; +} + +static int +dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* Check if bit 0 of resetctrl is cleared */ + addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & 1)) { + /* clear request clk only if itopoobb is non zero */ + addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val != 0) { + /* clear clockcontrolstatus */ + addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus; + val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + } + return BCME_OK; +} + +static int +dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* clear request clk only if itopoobb is non zero */ + addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val) { + /* clear clockcontrolstatus */ + addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus; + val = 
dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + return BCME_OK; +} + +static int +dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate; + val = LTR_ACTIVE; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = LTR_SLEEP; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + return BCME_OK; +} + +static int +dhdpcie_clear_clk_req(dhd_pub_t *dhd) +{ + DHD_ERROR(("%s\n", __FUNCTION__)); + + dhdpcie_arm_clear_clk_req(dhd); + + dhdpcie_d11_clear_clk_req(dhd); + + dhdpcie_pcie_clear_clk_req(dhd); + + return BCME_OK; +} + +static int +dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd) +{ + int i; + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + /* disable core by setting bit 0 */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; + val = 1; + dhd_sbreg_op(dhd, addr, &val, FALSE); + OSL_DELAY(6000); + + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl; + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* enable core by clearing bit 0 */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; + val = 0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl; + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + } + return BCME_OK; +} + +static int 
+dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd) +{ + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i], + dhd->sssr_reg_info.mac_regs[i].sr_size, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata); + } + } + + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before, + dhd->sssr_reg_info.vasip_regs.vasip_sr_size, + dhd->sssr_reg_info.vasip_regs.vasip_sr_addr); + } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) { + dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before, + dhd->sssr_reg_info.dig_mem_info.dig_sr_size, + dhd->sssr_reg_info.dig_mem_info.dig_sr_addr); + } + + return BCME_OK; +} + +static int +dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd) +{ + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i], + dhd->sssr_reg_info.mac_regs[i].sr_size, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata); + } + } + + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, + dhd->sssr_reg_info.vasip_regs.vasip_sr_size, + dhd->sssr_reg_info.vasip_regs.vasip_sr_addr); + } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && + dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) { + dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, + dhd->sssr_reg_info.dig_mem_info.dig_sr_size, + dhd->sssr_reg_info.dig_mem_info.dig_sr_addr); + } + + return BCME_OK; +} + +static int +dhdpcie_sssr_dump(dhd_pub_t *dhd) +{ + if (!dhd->sssr_inited) { + DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); + 
return BCME_ERROR; + } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_d11_check_outofreset(dhd); + + DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_clear_intmask_and_timer(dhd); + dhdpcie_suspend_chipcommon_powerctrl(dhd); + dhdpcie_clear_clk_req(dhd); + dhdpcie_pcie_send_ltrsleep(dhd); + + /* Wait for some time before Restore */ + OSL_DELAY(6000); + + dhdpcie_resume_chipcommon_powerctrl(dhd); + dhdpcie_bring_d11_outofreset(dhd); + + DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhd_schedule_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR); + + return BCME_OK; +} + +int +dhd_bus_sssr_dump(dhd_pub_t *dhd) +{ + return dhdpcie_sssr_dump(dhd); +} + +static int +dhdpcie_fis_trigger(dhd_pub_t *dhd) +{ + if (!dhd->sssr_inited) { + DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Trigger FIS */ + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK); + OSL_DELAY(100 * 1000); + + return BCME_OK; +} + +int +dhd_bus_fis_trigger(dhd_pub_t *dhd) +{ + return dhdpcie_fis_trigger(dhd); +} + +static int +dhdpcie_fis_dump(dhd_pub_t *dhd) +{ + int i; + + if (!dhd->sssr_inited) { + DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* bring up all pmu resources */ + PMU_REG(dhd->bus->sih, min_res_mask, ~0, + 
PMU_REG(dhd->bus->sih, max_res_mask, 0, 0)); + OSL_DELAY(10 * 1000); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + dhd->sssr_d11_outofreset[i] = TRUE; + } + + dhdpcie_bring_d11_outofreset(dhd); + OSL_DELAY(6000); + + /* clear FIS Done */ + PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK); + + dhdpcie_d11_check_outofreset(dhd); + + DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhd_schedule_sssr_dump(dhd, SSSR_DUMP_MODE_FIS); + + return BCME_OK; +} + +int +dhd_bus_fis_dump(dhd_pub_t *dhd) +{ + return dhdpcie_fis_dump(dhd); +} +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_bus_get_wakecount(dhd_pub_t *dhd) +{ + return &dhd->bus->wake_counts; +} +int +dhd_bus_get_bus_wake(dhd_pub_t *dhd) +{ + return bcmpcie_set_get_wake(dhd->bus, 0); +} +#endif /* DHD_WAKE_STATUS */ + +#define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP) +#define OTP_USER_AREA_OFFSET 0x80 +#define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET) +#define OTP_VERSION_TUPLE_ID 0x15 +#define OTP_VENDOR_TUPLE_ID 0x80 +#define OTP_CIS_REGION_END_TUPLE_ID 0XFF +#define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE) +#define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG) +#define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4) +#define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL) +#define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C) +#define PMU_OTP_PWR_ON_MASK 0xC47 + +int +dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char* program, char *nv_path) +{ + uint32 val = 0; + uint16 chip_id = 0; + uint8 otp_data[2]; + char stepping[3]; + char module_name[5]; + char module_vendor = 0; + char module_rev[4]; + uint8 tuple_id = 0; + uint8 tuple_len = 0; + uint32 cur_offset = 0; + uint32 version_tuple_offset = 0; + 
char module_info[64]; + char progname[32]; + bool srom_present = 0, otp_present = 0; + uint32 sprom_ctrl = 0; + uint32 otp_ctrl = 0, minres_mask = 0; + int i = 0, j = 0, status = BCME_ERROR; + + if (!nv_path || !bus) { + return BCME_ERROR; + } + + /* read chip id first */ + if (serialized_backplane_access(bus, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE) != BCME_OK) { + DHD_ERROR(("%s: bkplane access error ! \n", __FUNCTION__)); + } + else { + chip_id = val & 0xffff; + } + + /* read SpromCtrl register */ + serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, TRUE); + val = sprom_ctrl; + + /* proceed only if OTP is present - i.e, the 5th bit OtpPresent is set + * and chip is 4355 or 4364 + */ + if ((val & 0x20) && (chip_id == 0x4355 || chip_id == 0x4364)) { + otp_present = 1; + + /* Check if the 4th bit (sprom_present) in CC Status REG is set */ + serialized_backplane_access(bus, CHIP_COMMON_STATUS_REG_ADDR, 4, &val, TRUE); + if (val & 0x10) { + srom_present = 1; + } + + /* OTP power up sequence */ + /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */ + serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, TRUE); + val = 0x1A0000; + serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &val, FALSE); + + /* 2. enable OTP power through min res mask register in PMU */ + serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, &minres_mask, TRUE); + val = minres_mask | PMU_OTP_PWR_ON_MASK; + serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, &val, FALSE); + + /* 3. if srom is present, need to set OtpSelect 4th bit + * in SpromCtrl register to read otp + */ + if (srom_present) { + + val = sprom_ctrl | 0x10; + serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &val, FALSE); + + } + /* Wait for PMU to power up. 
*/ + OSL_DELAY(500); + serialized_backplane_access(bus, PMU_RES_STATE_REG_ADDR, 4, &val, TRUE); + DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__, val)); + + serialized_backplane_access(bus, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE); + DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__, val)); + + serialized_backplane_access(bus, OTP_ADDRESS, 2, &val, TRUE); + DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__, val)); + + cur_offset = OTP_USER_AREA_ADDR + 0x40; + /* read required data from otp to construct FW string name + * data like - chip info, module info. This is present in the + * form of a Vendor CIS Tuple whose format is provided by Olympic. + * The data is in the form of ASCII character strings. + * The Vendor tuple along with other CIS tuples are present + * in the OTP user area. A CIS tuple is a TLV format. + * (T = 1-byte, L = 1-byte, V = n-bytes) + */ + + /* Find the version tuple */ + while (tuple_id != OTP_CIS_REGION_END_TUPLE_ID) { + serialized_backplane_access(bus, cur_offset, + 2, (uint *)otp_data, TRUE); + + tuple_id = otp_data[0]; + tuple_len = otp_data[1]; + if (tuple_id == OTP_VERSION_TUPLE_ID) { + version_tuple_offset = cur_offset; + break; + } + /* if its NULL tuple, skip */ + if (tuple_id == 0) + cur_offset += 1; + else + cur_offset += tuple_len + 2; + } + + /* skip the major, minor ver. 
numbers, manufacturer and product names */ + cur_offset = version_tuple_offset + 6; + + /* read the chip info */ + serialized_backplane_access(bus, cur_offset, + 2, (uint *)otp_data, TRUE); + if (otp_data[0] == 's' && otp_data[1] == '=') { + /* read the stepping */ + cur_offset += 2; + stepping[2] = 0; + serialized_backplane_access(bus, cur_offset, + 2, (uint *)stepping, TRUE); + /* read module info */ + memset(module_info, 0, 64); + cur_offset += 2; + serialized_backplane_access(bus, cur_offset, + 2, (uint *)otp_data, TRUE); + while (otp_data[0] != OTP_CIS_REGION_END_TUPLE_ID && + otp_data[1] != OTP_CIS_REGION_END_TUPLE_ID) { + memcpy(&module_info[i], otp_data, 2); + i += 2; + cur_offset += 2; + serialized_backplane_access(bus, cur_offset, + 2, (uint *)otp_data, TRUE); + } + /* replace any null characters found at the beginning + * and middle of the string + */ + for (j = 0; j < i; ++j) { + if (module_info[j] == 0) + module_info[j] = ' '; + } + DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n", + stepping[0], stepping[1], module_info)); + /* extract the module name, revision and vendor + * information from the module info string + */ + for (i = 0; module_info[i]; i++) { + if (module_info[i] == 'M' && module_info[i + 1] == '=') { + memcpy(module_name, &module_info[i + 2], 4); + module_name[4] = 0; + i += 5; + } + else if (module_info[i] == 'm' && module_info[i + 1] == '=') { + memcpy(module_rev, &module_info[i + 2], 3); + module_rev[3] = 0; + i += 4; + } + else if (module_info[i] == 'V' && module_info[i + 1] == '=') { + module_vendor = module_info[i + 2]; + i += 2; + } + } + + /* construct the complete file path to nvram as per + * olympic conventions + */ + strncpy(progname, program, sizeof(progname)); + sprintf(nv_path, "P-%s_M-%s_V-%c__m-%s.txt", progname, module_name, + module_vendor, module_rev); + DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__, nv_path)); + status = BCME_OK; + } + + /* restore back the registers to their previous values */ + if 
(srom_present) { + serialized_backplane_access(bus, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, + FALSE); + } + + if (otp_present) { + serialized_backplane_access(bus, PMU_MINRESMASK_REG_ADDR, 4, + &minres_mask, FALSE); + serialized_backplane_access(bus, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, FALSE); + } + + } + return status; +} + +/* Writes random number(s) to the TCM. FW upon initialization reads this register + * to fetch the random number, and uses it to randomize heap address space layout. + */ +static int +dhdpcie_wrt_rnd(struct dhd_bus *bus) +{ + bcm_rand_metadata_t rnd_data; + uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES]; + uint32 count = BCM_ENTROPY_HOST_NBYTES; + int ret = 0; + uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) - + ((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data)); + + memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES); + rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE); + rnd_data.count = htol32(count); + /* write the metadata about random number */ + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data)); + /* scale back by number of random number counts */ + addr -= count; + + /* Now get & write the random number(s) */ + ret = dhd_get_random_bytes(rand_buf, count); + if (ret != BCME_OK) { + return ret; + } + dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count); + + return BCME_OK; +} + +void +dhd_pcie_intr_count_dump(dhd_pub_t *dhd) +{ + struct dhd_bus *bus = dhd->bus; + uint64 current_time; + + DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n")); + DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n", + bus->resume_intr_enable_count, bus->dpc_intr_enable_count)); + DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n", + bus->isr_intr_disable_count, bus->suspend_intr_disable_count)); +#ifdef BCMPCIE_OOB_HOST_WAKE + DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n", + 
bus->oob_intr_count, bus->oob_intr_enable_count, + bus->oob_intr_disable_count)); + DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n", + dhdpcie_get_oob_irq_num(bus), + GET_SEC_USEC(bus->last_oob_irq_time))); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + DHD_ERROR(("dpc_return_busdown_count=%lu\n", + dhd->bus->dpc_return_busdown_count)); + + current_time = OSL_SYSUPTIME_US(); + DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(current_time))); + DHD_ERROR(("isr_entry_time="SEC_USEC_FMT + " isr_exit_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->isr_entry_time), + GET_SEC_USEC(bus->isr_exit_time))); + DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT + " last_process_ctrlbuf_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->dpc_entry_time), + GET_SEC_USEC(bus->last_process_ctrlbuf_time))); + DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT + " last_process_txcpl_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_process_flowring_time), + GET_SEC_USEC(bus->last_process_txcpl_time))); + DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT + " last_process_infocpl_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_process_rxcpl_time), + GET_SEC_USEC(bus->last_process_infocpl_time))); + DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT + " resched_dpc_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->dpc_exit_time), + GET_SEC_USEC(bus->resched_dpc_time))); + + DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT + " last_suspend_end_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_suspend_start_time), + GET_SEC_USEC(bus->last_suspend_end_time))); + DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT + " last_resume_end_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(bus->last_resume_start_time), + GET_SEC_USEC(bus->last_resume_end_time))); + +#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) + DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT + "logtrace_thread_sem_down_time="SEC_USEC_FMT + "logtrace_thread_flush_time="SEC_USEC_FMT + 
"logtrace_thread_unexpected_break_time="SEC_USEC_FMT + " logtrace_thread_complete_time="SEC_USEC_FMT"\n", + GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time), + GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time))); +#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ +} + +void +dhd_bus_intr_count_dump(dhd_pub_t *dhd) +{ + dhd_pcie_intr_count_dump(dhd); +} + +int +dhd_pcie_dma_info_dump(dhd_pub_t *dhd) +{ + if (dhd->bus->is_linkdown) { + DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers " + "due to PCIe link down ------- \r\n")); + return 0; + } + + DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n")); + + //HostToDev + DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0))); + DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0))); + DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0))); + + DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0))); + DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0))); + DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0))); + + 
//DevToHost + DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0))); + DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0))); + DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0))); + + DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0))); + DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0))); + DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0))); + + return 0; +} + +int +dhd_pcie_debug_info_dump(dhd_pub_t *dhd) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 d2h_db0 = 0; + uint32 d2h_mb_data = 0; + int host_irq_disabled; + + DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state)); + host_irq_disabled = dhdpcie_irq_disabled(dhd->bus); + DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled)); + dhd_print_tasklet_status(dhd); + dhd_pcie_intr_count_dump(dhd); + + DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n")); + dhdpcie_dump_resource(dhd->bus); + + DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n")); + DHD_ERROR(("Pcie RC Error Status Val=0x%x\n", + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, 
FALSE, 0))); +#ifdef EXTENDED_PCIE_DEBUG_DUMP + DHD_ERROR(("hdrlog0 =0x%x hdrlog1 =0x%x hdrlog2 =0x%x hdrlog3 =0x%x\n", + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0), + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0), + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0), + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0))); +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ + + DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n", + dhd_debug_get_rc_linkcap(dhd->bus))); + + DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n")); + DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x\n", + PCIECFGREG_STATUS_CMD, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)), + PCIECFGREG_BASEADDR0, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)), + PCIECFGREG_BASEADDR1, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)))); + DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x " + "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL, + sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2, + sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1, + sizeof(uint32)))); +#ifdef EXTENDED_PCIE_DEBUG_DUMP + DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x " + "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL, + sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL, + dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, + sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2, + 
dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2, + sizeof(uint32)))); +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_int, 0, 0); + + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + dhd->bus->pcie_mailbox_mask, 0, 0); + d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCID2H_MailBox, 0, 0); + DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); + DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", + intstatus, intmask, d2h_db0)); + + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, + dhd->bus->def_intmask)); +#ifdef EXTENDED_PCIE_DEBUG_DUMP + DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n")); + DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n", + PCIDARClkCtl(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIDARClkCtl(dhd->bus->sih->buscorerev), 0, 0), + PCIDARPwrCtl(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIDARPwrCtl(dhd->bus->sih->buscorerev), 0, 0), + PCIDARH2D_DB0(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIDARH2D_DB0(dhd->bus->sih->buscorerev), 0, 0))); +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ + if (intstatus == (uint32)-1) { + DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid intstatus\n")); + return 0; + } + + DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n")); + +#ifdef EXTENDED_PCIE_DEBUG_DUMP + DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x\n", + PCIDARErrlog(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIDARErrlog(dhd->bus->sih->buscorerev), 0, 0), + PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + 
PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), 0, 0))); + DHD_ERROR(("FunctionINtstatus(0x%x)=0x%x, Mailboxint(0x%x)=0x%x\n", + PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0), + PCIDARMailboxint(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIDARMailboxint(dhd->bus->sih->buscorerev), 0, 0))); +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ + + DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x " + "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0), + PCIECFGREG_PHY_DBG_CLKREQ1, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1), + PCIECFGREG_PHY_DBG_CLKREQ2, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2), + PCIECFGREG_PHY_DBG_CLKREQ3, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3))); + +#ifdef EXTENDED_PCIE_DEBUG_DUMP + DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x ltssm_hist_2(0x%x)=0x%x " + "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0), + PCIECFGREG_PHY_LTSSM_HIST_1, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1), + PCIECFGREG_PHY_LTSSM_HIST_2, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2), + PCIECFGREG_PHY_LTSSM_HIST_3, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3))); + DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n", + PCIE_CLK_CTRL, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CLK_CTRL, 0, 0), + PCIE_PWR_CTRL, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_PWR_CTRL, 0, 0), + PCIH2D_MailBox, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIH2D_MailBox, 0, 0))); + DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n", + PCIECFGREG_TREFUP, + 
dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP), + PCIECFGREG_TREFUP_EXT, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT))); + DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x Function_Intstatus(0x%x)=0x%x\n", + PCIE_CORE_REG_ERRLOG, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CORE_REG_ERRLOG, 0, 0), + PCIE_CORE_REG_ERR_ADDR, + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIE_CORE_REG_ERR_ADDR, 0, 0), + PCIFunctionIntstatus(dhd->bus->sih->buscorerev), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0))); + DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x err_hdrlog3(0x%x)=0x%x " + "err_hdrlog4(0x%x)=0x%x\n", + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0), + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0))); + DHD_ERROR(("err_code(0x%x)=0x%x\n", + (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0))); +#endif /* EXTENDED_PCIE_DEBUG_DUMP */ + + dhd_pcie_dma_info_dump(dhd); + + return 0; +} + +bool +dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus) +{ + return bus->force_bt_quiesce; +} diff --git a/bcmdhd.100.10.315.x/dhd_pcie.h b/bcmdhd.100.10.315.x/dhd_pcie.h new file mode 100644 index 0000000..27acd1c --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_pcie.h @@ 
-0,0 +1,552 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pcie.h 771838 2018-07-12 03:46:37Z $ + */ + +#ifndef dhd_pcie_h +#define dhd_pcie_h + +#include +#include + +/* defines */ +#define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7 + +#define PCMSGBUF_HDRLEN 0 +#define DONGLE_REG_MAP_SIZE (32 * 1024) +#define DONGLE_TCM_MAP_SIZE (4096 * 1024) +#define DONGLE_MIN_MEMSIZE (128 *1024) +#ifdef DHD_DEBUG +#define DHD_PCIE_SUCCESS 0 +#define DHD_PCIE_FAILURE 1 +#endif /* DHD_DEBUG */ +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) + +#define MAX_DHD_TX_FLOWS 320 + +/* user defined data structures */ +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192u +#define CONSOLE_BUFFER_MAX (8 * 1024) + +#ifdef IDLE_TX_FLOW_MGMT +#define IDLE_FLOW_LIST_TIMEOUT 5000 +#define IDLE_FLOW_RING_TIMEOUT 5000 +#endif /* IDLE_TX_FLOW_MGMT */ + +/* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */ +#define IDMA_ENAB(dhd) ((dhd)->idma_enable) +#define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited)) + +#define IDMA_CAPABLE(bus) (((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23)) + +/* IFRM (Implicit Flow Ring Manager enable and inited */ +#define IFRM_ENAB(dhd) ((dhd)->ifrm_enable) +#define IFRM_ACTIVE(dhd) (((dhd)->ifrm_enable) && ((dhd)->ifrm_inited)) + +/* DAR registers use for h2d doorbell */ +#define DAR_ENAB(dhd) ((dhd)->dar_enable) +#define DAR_ACTIVE(dhd) (((dhd)->dar_enable) && ((dhd)->dar_inited)) + +/* DAR WAR for revs < 64 */ +#define DAR_PWRREQ(bus) (((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd)) + +/* PCIE CTO Prevention and Recovery */ +#define PCIECTO_ENAB(bus) ((bus)->cto_enable) + +/* Implicit DMA index usage : + * Index 0 for h2d write index transfer + * Index 1 for d2h read index transfer + */ +#define IDMA_IDX0 0 +#define IDMA_IDX1 1 +#define IDMA_IDX2 2 +#define IDMA_IDX3 3 +#define DMA_TYPE_SHIFT 4 +#define DMA_TYPE_IDMA 1 + +#define 
DHDPCIE_CONFIG_HDR_SIZE 16 +#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */ +#define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20 +#define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */ +#define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */ +#define DHDPCIE_PM_D2_DELAY 200 /* 200us */ + +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; + +typedef struct ring_sh_info { + uint32 ring_mem_addr; + uint32 ring_state_w; + uint32 ring_state_r; +} ring_sh_info_t; + +#define DEVICE_WAKE_NONE 0 +#define DEVICE_WAKE_OOB 1 +#define DEVICE_WAKE_INB 2 + +#define INBAND_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_INB) +#define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB) +#define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE) + +struct dhd_bus; + +struct dhd_pcie_rev { + uint8 fw_rev; + void (*handle_mb_data)(struct dhd_bus *); +}; + +typedef struct dhdpcie_config_save +{ + uint32 header[DHDPCIE_CONFIG_HDR_SIZE]; + /* pmcsr save */ + uint32 pmcsr; + /* express save */ + uint32 exp_dev_ctrl_stat; + uint32 exp_link_ctrl_stat; + uint32 exp_dev_ctrl_stat2; + uint32 exp_link_ctrl_stat2; + /* msi save */ + uint32 msi_cap; + uint32 msi_addr0; + uint32 msi_addr1; + uint32 msi_data; + /* l1pm save */ + uint32 l1pm0; + uint32 l1pm1; + /* ltr save */ + uint32 ltr; + /* aer save */ + uint32 aer_caps_ctrl; /* 0x18 */ + uint32 aer_severity; /* 0x0C */ + uint32 aer_umask; /* 0x08 */ + uint32 aer_cmask; /* 0x14 */ + uint32 aer_root_cmd; /* 0x2c */ + /* BAR0 and BAR1 windows */ + uint32 bar0_win; + uint32 bar1_win; +} dhdpcie_config_save_t; + +/* The level of bus communication with the dongle */ +enum dhd_bus_low_power_state { + DHD_BUS_NO_LOW_POWER_STATE, /* Not in low power state */ + 
DHD_BUS_D3_INFORM_SENT, /* D3 INFORM sent */ + DHD_BUS_D3_ACK_RECIEVED, /* D3 ACK recieved */ +}; + +/** Instantiated once for each hardware (dongle) instance that this DHD manages */ +typedef struct dhd_bus { + dhd_pub_t *dhd; /**< pointer to per hardware (dongle) unique instance */ + struct pci_dev *rc_dev; /* pci RC device handle */ + struct pci_dev *dev; /* pci device handle */ + + dll_t flowring_active_list; /* constructed list of tx flowring queues */ +#ifdef IDLE_TX_FLOW_MGMT + uint64 active_list_last_process_ts; + /* stores the timestamp of active list processing */ +#endif /* IDLE_TX_FLOW_MGMT */ + + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + sbpcieregs_t *reg; /* Registers for PCIE core */ + + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + bool ramsize_adjusted; /* flag to note adjustment, so that + * adjustment routine and file io + * are avoided on D3 cold -> D0 + */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ + + struct pktq txq; /* Queue length used for flow-control */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + + dhd_console_t 
console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ + + bool alp_only; /* Don't use HT clock (ALP only) */ + + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + uint32 resetinstr; + uint32 dongle_ram_base; + + ulong shared_addr; + pciedev_shared_t *pcie_sh; + uint32 dma_rxoffset; + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + osl_t *osh; + uint32 nvram_csm; /* Nvram checksum */ + uint16 pollrate; + uint16 polltick; + + volatile uint32 *pcie_mb_intr_addr; + volatile uint32 *pcie_mb_intr_2_addr; + void *pcie_mb_intr_osh; + bool sleep_allowed; + + wake_counts_t wake_counts; + + /* version 3 shared struct related info start */ + ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS]; + + uint8 h2d_ring_count; + uint8 d2h_ring_count; + uint32 ringmem_ptr; + uint32 ring_state_ptr; + + uint32 d2h_dma_scratch_buffer_mem_addr; + + uint32 h2d_mb_data_ptr_addr; + uint32 d2h_mb_data_ptr_addr; + /* version 3 shared struct related info end */ + + uint32 def_intmask; + uint32 d2h_mb_mask; + uint32 pcie_mailbox_mask; + uint32 pcie_mailbox_int; + bool ltrsleep_on_unload; + uint wait_for_d3_ack; + uint16 max_tx_flowrings; + uint16 max_submission_rings; + uint16 max_completion_rings; + uint16 max_cmn_rings; + uint32 rw_index_sz; + bool db1_for_mb; + + dhd_timeout_t doorbell_timer; + bool device_wake_state; + bool irq_registered; + bool d2h_intr_method; + int32 idletime; /* Control for activity timeout */ + uint32 d3_inform_cnt; + uint32 d0_inform_cnt; + uint32 d0_inform_in_use_cnt; + uint8 force_suspend; + uint8 is_linkdown; + uint8 no_bus_init; +#ifdef IDLE_TX_FLOW_MGMT + bool enable_idle_flowring_mgmt; +#endif /* IDLE_TX_FLOW_MGMT */ + struct dhd_pcie_rev api; + bool use_mailbox; + bool use_d0_inform; + void *bus_lock; + enum dhd_bus_low_power_state bus_low_power_state; + uint32 
hostready_count; /* Number of hostready issued */ +#if defined(BCMPCIE_OOB_HOST_WAKE) + bool oob_presuspend; +#endif // endif + dhdpcie_config_save_t saved_config; + ulong resume_intr_enable_count; + ulong dpc_intr_enable_count; + ulong isr_intr_disable_count; + ulong suspend_intr_disable_count; + ulong dpc_return_busdown_count; +#ifdef BCMPCIE_OOB_HOST_WAKE + ulong oob_intr_count; + ulong oob_intr_enable_count; + ulong oob_intr_disable_count; + uint64 last_oob_irq_time; +#endif /* BCMPCIE_OOB_HOST_WAKE */ + uint64 isr_entry_time; + uint64 isr_exit_time; + uint64 dpc_entry_time; + uint64 dpc_exit_time; + uint64 resched_dpc_time; + uint64 last_process_ctrlbuf_time; + uint64 last_process_flowring_time; + uint64 last_process_txcpl_time; + uint64 last_process_rxcpl_time; + uint64 last_process_infocpl_time; + uint64 last_process_edl_time; + uint64 last_suspend_start_time; + uint64 last_suspend_end_time; + uint64 last_resume_start_time; + uint64 last_resume_end_time; + bool idma_enabled; + bool ifrm_enabled; + bool dar_enabled; + uint32 dmaxfer_complete; + uint8 dw_option; + bool _dar_war; + uint8 dma_chan; + bool cto_enable; /* enable PCIE CTO Prevention and recovery */ + uint32 cto_threshold; /* PCIE CTO timeout threshold */ + int pwr_req_ref; + bool flr_force_fail; /* user intends to simulate flr force fail */ + bool intr_enabled; /* ready to receive interrupts from dongle */ + bool force_bt_quiesce; /* send bt_quiesce command to BT driver. 
*/ +#if defined(DHD_H2D_LOG_TIME_SYNC) + ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */ +#endif /* DHD_H2D_LOG_TIME_SYNC */ + bool rc_ep_aspm_cap; /* RC and EP ASPM capable */ + bool rc_ep_l1ss_cap; /* EC and EP L1SS capable */ +} dhd_bus_t; + +#ifdef DHD_MSI_SUPPORT +extern uint enable_msi; +#endif /* DHD_MSI_SUPPORT */ + +enum { + PCIE_INTX = 0, + PCIE_MSI = 1 +}; + +/* function declarations */ + +extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size); +extern int dhdpcie_bus_register(void); +extern void dhdpcie_bus_unregister(void); +extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device); + +extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, + volatile char *regs, volatile char *tcm, void *pci_dev); +extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size); +extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); +extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus); +extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus); +extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus); +extern void dhdpcie_bus_release(struct dhd_bus *bus); +extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); +extern void dhdpcie_free_irq(dhd_bus_t *bus); +extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); +extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake); +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint); +#else +extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); +extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable); +extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time); +extern bool dhdpcie_tcm_valid(dhd_bus_t *bus); +extern void dhdpcie_pme_active(osl_t *osh, bool enable); 
+extern bool dhdpcie_pme_cap(osl_t *osh); +extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val); +extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask); +extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val); +extern int dhdpcie_disable_irq(dhd_bus_t *bus); +extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus); +extern int dhdpcie_enable_irq(dhd_bus_t *bus); +extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset); +extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, + bool is_write, uint32 writeval); +extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, + bool is_write, uint32 writeval); +extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus); +extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_disable_device(dhd_bus_t *bus); +extern int dhdpcie_alloc_resource(dhd_bus_t *bus); +extern void dhdpcie_free_resource(dhd_bus_t *bus); +extern void dhdpcie_dump_resource(dhd_bus_t *bus); +extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); + +extern int dhdpcie_enable_device(dhd_bus_t *bus); + +#ifdef BCMPCIE_OOB_HOST_WAKE +extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); +extern int dhdpcie_get_oob_irq_num(dhd_bus_t *bus); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#if defined(CONFIG_ARCH_EXYNOS) +#define SAMSUNG_PCIE_VENDOR_ID 0x144d +#if defined(CONFIG_MACH_UNIVERSAL5433) +#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3 +#define SAMSUNG_PCIE_CH_NUM +#elif defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420) +#define SAMSUNG_PCIE_DEVICE_ID 0xa575 +#define SAMSUNG_PCIE_CH_NUM 1 +#elif defined(CONFIG_SOC_EXYNOS8890) +#define SAMSUNG_PCIE_DEVICE_ID 0xa544 +#define SAMSUNG_PCIE_CH_NUM 0 +#elif defined(CONFIG_SOC_EXYNOS8895) || 
defined(CONFIG_SOC_EXYNOS9810) || \ + defined(CONFIG_SOC_EXYNOS9820) +#define SAMSUNG_PCIE_DEVICE_ID 0xecec +#define SAMSUNG_PCIE_CH_NUM 0 +#else +#error "Not supported platform" +#endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */ +#endif /* CONFIG_ARCH_EXYNOS */ + +#if defined(CONFIG_ARCH_MSM) +#define MSM_PCIE_VENDOR_ID 0x17cb +#if defined(CONFIG_ARCH_APQ8084) +#define MSM_PCIE_DEVICE_ID 0x0101 +#elif defined(CONFIG_ARCH_MSM8994) +#define MSM_PCIE_DEVICE_ID 0x0300 +#elif defined(CONFIG_ARCH_MSM8996) +#define MSM_PCIE_DEVICE_ID 0x0104 +#elif defined(CONFIG_ARCH_MSM8998) +#define MSM_PCIE_DEVICE_ID 0x0105 +#elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) +#define MSM_PCIE_DEVICE_ID 0x0106 +#else +#error "Not supported platform" +#endif // endif +#endif /* CONFIG_ARCH_MSM */ + +#if defined(CONFIG_X86) +#define X86_PCIE_VENDOR_ID 0x8086 +#define X86_PCIE_DEVICE_ID 0x9c1a +#endif /* CONFIG_X86 */ + +#if defined(CONFIG_ARCH_TEGRA) +#define TEGRA_PCIE_VENDOR_ID 0x14e4 +#define TEGRA_PCIE_DEVICE_ID 0x4347 +#endif /* CONFIG_ARCH_TEGRA */ + +#define HIKEY_PCIE_VENDOR_ID 0x19e5 +#define HIKEY_PCIE_DEVICE_ID 0x3660 + +#define DUMMY_PCIE_VENDOR_ID 0xffff +#define DUMMY_PCIE_DEVICE_ID 0xffff + +#if defined(CONFIG_ARCH_EXYNOS) +#define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID +#elif defined(CONFIG_ARCH_MSM) +#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID +#elif defined(CONFIG_X86) +#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID +#elif defined(CONFIG_ARCH_TEGRA) +#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID +#else +#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID +#endif /* CONFIG_ARCH_EXYNOS */ + +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH +#ifdef CONFIG_MACH_UNIVERSAL5433 +extern int exynos_pcie_pm_suspend(void); +extern int 
exynos_pcie_pm_resume(void); +#else +extern int exynos_pcie_pm_suspend(int ch_num); +extern int exynos_pcie_pm_resume(int ch_num); +#endif /* CONFIG_MACH_UNIVERSAL5433 */ +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ + +#ifdef CONFIG_ARCH_TEGRA +extern int tegra_pcie_pm_suspend(void); +extern int tegra_pcie_pm_resume(void); +#endif /* CONFIG_ARCH_TEGRA */ + +extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus); +#ifdef IDLE_TX_FLOW_MGMT +extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg); +extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status); +extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg); +extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +#endif /* IDLE_TX_FLOW_MGMT */ + +extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data); + +#ifdef DHD_WAKE_STATUS +int bcmpcie_get_total_wake(struct dhd_bus *bus); +int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag); +#endif /* DHD_WAKE_STATUS */ +extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus); +extern void dhd_bus_hostready(struct dhd_bus *bus); +extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option); +extern int dhdpcie_irq_disabled(struct dhd_bus *bus); + +static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;} +static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; } + +int dhdpcie_config_check(dhd_bus_t *bus); +int dhdpcie_config_restore(dhd_bus_t *bus, bool 
restore_pmcsr); +int dhdpcie_config_save(dhd_bus_t *bus); +int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state); + +extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus); +extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus); +extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus); + +static INLINE uint32 +dhd_pcie_config_read(osl_t *osh, uint offset, uint size) +{ + OSL_DELAY(100); + return OSL_PCI_READ_CONFIG(osh, offset, size); +} + +static INLINE uint32 +dhd_pcie_corereg_read(si_t *sih, uint val) +{ + OSL_DELAY(100); + si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val); + return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0); +} + +extern int dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char *program, char *nv_path); + +extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd); +extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd); +extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus); +#endif /* dhd_pcie_h */ diff --git a/bcmdhd.100.10.315.x/dhd_pcie_linux.c b/bcmdhd.100.10.315.x/dhd_pcie_linux.c new file mode 100644 index 0000000..3033dfd --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_pcie_linux.c @@ -0,0 +1,2347 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pcie_linux.c 771838 2018-07-12 03:46:37Z $ + */ + +/* include files */ +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARCH_MSM +#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996) +#include +#else +#include +#endif /* CONFIG_PCI_MSM */ +#endif /* CONFIG_ARCH_MSM */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#include +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#ifndef AUTO_SUSPEND_TIMEOUT +#define AUTO_SUSPEND_TIMEOUT 1000 +#endif /* AUTO_SUSPEND_TIMEOUT */ +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#include +#ifdef USE_SMMU_ARCH_MSM +#include +#include +#include +#include +#endif /* USE_SMMU_ARCH_MSM */ + +#define PCI_CFG_RETRY 10 +#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ +#define BCM_MEM_FILENAME_LEN 24 /* Mem. 
filename length */ + +#ifdef FORCE_TPOWERON +extern uint32 tpoweron_scale; +#endif /* FORCE_TPOWERON */ +/* user defined data structures */ + +typedef struct dhd_pc_res { + uint32 bar0_size; + void* bar0_addr; + uint32 bar1_size; + void* bar1_addr; +} pci_config_res, *pPci_config_res; + +typedef bool (*dhdpcie_cb_fn_t)(void *); + +typedef struct dhdpcie_info +{ + dhd_bus_t *bus; + osl_t *osh; + struct pci_dev *dev; /* pci device handle */ + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + uint32 tcm_size; /* pci device memory size */ + struct pcos_info *pcos_info; + uint16 last_intrstatus; /* to cache intrstatus */ + int irq; + char pciname[32]; + struct pci_saved_state* default_state; + struct pci_saved_state* state; +#ifdef BCMPCIE_OOB_HOST_WAKE + void *os_cxt; /* Pointer to per-OS private data */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_WAKE_STATUS + spinlock_t pcie_lock; + unsigned int total_wake_count; + int pkt_wake; + int wake_irq; +#endif /* DHD_WAKE_STATUS */ +#ifdef USE_SMMU_ARCH_MSM + void *smmu_cxt; +#endif /* USE_SMMU_ARCH_MSM */ +} dhdpcie_info_t; + +struct pcos_info { + dhdpcie_info_t *pc; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; + struct timer_list tuning_timer; + int tuning_timer_exp; + atomic_t timer_enab; + struct tasklet_struct tuning_tasklet; +}; + +#ifdef BCMPCIE_OOB_HOST_WAKE +typedef struct dhdpcie_os_info { + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + void *dev; /* handle to the underlying device */ +} dhdpcie_os_info_t; +static irqreturn_t wlan_oob_irq(int irq, void *data); +#ifdef CUSTOMER_HW2 +extern struct brcm_pcie_wake brcm_pcie_wake; +#endif /* CUSTOMER_HW2 */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef USE_SMMU_ARCH_MSM +typedef struct 
dhdpcie_smmu_info { + struct dma_iommu_mapping *smmu_mapping; + dma_addr_t smmu_iova_start; + size_t smmu_iova_len; +} dhdpcie_smmu_info_t; +#endif /* USE_SMMU_ARCH_MSM */ + +/* function declarations */ +static int __devinit +dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit +dhdpcie_pci_remove(struct pci_dev *pdev); +static int dhdpcie_init(struct pci_dev *pdev); +static irqreturn_t dhdpcie_isr(int irq, void *arg); +/* OS Routine functions for PCI suspend/resume */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint); +#else +static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +static int dhdpcie_resume_host_dev(dhd_bus_t *bus); +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); +static int dhdpcie_resume_dev(struct pci_dev *dev); +static int dhdpcie_suspend_dev(struct pci_dev *dev); +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_pm_system_suspend_noirq(struct device * dev); +static int dhdpcie_pm_system_resume_noirq(struct device * dev); +#else +static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); +static int dhdpcie_pci_resume(struct pci_dev *dev); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_pm_runtime_suspend(struct device * dev); +static int dhdpcie_pm_runtime_resume(struct device * dev); +static int dhdpcie_pm_system_suspend_noirq(struct device * dev); +static int dhdpcie_pm_system_resume_noirq(struct device * dev); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +uint32 +dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write, + uint32 writeval); + +static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { + { vendor: 0x14e4, + device: PCI_ANY_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + class: PCI_CLASS_NETWORK_OTHER << 8, + class_mask: 0xffff00, + driver_data: 0, 
+ }, + { 0, 0, 0, 0, 0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); + +/* Power Management Hooks */ +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static const struct dev_pm_ops dhdpcie_pm_ops = { + SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL) + .suspend_noirq = dhdpcie_pm_system_suspend_noirq, + .resume_noirq = dhdpcie_pm_system_resume_noirq +}; +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +static struct pci_driver dhdpcie_driver = { + node: {&dhdpcie_driver.node, &dhdpcie_driver.node}, + name: "pcieh", + id_table: dhdpcie_pci_devid, + probe: dhdpcie_pci_probe, + remove: dhdpcie_pci_remove, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + save_state: NULL, +#endif // endif +#if defined(DHD_PCIE_NATIVE_RUNTIMEPM) + .driver.pm = &dhd_pcie_pm_ops, +#else + suspend: dhdpcie_pci_suspend, + resume: dhdpcie_pci_resume, +#endif // endif +}; + +int dhdpcie_init_succeeded = FALSE; + +#ifdef USE_SMMU_ARCH_MSM +static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt) +{ + struct dma_iommu_mapping *mapping; + struct device_node *root_node = NULL; + dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt; + int smmu_iova_address[2]; + char *wlan_node = "android,bcmdhd_wlan"; + char *wlan_smmu_node = "wlan-smmu-iova-address"; + int atomic_ctx = 1; + int s1_bypass = 1; + int ret = 0; + + DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__)); + + root_node = of_find_compatible_node(NULL, NULL, wlan_node); + if (!root_node) { + WARN(1, "failed to get device node of BRCM WLAN\n"); + return -ENODEV; + } + + if (of_property_read_u32_array(root_node, wlan_smmu_node, + smmu_iova_address, 2) == 0) { + DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n", + __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1])); + smmu_info->smmu_iova_start = smmu_iova_address[0]; + smmu_info->smmu_iova_len = smmu_iova_address[1]; + } else { + printf("%s : can't get smmu iova address property\n", + __FUNCTION__); + return -ENODEV; + } + + if 
(smmu_info->smmu_iova_len <= 0) { + DHD_ERROR(("%s: Invalid smmu iova len %d\n", + __FUNCTION__, (int)smmu_info->smmu_iova_len)); + return -EINVAL; + } + + DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__)); + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) || + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__)); + return -EINVAL; + } + + mapping = arm_iommu_create_mapping(&platform_bus_type, + smmu_info->smmu_iova_start, smmu_info->smmu_iova_len); + if (IS_ERR(mapping)) { + DHD_ERROR(("%s: create mapping failed, err = %d\n", + __FUNCTION__, ret)); + ret = PTR_ERR(mapping); + goto map_fail; + } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_ATOMIC, &atomic_ctx); + if (ret) { + DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n", + __FUNCTION__, ret)); + goto set_attr_fail; + } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_S1_BYPASS, &s1_bypass); + if (ret < 0) { + DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n", + __FUNCTION__, ret)); + goto set_attr_fail; + } + + ret = arm_iommu_attach_device(&pdev->dev, mapping); + if (ret) { + DHD_ERROR(("%s: attach device failed, err = %d\n", + __FUNCTION__, ret)); + goto attach_fail; + } + + smmu_info->smmu_mapping = mapping; + + return ret; + +attach_fail: +set_attr_fail: + arm_iommu_release_mapping(mapping); +map_fail: + return ret; +} + +static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt) +{ + dhdpcie_smmu_info_t *smmu_info; + + if (!smmu_cxt) { + return; + } + + smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt; + if (smmu_info->smmu_mapping) { + arm_iommu_detach_device(&pdev->dev); + arm_iommu_release_mapping(smmu_info->smmu_mapping); + smmu_info->smmu_mapping = NULL; + } +} +#endif /* USE_SMMU_ARCH_MSM */ + +#ifdef FORCE_TPOWERON +static void +dhd_bus_get_tpoweron(dhd_bus_t *bus) +{ + + uint32 tpoweron_rc; + uint32 tpoweron_ep; + + tpoweron_rc = dhdpcie_rc_access_cap(bus, 
PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0); + tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0); + DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n", + __FUNCTION__, tpoweron_rc, tpoweron_ep)); +} + +static void +dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron) +{ + + dhd_bus_get_tpoweron(bus); + /* Set the tpoweron */ + DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron)); + dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron); + dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron); + + dhd_bus_get_tpoweron(bus); + +} + +static bool +dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus) +{ + /* + * On Fire's reference platform, coming out of L1.2, + * there is a constant delay of 45us between CLKREQ# and stable REFCLK + * Due to this delay, with tPowerOn < 50 + * there is a chance of the refclk sense to trigger on noise. + * + * Which ever chip needs forced tPowerOn of 50us should be listed below. + */ + if (si_chipid(bus->sih) == BCM4377_CHIP_ID) { + return TRUE; + } + return FALSE; +} +#endif /* FORCE_TPOWERON */ + +static bool +dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable) +{ + uint32 linkctrl_before; + uint32 linkctrl_after = 0; + uint8 linkctrl_asm; + char *device; + + device = (dev == bus->dev) ? 
"EP" : "RC"; + + linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, + FALSE, FALSE, 0); + linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK); + + if (enable) { + if (linkctrl_asm == PCIE_ASPM_L1_ENAB) { + DHD_ERROR(("%s: %s already enabled linkctrl: 0x%x\n", + __FUNCTION__, device, linkctrl_before)); + return FALSE; + } + /* Enable only L1 ASPM (bit 1) */ + dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE, + TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB)); + } else { + if (linkctrl_asm == 0) { + DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n", + __FUNCTION__, device, linkctrl_before)); + return FALSE; + } + /* Disable complete ASPM (bit 1 and bit 0) */ + dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE, + TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB))); + } + + linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, + FALSE, FALSE, 0); + DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n", + __FUNCTION__, device, (enable ? 
"ENABLE " : "DISABLE"), + linkctrl_before, linkctrl_after)); + + return TRUE; +} + +static bool +dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus) +{ + uint32 rc_aspm_cap; + uint32 ep_aspm_cap; + + /* RC ASPM capability */ + rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, + FALSE, FALSE, 0); + if (rc_aspm_cap == BCME_ERROR) { + DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__)); + return FALSE; + } + + /* EP ASPM capability */ + ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, + FALSE, FALSE, 0); + if (ep_aspm_cap == BCME_ERROR) { + DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__)); + return FALSE; + } + + return TRUE; +} + +bool +dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable) +{ + bool ret; + + if (!bus->rc_ep_aspm_cap) { + DHD_ERROR(("%s: NOT ASPM CAPABLE rc_ep_aspm_cap: %d\n", + __FUNCTION__, bus->rc_ep_aspm_cap)); + return FALSE; + } + + if (enable) { + /* Enable only L1 ASPM first RC then EP */ + ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable); + ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable); + } else { + /* Disable complete ASPM first EP then RC */ + ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable); + ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable); + } + + return ret; +} + +static void +dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable) +{ + uint32 l1ssctrl_before; + uint32 l1ssctrl_after = 0; + uint8 l1ss_ep; + char *device; + + device = (dev == bus->dev) ? 
"EP" : "RC"; + + /* Extendend Capacility Reg */ + l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); + l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK); + + if (enable) { + if (l1ss_ep == PCIE_EXT_L1SS_ENAB) { + DHD_ERROR(("%s: %s already enabled, l1ssctrl: 0x%x\n", + __FUNCTION__, device, l1ssctrl_before)); + return; + } + dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET, + TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB)); + } else { + if (l1ss_ep == 0) { + DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n", + __FUNCTION__, device, l1ssctrl_before)); + return; + } + dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET, + TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB))); + } + l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); + DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n", + __FUNCTION__, device, (enable ? 
"ENABLE " : "DISABLE"), + l1ssctrl_before, l1ssctrl_after)); + +} + +static bool +dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus) +{ + uint32 rc_l1ss_cap; + uint32 ep_l1ss_cap; + + /* RC Extendend Capacility */ + rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); + if (rc_l1ss_cap == BCME_ERROR) { + DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__)); + return FALSE; + } + + /* EP Extendend Capacility */ + ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS, + PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0); + if (ep_l1ss_cap == BCME_ERROR) { + DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__)); + return FALSE; + } + + return TRUE; +} + +void +dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable) +{ + bool ret; + + if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) { + DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n", + __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap)); + return; + } + + /* Disable ASPM of RC and EP */ + ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE); + + if (enable) { + /* Enable RC then EP */ + dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable); + dhd_bus_l1ss_enable_dev(bus, bus->dev, enable); + } else { + /* Disable EP then RC */ + dhd_bus_l1ss_enable_dev(bus, bus->dev, enable); + dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable); + } + + /* Enable ASPM of RC and EP only if this API disabled */ + if (ret == TRUE) { + dhd_bus_aspm_enable_rc_ep(bus, TRUE); + } +} + +static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) +{ + int ret = 0; + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + BCM_REFERENCE(state); + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus not IDLE!! 
dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return -EBUSY; + } + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (!bus->dhd->dongle_reset) + ret = dhdpcie_set_suspend_resume(bus, TRUE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; +} + +static int dhdpcie_pci_resume(struct pci_dev *pdev) +{ + int ret = 0; + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (!bus->dhd->dongle_reset) + ret = dhdpcie_set_suspend_resume(bus, FALSE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; +} + +static int +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint) +#else +dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state) +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +{ + int ret = 0; + + ASSERT(bus && !bus->dhd->dongle_reset); + + /* When firmware is not loaded do the PCI bus */ + /* suspend/resume only */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + ret = dhdpcie_pci_suspend_resume(bus, state); + return ret; + } +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + ret = dhdpcie_bus_suspend(bus, state, byint); +#else + ret = dhdpcie_bus_suspend(bus, state); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + return ret; +} + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +static int dhdpcie_pm_runtime_suspend(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + int ret 
= 0; + + if (!pch) + return -EBUSY; + + bus = pch->bus; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + if (atomic_read(&bus->dhd->block_bus)) + return -EHOSTDOWN; + + dhd_netif_stop_queue(bus); + atomic_set(&bus->dhd->block_bus, TRUE); + + if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) { + pm_runtime_mark_last_busy(dev); + ret = -EAGAIN; + } + + atomic_set(&bus->dhd->block_bus, FALSE); + dhd_bus_start_queue(bus); + + return ret; +} + +static int dhdpcie_pm_runtime_resume(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = pch->bus; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + if (atomic_read(&bus->dhd->block_bus)) + return -EHOSTDOWN; + + if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE)) + return -EAGAIN; + + return 0; +} + +static int dhdpcie_pm_system_suspend_noirq(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + int ret; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + if (!pch) + return -EBUSY; + + bus = pch->bus; + + if (atomic_read(&bus->dhd->block_bus)) + return -EHOSTDOWN; + + dhd_netif_stop_queue(bus); + atomic_set(&bus->dhd->block_bus, TRUE); + + ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE); + + if (ret) { + dhd_bus_start_queue(bus); + atomic_set(&bus->dhd->block_bus, FALSE); + } + + return ret; +} + +static int dhdpcie_pm_system_resume_noirq(struct device * dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + int ret; + + if (!pch) + return -EBUSY; + + bus = pch->bus; + + DHD_RPM(("%s Enter\n", __FUNCTION__)); + + ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE); + + atomic_set(&bus->dhd->block_bus, FALSE); + dhd_bus_start_queue(bus); + pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); + + return ret; +} +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) 
extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

/* Put the EP PCI device into D3hot for bus suspend:
 * kill the pending DPC tasklet, save (and stash) PCI config space,
 * disable the device and lower its power state.
 * Returns 0 on success, BCME_ERROR if the PCIe link is already down,
 * or the pci_set_power_state() error code.
 */
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
	int ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	dhdpcie_info_t *pch = pci_get_drvdata(dev);
	dhd_bus_t *bus = pch->bus;

	/* Do not touch config space when the link is gone */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* make sure no deferred ISR work runs while the device sleeps */
	dhd_dpc_tasklet_kill(bus->dhd);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	pci_save_state(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* keep the saved config space; restored in dhdpcie_resume_dev() */
	pch->state = pci_store_saved_state(dev);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	/* NOTE(review): wake is armed for PCI_D0, not the target D3hot state;
	 * looks intentional in this driver but worth confirming against the
	 * platform's wake-capability expectations.
	 */
	pci_enable_wake(dev, PCI_D0, TRUE);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
	if (pci_is_enabled(dev))
#endif // endif
		pci_disable_device(dev);

	ret = pci_set_power_state(dev, PCI_D3hot);
	if (ret) {
		DHD_ERROR(("%s: pci_set_power_state error %d\n",
			__FUNCTION__, ret));
	}
	/* state was handed to pch->state above; config space must be
	 * reloaded (not re-saved) on resume
	 */
	dev->state_saved = FALSE;
	return ret;
}

#ifdef DHD_WAKE_STATUS
/* Return the accumulated count of packet wakes seen on this bus */
int bcmpcie_get_total_wake(struct dhd_bus *bus)
{
	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);

	return pch->total_wake_count;
}

/* Atomically exchange the pkt_wake flag with 'flag' and add 'flag' to the
 * running total. Returns the previous pkt_wake value.
 */
int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
{
	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pch->pcie_lock, flags);

	ret = pch->pkt_wake;
	pch->total_wake_count += flag;
	pch->pkt_wake = flag;

	spin_unlock_irqrestore(&pch->pcie_lock, flags);
	return ret;
}
#endif /* DHD_WAKE_STATUS */

/* Bring the EP PCI device back to D0 and restore its config space */
static int dhdpcie_resume_dev(struct pci_dev *dev)
{
	int err = 0;
	dhdpcie_info_t *pch = pci_get_drvdata(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
pci_load_and_free_saved_state(dev, &pch->state); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); + dev->state_saved = TRUE; + pci_restore_state(dev); +#ifdef FORCE_TPOWERON + if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) { + dhd_bus_set_tpoweron(pch->bus, tpoweron_scale); + } +#endif /* FORCE_TPOWERON */ + err = pci_enable_device(dev); + if (err) { + printf("%s:pci_enable_device error %d \n", __FUNCTION__, err); + goto out; + } + pci_set_master(dev); + err = pci_set_power_state(dev, PCI_D0); + if (err) { + printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err); + goto out; + } + BCM_REFERENCE(pch); +out: + return err; +} + +static int dhdpcie_resume_host_dev(dhd_bus_t *bus) +{ + int bcmerror = 0; +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH + bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM); +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_start_host_pcieclock(bus); +#endif /* CONFIG_ARCH_MSM */ +#ifdef CONFIG_ARCH_TEGRA + bcmerror = tegra_pcie_pm_resume(); +#endif /* CONFIG_ARCH_TEGRA */ + if (bcmerror < 0) { + DHD_ERROR(("%s: PCIe RC resume failed!!! 
(%d)\n",
		__FUNCTION__, bcmerror));
		bus->is_linkdown = 1;
	}

	return bcmerror;
}

/* Platform-specific root-complex suspend: save RC config space / gate the
 * host PCIe clock depending on the SoC. No-op on platforms without a hook.
 */
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
{
	int bcmerror = 0;
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
	if (bus->rc_dev) {
		pci_save_state(bus->rc_dev);
	} else {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
	}
	exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_MSM
	bcmerror = dhdpcie_stop_host_pcieclock(bus);
#endif /* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_TEGRA
	bcmerror = tegra_pcie_pm_suspend();
#endif /* CONFIG_ARCH_TEGRA */
	return bcmerror;
}

/* Read a dword from the root complex's PCI config space at 'offset'.
 * Returns 0xffffffff when the RC handle is unavailable or the read fails.
 */
uint32
dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
{
	uint val = -1; /* Initialise to 0xffffffff */
	if (bus->rc_dev) {
		pci_read_config_dword(bus->rc_dev, offset, &val);
		OSL_DELAY(100);
	} else {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
	}
	DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
		__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
	return (val);
}

/*
 * Reads/ Writes the value of capability register
 * from the given CAP_ID section of PCI Root Port
 *
 * Arguments
 * @bus current dhd_bus_t pointer
 * @cap Capability or Extended Capability ID to get
 * @offset offset of Register to Read
 * @is_ext TRUE if @cap is given for Extended Capability
 * @is_write is set to TRUE to indicate write
 * @val value to write
 *
 * Return Value
 * Returns 0xffffffff on error
 * on write success returns BCME_OK (0)
 * on Read Success returns the value of register requested
 * Note: caller should ensure valid capability ID and Ext. Capability ID.
 */

uint32
dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval)
{
	int cap_ptr = 0;
	uint32 ret = -1;
	uint32 readval;

	if (!(pdev)) {
		DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
		return ret;
	}

	/* Find Capability offset */
	if (is_ext) {
		/* removing max EXT_CAP_ID check as
		 * linux kernel definition's max value is not updated yet as per spec
		 */
		cap_ptr = pci_find_ext_capability(pdev, cap);

	} else {
		/* removing max PCI_CAP_ID_MAX check as
		 * previous kernel versions don't have this definition
		 */
		cap_ptr = pci_find_capability(pdev, cap);
	}

	/* Return if capability with given ID not found */
	if (cap_ptr == 0) {
		DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
			__FUNCTION__, cap));
		return BCME_ERROR;
	}

	if (is_write) {
		pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
		ret = BCME_OK;

	} else {

		pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
		ret = readval;
	}

	return ret;
}

/* Root-complex variant of dhdpcie_access_cap(); see contract above */
uint32
dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval)
{
	if (!(bus->rc_dev)) {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
		return BCME_ERROR;
	}

	return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
}

/* Endpoint variant of dhdpcie_access_cap(); see contract above */
uint32
dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval)
{
	if (!(bus->dev)) {
		DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
}

/* API wrapper to read Root Port link capability
 * Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found
 */

uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
{
	uint32 linkcap = -1;
	linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
		PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE,
0); + linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK; + return linkcap; +} + +int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) +{ + int rc; + + struct pci_dev *dev = bus->dev; + + if (state) { +#if !defined(BCMPCIE_OOB_HOST_WAKE) + dhdpcie_pme_active(bus->osh, state); +#endif // endif + rc = dhdpcie_suspend_dev(dev); + if (!rc) { + dhdpcie_suspend_host_dev(bus); + } + } else { + rc = dhdpcie_resume_host_dev(bus); + if (!rc) { + rc = dhdpcie_resume_dev(dev); + if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) { + /* reinit CTO configuration + * because cfg space got reset at D3 (PERST) + */ + dhdpcie_cto_init(bus, bus->cto_enable); + } + if (bus->sih->buscorerev == 66) { + dhdpcie_ssreset_dis_enum_rst(bus); + } +#if !defined(BCMPCIE_OOB_HOST_WAKE) + dhdpcie_pme_active(bus->osh, state); +#endif // endif + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (bus->is_linkdown) { + bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL; + dhd_os_send_hang_message(bus->dhd); + } +#endif // endif + } + return rc; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +static int dhdpcie_device_scan(struct device *dev, void *data) +{ + struct pci_dev *pcidev; + int *cnt = data; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + pcidev = container_of(dev, struct pci_dev, dev); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + if (pcidev->vendor != 0x14e4) + return 0; + + DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device)); + *cnt += 1; + if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name)) + DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n", + pcidev->device, pcidev->driver->name)); + + return 0; +} +#endif /* LINUX_VERSION >= 2.6.0 */ + +int +dhdpcie_bus_register(void) +{ + int error = 0; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) 
+ if (!(error = pci_module_init(&dhdpcie_driver))) + return 0; + + DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error)); +#else + if (!(error = pci_register_driver(&dhdpcie_driver))) { + bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan); + if (!error) { + DHD_ERROR(("No Broadcom PCI device enumerated!\n")); + } else if (!dhdpcie_init_succeeded) { + DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__)); + } else { + return 0; + } + + pci_unregister_driver(&dhdpcie_driver); + error = BCME_ERROR; + } +#endif /* LINUX_VERSION < 2.6.0 */ + + return error; +} + +void +dhdpcie_bus_unregister(void) +{ + pci_unregister_driver(&dhdpcie_driver); +} + +int __devinit +dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + DHD_MUTEX_LOCK(); + + if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) { + DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__)); + return -ENODEV; + } + printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X" + "(good PCI location)\n", pdev->bus->number, + PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device); + + if (dhdpcie_init (pdev)) { + DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__)); + return -ENODEV; + } + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + /* + Since MSM PCIe RC dev usage conunt already incremented +2 even + before dhdpcie_pci_probe() called, then we inevitably to call + pm_runtime_put_noidle() two times to make the count start with zero. 
+ */ + + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND + /* disable async suspend */ + device_disable_async_suspend(&pdev->dev); +#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */ + + DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__)); + DHD_MUTEX_UNLOCK(); + return 0; +} + +int +dhdpcie_detach(dhdpcie_info_t *pch) +{ + if (pch) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (!dhd_download_fw_on_driverload) { + pci_load_and_free_saved_state(pch->dev, &pch->default_state); + } +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + MFREE(pch->osh, pch, sizeof(dhdpcie_info_t)); + } + return 0; +} + +void __devexit +dhdpcie_pci_remove(struct pci_dev *pdev) +{ + osl_t *osh = NULL; + dhdpcie_info_t *pch = NULL; + dhd_bus_t *bus = NULL; + + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + + DHD_MUTEX_LOCK(); + + pch = pci_get_drvdata(pdev); + bus = pch->bus; + osh = pch->osh; + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + + if (bus) { + + bus->rc_dev = NULL; + + dhdpcie_bus_release(bus); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + if (pci_is_enabled(pdev)) +#endif // endif + pci_disable_device(pdev); +#ifdef BCMPCIE_OOB_HOST_WAKE + /* pcie os info detach */ + MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t)); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef USE_SMMU_ARCH_MSM + /* smmu info detach */ + dhdpcie_smmu_remove(pdev, pch->smmu_cxt); + MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t)); +#endif /* USE_SMMU_ARCH_MSM */ + /* pcie info detach */ + dhdpcie_detach(pch); + /* osl detach */ + osl_detach(osh); + +#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \ + defined(CONFIG_ARCH_APQ8084) + brcm_pcie_wake.wake_irq = NULL; + brcm_pcie_wake.data = NULL; 
+#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */ + + dhdpcie_init_succeeded = FALSE; + + DHD_MUTEX_UNLOCK(); + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); + + return; +} + +/* Enable Linux Msi */ +int +dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + return pci_enable_msi_range(pdev, min_vecs, max_vecs); +#else + return pci_enable_msi_block(pdev, max_vecs); +#endif // endif +} + +/* Disable Linux Msi */ +void +dhdpcie_disable_msi(struct pci_dev *pdev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)) + pci_free_irq_vectors(pdev); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) + pci_disable_msi(pdev); +#else + pci_disable_msi(pdev); +#endif // endif + return; +} + +/* Request Linux irq */ +int +dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info) +{ + dhd_bus_t *bus = dhdpcie_info->bus; + struct pci_dev *pdev = dhdpcie_info->bus->dev; + int host_irq_disabled; + + if (!bus->irq_registered) { + snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname), + "dhdpcie:%s", pci_name(pdev)); + + if (bus->d2h_intr_method == PCIE_MSI) { + if (dhdpcie_enable_msi(pdev, 1, 1) < 0) { + DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__)); + dhdpcie_disable_msi(pdev); + bus->d2h_intr_method = PCIE_INTX; + } + } + + if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED, + dhdpcie_info->pciname, bus) < 0) { + DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); + if (bus->d2h_intr_method == PCIE_MSI) { + dhdpcie_disable_msi(pdev); + } + return -1; + } + else { + bus->irq_registered = TRUE; + } + } else { + DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__)); + } + + host_irq_disabled = dhdpcie_irq_disabled(bus); + if (host_irq_disabled) { + DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so, enabled it 
again\n", + __FUNCTION__, host_irq_disabled)); + dhdpcie_enable_irq(bus); + } + + DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname)); + + return 0; /* SUCCESS */ +} + +/** + * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd + */ +int +dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq) +{ + struct pci_dev *pdev = bus->dev; + + if (!pdev) { + DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__)); + return -ENODEV; + } + + *irq = pdev->irq; + + return 0; /* SUCCESS */ +} + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define PRINTF_RESOURCE "0x%016llx" +#else +#define PRINTF_RESOURCE "0x%08x" +#endif // endif + +#ifdef EXYNOS_PCIE_MODULE_PATCH +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +extern struct pci_saved_state *bcm_pcie_default_state; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ +#endif /* EXYNOS_MODULE_PATCH */ + +/* + +Name: osl_pci_get_resource + +Parametrs: + +1: struct pci_dev *pdev -- pci device structure +2: pci_res -- structure containing pci configuration space values + +Return value: + +int - Status (TRUE or FALSE) + +Description: +Access PCI configuration space, retrieve PCI allocated resources , updates in resource structure. 
+ + */ +int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) +{ + phys_addr_t bar0_addr, bar1_addr; + ulong bar1_size; + struct pci_dev *pdev = NULL; + pdev = dhdpcie_info->dev; +#ifdef EXYNOS_PCIE_MODULE_PATCH +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (bcm_pcie_default_state) { + pci_load_saved_state(pdev, bcm_pcie_default_state); + pci_restore_state(pdev); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ +#endif /* EXYNOS_MODULE_PATCH */ + do { + if (pci_enable_device(pdev)) { + printf("%s: Cannot enable PCI device\n", __FUNCTION__); + break; + } + pci_set_master(pdev); + bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */ + bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */ + + /* read Bar-1 mapped memory range */ + bar1_size = pci_resource_len(pdev, 2); + + if ((bar1_size == 0) || (bar1_addr == 0)) { + printf("%s: BAR1 Not enabled for this device size(%ld)," + " addr(0x"PRINTF_RESOURCE")\n", + __FUNCTION__, bar1_size, bar1_addr); + goto err; + } + + dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); + dhdpcie_info->tcm_size = + (bar1_size > DONGLE_TCM_MAP_SIZE) ? 
bar1_size : DONGLE_TCM_MAP_SIZE; + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); + + if (!dhdpcie_info->regs || !dhdpcie_info->tcm) { + DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__)); + break; + } +#ifdef EXYNOS_PCIE_MODULE_PATCH +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (bcm_pcie_default_state == NULL) { + pci_save_state(pdev); + bcm_pcie_default_state = pci_store_saved_state(pdev); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ +#endif /* EXYNOS_MODULE_PATCH */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + /* Backup PCIe configuration so as to use Wi-Fi on/off process + * in case of built in driver + */ + pci_save_state(pdev); + dhdpcie_info->default_state = pci_store_saved_state(pdev); + + if (dhdpcie_info->default_state == NULL) { + DHD_ERROR(("%s pci_store_saved_state returns NULL\n", + __FUNCTION__)); + REG_UNMAP(dhdpcie_info->regs); + REG_UNMAP(dhdpcie_info->tcm); + pci_disable_device(pdev); + break; + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + + DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->regs, bar0_addr)); + DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); + + return 0; /* SUCCESS */ + } while (0); +err: + return -1; /* FAILURE */ +} + +int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info) +{ + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + + do { + /* define it here only!! 
*/ + if (dhdpcie_get_resource (dhdpcie_info)) { + DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__)); + break; + } + DHD_TRACE(("%s:Exit - SUCCESS \n", + __FUNCTION__)); + + return 0; /* SUCCESS */ + + } while (0); + + DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__)); + + return -1; /* FAILURE */ + +} + +void dhdpcie_dump_resource(dhd_bus_t *bus) +{ + dhdpcie_info_t *pch; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + /* BAR0 */ + DHD_ERROR(("%s: BAR0(VA): 0x%p, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n", + __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0), + DONGLE_REG_MAP_SIZE)); + + /* BAR1 */ + DHD_ERROR(("%s: BAR1(VA): 0x%p, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n", + __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2), + pch->tcm_size)); +} + +int dhdpcie_init(struct pci_dev *pdev) +{ + + osl_t *osh = NULL; + dhd_bus_t *bus = NULL; + dhdpcie_info_t *dhdpcie_info = NULL; + wifi_adapter_info_t *adapter = NULL; +#ifdef BCMPCIE_OOB_HOST_WAKE + dhdpcie_os_info_t *dhdpcie_osinfo = NULL; +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef USE_SMMU_ARCH_MSM + dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL; +#endif /* USE_SMMU_ARCH_MSM */ + int ret = 0; + + do { + /* osl attach */ + if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) { + DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__)); + break; + } + + /* initialize static buffer */ + adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number, + PCI_SLOT(pdev->devfn)); + if (adapter != NULL) { + DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name)); +#ifdef BUS_POWER_RESTORE + adapter->pci_dev = pdev; +#endif + } else + DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__)); + osl_static_mem_init(osh, 
adapter); + + /* Set ACP coherence flag */ + if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT()) + osl_flag_set(osh, OSL_ACP_COHERENCE); + + /* allocate linux spcific pcie structure here */ + if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + break; + } + bzero(dhdpcie_info, sizeof(dhdpcie_info_t)); + dhdpcie_info->osh = osh; + dhdpcie_info->dev = pdev; + +#ifdef BCMPCIE_OOB_HOST_WAKE + /* allocate OS speicific structure */ + dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t)); + if (dhdpcie_osinfo == NULL) { + DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n", + __FUNCTION__)); + break; + } + bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); + dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo; + + /* Initialize host wake IRQ */ + spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock); + /* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */ + dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter, + &dhdpcie_osinfo->oob_irq_flags); + if (dhdpcie_osinfo->oob_irq_num < 0) { + DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__)); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef USE_SMMU_ARCH_MSM + /* allocate private structure for using SMMU */ + dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t)); + if (dhdpcie_smmu_info == NULL) { + DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n", + __FUNCTION__)); + break; + } + bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t)); + dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info; + + /* Initialize smmu structure */ + if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) { + DHD_ERROR(("%s: Failed to initialize SMMU\n", + __FUNCTION__)); + break; + } +#endif /* USE_SMMU_ARCH_MSM */ + +#ifdef DHD_WAKE_STATUS + /* Initialize pcie_lock */ + spin_lock_init(&dhdpcie_info->pcie_lock); +#endif /* DHD_WAKE_STATUS */ + + /* Find the PCI resources, verify the */ + /* vendor and device ID, map BAR 
regions and irq, update in structures */ + if (dhdpcie_scan_resource(dhdpcie_info)) { + DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__)); + + break; + } + + /* Bus initialization */ + ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev); + if (ret != BCME_OK) { + DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__)); + break; + } + + dhdpcie_info->bus = bus; + bus->is_linkdown = 0; + bus->no_bus_init = FALSE; + + bus->rc_dev = NULL; + + /* Get RC Device Handle */ + if (bus->dev->bus) { + /* self member of structure pci_bus is bridge device as seen by parent */ + bus->rc_dev = bus->dev->bus->self; + DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %p\n", __FUNCTION__, + bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev)); + } else { + DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__)); + } + + /* if rc_dev is still NULL, try to get from vendor/device IDs */ + if (bus->rc_dev == NULL) { + bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL); + DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__, + PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev)); + } + + bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus); + bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus); + DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n", + __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap)); + +#ifdef FORCE_TPOWERON + if (dhdpcie_chip_req_forced_tpoweron(bus)) { + dhd_bus_set_tpoweron(bus, tpoweron_scale); + } +#endif /* FORCE_TPOWERON */ + +#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \ + defined(CONFIG_ARCH_APQ8084) + brcm_pcie_wake.wake_irq = wlan_oob_irq; + brcm_pcie_wake.data = bus; +#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */ + +#ifdef DONGLE_ENABLE_ISOLATION + bus->dhd->dongle_isolation = TRUE; +#endif /* DONGLE_ENABLE_ISOLATION */ + + if (bus->intr) { + /* Register interrupt callback, but mask 
it (not operational yet). */ + DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); + dhdpcie_bus_intr_disable(bus); + + if (dhdpcie_request_irq(dhdpcie_info)) { + DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__)); + break; + } + } else { + bus->pollrate = 1; + DHD_INFO(("%s: PCIe interrupt function is NOT registered " + "due to polling mode\n", __FUNCTION__)); + } + +#if defined(BCM_REQUEST_FW) + if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) { + DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__)); + } + bus->nv_path = NULL; + bus->fw_path = NULL; +#endif /* BCM_REQUEST_FW */ + + /* set private data for pci_dev */ + pci_set_drvdata(pdev, dhdpcie_info); + + if (dhd_download_fw_on_driverload) { + if (dhd_bus_start(bus->dhd)) { + DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__)); + if (!allow_delay_fwdl) + break; + } + } else { + /* Set ramdom MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } + + /* Attach to the OS network interface */ + DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__)); + if (dhd_register_if(bus->dhd, 0, TRUE)) { + DHD_ERROR(("%s(): ERROR.. 
dhd_register_if() failed\n", __FUNCTION__)); + break; + } + + dhdpcie_init_succeeded = TRUE; + +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + atomic_set(&bus->dhd->block_bus, FALSE); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + +#if defined(MULTIPLE_SUPPLICANT) + wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe +#endif /* MULTIPLE_SUPPLICANT */ + + DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__)); + return 0; /* return SUCCESS */ + + } while (0); + /* reverse the initialization in order in case of error */ + + if (bus) + dhdpcie_bus_release(bus); + +#ifdef BCMPCIE_OOB_HOST_WAKE + if (dhdpcie_osinfo) { + MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t)); + } +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef USE_SMMU_ARCH_MSM + if (dhdpcie_smmu_info) { + MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t)); + dhdpcie_info->smmu_cxt = NULL; + } +#endif /* USE_SMMU_ARCH_MSM */ + + if (dhdpcie_info) + dhdpcie_detach(dhdpcie_info); + pci_disable_device(pdev); + if (osh) + osl_detach(osh); + + dhdpcie_init_succeeded = FALSE; + + DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__)); + + return -1; /* return FAILURE */ +} + +/* Free Linux irq */ +void +dhdpcie_free_irq(dhd_bus_t *bus) +{ + struct pci_dev *pdev = NULL; + + DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__)); + if (bus) { + pdev = bus->dev; + if (bus->irq_registered) { + free_irq(pdev->irq, bus); + bus->irq_registered = FALSE; + if (bus->d2h_intr_method == PCIE_MSI) { + dhdpcie_disable_msi(pdev); + } + } else { + DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__)); + } + } + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); + return; +} + +/* + +Name: dhdpcie_isr + +Parametrs: + +1: IN int irq -- interrupt vector +2: IN void *arg -- handle to private data structure + +Return value: + +Status (TRUE or FALSE) + +Description: +Interrupt Service routine checks for 
the status register, +disable interrupt and queue DPC if mail box interrupts are raised. +*/ + +irqreturn_t +dhdpcie_isr(int irq, void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bus->isr_entry_time = OSL_SYSUPTIME_US(); + if (!dhdpcie_bus_isr(bus)) { + DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__)); + } + bus->isr_exit_time = OSL_SYSUPTIME_US(); + return IRQ_HANDLED; +} + +int +dhdpcie_disable_irq_nosync(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + disable_irq_nosync(dev->irq); + return BCME_OK; +} + +int +dhdpcie_disable_irq(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + disable_irq(dev->irq); + return BCME_OK; +} + +int +dhdpcie_enable_irq(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + enable_irq(dev->irq); + return BCME_OK; +} + +int +dhdpcie_irq_disabled(dhd_bus_t *bus) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) + struct irq_desc *desc = irq_to_desc(bus->dev->irq); + /* depth will be zero, if enabled */ + return desc->depth; +#else + /* return ERROR by default as there is no support for lower versions */ + return BCME_ERROR; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ +} + +int +dhdpcie_start_host_pcieclock(dhd_bus_t *bus) +{ + int ret = 0; +#ifdef CONFIG_ARCH_MSM +#endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#ifdef CONFIG_ARCH_MSM + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number, + bus->dev, NULL, 0); + if (ret) { 
+ DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__)); + goto done; + } + +done: +#endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Exit:\n", __FUNCTION__)); + return ret; +} + +int +dhdpcie_stop_host_pcieclock(dhd_bus_t *bus) +{ + int ret = 0; +#ifdef CONFIG_ARCH_MSM +#endif /* CONFIG_ARCH_MSM */ + + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#ifdef CONFIG_ARCH_MSM + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number, + bus->dev, NULL, 0); + if (ret) { + DHD_ERROR(("Failed to stop PCIe link\n")); + goto done; + } +done: +#endif /* CONFIG_ARCH_MSM */ + DHD_TRACE(("%s Exit:\n", __FUNCTION__)); + return ret; +} + +int +dhdpcie_disable_device(dhd_bus_t *bus) +{ + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + if (pci_is_enabled(bus->dev)) +#endif // endif + pci_disable_device(bus->dev); + + return 0; +} + +int +dhdpcie_enable_device(dhd_bus_t *bus) +{ + int ret = BCME_ERROR; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + + DHD_TRACE(("%s Enter:\n", __FUNCTION__)); + + if (bus == NULL) { + return BCME_ERROR; + } + + if (bus->dev == NULL) { + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890) + /* Updated with pci_load_and_free_saved_state to compatible + * with Kernel version 3.14.0 to 3.18.41. 
+ */ + pci_load_and_free_saved_state(bus->dev, &pch->default_state); + pch->default_state = pci_store_saved_state(bus->dev); +#else + pci_load_saved_state(bus->dev, pch->default_state); +#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */ + + /* Check if Device ID is valid */ + if (bus->dev->state_saved) { + uint32 vid, saved_vid; + pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid); + saved_vid = bus->dev->saved_config_space[PCI_CFG_VID]; + if (vid != saved_vid) { + DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) " + "Skip the bus init\n", __FUNCTION__, vid, saved_vid)); + bus->no_bus_init = TRUE; + /* Check if the PCIe link is down */ + if (vid == (uint32)-1) { + bus->is_linkdown = 1; + } + return BCME_ERROR; + } + } + + pci_restore_state(bus->dev); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */ + + ret = pci_enable_device(bus->dev); + if (ret) { + pci_disable_device(bus->dev); + } else { + pci_set_master(bus->dev); + } + + return ret; +} + +int +dhdpcie_alloc_resource(dhd_bus_t *bus) +{ + dhdpcie_info_t *dhdpcie_info; + phys_addr_t bar0_addr, bar1_addr; + ulong bar1_size; + + do { + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + break; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + break; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + break; + } + + bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */ + bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */ + + /* read Bar-1 mapped memory range */ + bar1_size = pci_resource_len(bus->dev, 2); + + if ((bar1_size == 0) || (bar1_addr == 0)) { + printf("%s: BAR1 Not enabled for this device size(%ld)," + " addr(0x"PRINTF_RESOURCE")\n", + __FUNCTION__, bar1_size, bar1_addr); + break; + } + + dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, 
DONGLE_REG_MAP_SIZE); + if (!dhdpcie_info->regs) { + DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); + break; + } + + bus->regs = dhdpcie_info->regs; + dhdpcie_info->tcm_size = + (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE; + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); + if (!dhdpcie_info->tcm) { + DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); + REG_UNMAP(dhdpcie_info->regs); + bus->regs = NULL; + break; + } + + bus->tcm = dhdpcie_info->tcm; + + DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->regs, bar0_addr)); + DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", + __FUNCTION__, dhdpcie_info->tcm, bar1_addr)); + + return 0; + } while (0); + + return BCME_ERROR; +} + +void +dhdpcie_free_resource(dhd_bus_t *bus) +{ + dhdpcie_info_t *dhdpcie_info; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + return; + } + + if (bus->regs) { + REG_UNMAP(dhdpcie_info->regs); + bus->regs = NULL; + } + + if (bus->tcm) { + REG_UNMAP(dhdpcie_info->tcm); + bus->tcm = NULL; + } +} + +int +dhdpcie_bus_request_irq(struct dhd_bus *bus) +{ + dhdpcie_info_t *dhdpcie_info; + int ret = 0; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_info = pci_get_drvdata(bus->dev); + if (dhdpcie_info == NULL) { + DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (bus->intr) { + /* Register interrupt callback, but mask it (not operational yet). 
*/ + DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__)); + dhdpcie_bus_intr_disable(bus); + ret = dhdpcie_request_irq(dhdpcie_info); + if (ret) { + DHD_ERROR(("%s: request_irq() failed, ret=%d\n", + __FUNCTION__, ret)); + return ret; + } + } + + return ret; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +int dhdpcie_get_oob_irq_num(dhd_bus_t *bus) +{ + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return 0; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return 0; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return 0; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + + return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0; +} + +void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable) +{ + unsigned long flags; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags); + if ((dhdpcie_osinfo->oob_irq_enabled != enable) && + (dhdpcie_osinfo->oob_irq_num > 0)) { + if (enable) { + enable_irq(dhdpcie_osinfo->oob_irq_num); + bus->oob_intr_enable_count++; + } else { + disable_irq_nosync(dhdpcie_osinfo->oob_irq_num); + bus->oob_intr_disable_count++; + } + dhdpcie_osinfo->oob_irq_enabled = enable; + } + spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags); +} + +static irqreturn_t wlan_oob_irq(int irq, void *data) +{ + dhd_bus_t *bus; + unsigned long flags_bus; + DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__)); + bus = (dhd_bus_t 
*)data; + dhdpcie_oob_intr_set(bus, FALSE); + bus->last_oob_irq_time = OSL_SYSUPTIME_US(); + bus->oob_intr_count++; +#ifdef DHD_WAKE_STATUS + { + bcmpcie_set_get_wake(bus, 1); + } +#endif /* DHD_WAKE_STATUS */ +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM + dhd_bus_wakeup_work(bus->dhd); +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ + DHD_BUS_LOCK(bus->bus_lock, flags_bus); + /* Hold wakelock if bus_low_power_state is + * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED + */ + if (bus->dhd->up && bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { + DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT); + } + DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); + return IRQ_HANDLED; +} + +int dhdpcie_oob_intr_register(dhd_bus_t *bus) +{ + int err = 0; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return -EINVAL; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + if (dhdpcie_osinfo->oob_irq_registered) { + DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__)); + return -EBUSY; + } + + if (dhdpcie_osinfo->oob_irq_num > 0) { + printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__, + (int)dhdpcie_osinfo->oob_irq_num, + (int)dhdpcie_osinfo->oob_irq_flags); + err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq, + dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake", + bus); + if (err) { + DHD_ERROR(("%s: request_irq failed with %d\n", + __FUNCTION__, err)); + return err; + } +#if defined(DISABLE_WOWLAN) + printf("%s: disable_irq_wake\n", __FUNCTION__); + dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; +#else + printf("%s: enable_irq_wake\n", __FUNCTION__); + err = 
enable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = TRUE; + } else + printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err); +#endif + dhdpcie_osinfo->oob_irq_enabled = TRUE; + } + + dhdpcie_osinfo->oob_irq_registered = TRUE; + + return err; +} + +void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) +{ + int err = 0; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + if (!dhdpcie_osinfo->oob_irq_registered) { + DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (dhdpcie_osinfo->oob_irq_num > 0) { + if (dhdpcie_osinfo->oob_irq_wake_enabled) { + err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; + } + } + if (dhdpcie_osinfo->oob_irq_enabled) { + disable_irq(dhdpcie_osinfo->oob_irq_num); + dhdpcie_osinfo->oob_irq_enabled = FALSE; + } + free_irq(dhdpcie_osinfo->oob_irq_num, bus); + } + dhdpcie_osinfo->oob_irq_registered = FALSE; +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +struct device * dhd_bus_to_dev(dhd_bus_t *bus) +{ + struct pci_dev *pdev; + pdev = bus->dev; + + if (pdev) + return &pdev->dev; + else + return NULL; +} diff --git a/bcmdhd.100.10.315.x/dhd_pno.c b/bcmdhd.100.10.315.x/dhd_pno.c new file mode 100644 index 0000000..ab4ca58 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_pno.c @@ -0,0 +1,4514 @@ +/* + * Broadcom Dongle Host Driver (DHD) + * Prefered Network Offload and Wi-Fi Location Service(WLS) code. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pno.c 736010 2017-12-13 08:45:59Z $ + */ + +#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT) +#error "GSCAN needs PNO to be enabled!" 
+#endif // endif + +#ifdef PNO_SUPPORT +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef GSCAN_SUPPORT +#include +#endif /* GSCAN_SUPPORT */ +#ifdef WL_CFG80211 +#include +#endif /* WL_CFG80211 */ + +#ifdef __BIG_ENDIAN +#include +#define htod32(i) (bcmswap32(i)) +#define htod16(i) (bcmswap16(i)) +#define dtoh32(i) (bcmswap32(i)) +#define dtoh16(i) (bcmswap16(i)) +#define htodchanspec(i) htod16(i) +#define dtohchanspec(i) dtoh16(i) +#else +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) +#endif /* IL_BIGENDINA */ + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) +#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state) + +#define PNO_BESTNET_LEN WLC_IOCTL_MEDLEN + +#define PNO_ON 1 +#define PNO_OFF 0 +#define CHANNEL_2G_MIN 1 +#define CHANNEL_2G_MAX 14 +#define CHANNEL_5G_MIN 34 +#define CHANNEL_5G_MAX 165 +#define IS_2G_CHANNEL(ch) ((ch >= CHANNEL_2G_MIN) && \ + (ch <= CHANNEL_2G_MAX)) +#define IS_5G_CHANNEL(ch) ((ch >= CHANNEL_5G_MIN) && \ + (ch <= CHANNEL_5G_MAX)) +#define MAX_NODE_CNT 5 +#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE) +#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \ + - (uint32)(timestamp2/1000))) +#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1) \ + - (uint32)(timestamp2))) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) + +#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====") +#define TIME_MIN_DIFF 5 + +#define EVENT_DATABUF_MAXLEN (512 - sizeof(bcm_event_t)) +#define EVENT_MAX_NETCNT_V1 \ + ((EVENT_DATABUF_MAXLEN - 
sizeof(wl_pfn_scanresults_v1_t)) \ + / sizeof(wl_pfn_net_info_v1_t) + 1) +#define EVENT_MAX_NETCNT_V2 \ + ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v2_t)) \ + / sizeof(wl_pfn_net_info_v2_t) + 1) + +#ifdef GSCAN_SUPPORT +static int _dhd_pno_flush_ssid(dhd_pub_t *dhd); +static wl_pfn_gscan_ch_bucket_cfg_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state, + uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw); +#endif /* GSCAN_SUPPORT */ + +static int dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat, + int pno_freq_expo_max, uint16 *channel_list, int nchan); + +static inline bool +is_dfs(dhd_pub_t *dhd, uint16 channel) +{ + u32 ch; + s32 err; + u8 buf[32]; + + ch = wl_ch_host_to_driver(channel); + err = dhd_iovar(dhd, 0, "per_chan_info", (char *)&ch, + sizeof(u32), buf, sizeof(buf), FALSE); + if (unlikely(err)) { + DHD_ERROR(("get per chan info failed:%d\n", err)); + return FALSE; + } + /* Check the channel flags returned by fw */ + if (*((u32 *)buf) & WL_CHAN_PASSIVE) { + return TRUE; + } + return FALSE; +} + +int +dhd_pno_clean(dhd_pub_t *dhd) +{ + int pfn = 0; + int err; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + /* Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn(error : %d)\n", + __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = DHD_PNO_DISABLED; + err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n", + __FUNCTION__, err)); + } +exit: + return err; +} + +bool +dhd_is_pno_supported(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", + 
__FUNCTION__)); + return FALSE; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + return WLS_SUPPORTED(_pno_state); +} + +bool +dhd_is_legacy_pno_enabled(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return FALSE; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + return ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) != 0); +} + +#ifdef GSCAN_SUPPORT +static uint64 +convert_fw_rel_time_to_systime(struct timespec *ts, uint32 fw_ts_ms) +{ + return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000)); +} + +static void +dhd_pno_idx_to_ssid(struct dhd_pno_gscan_params *gscan_params, + dhd_epno_results_t *res, uint32 idx) +{ + dhd_pno_ssid_t *iter, *next; + int i; + + /* If idx doesn't make sense */ + if (idx >= gscan_params->epno_cfg.num_epno_ssid) { + DHD_ERROR(("No match, idx %d num_ssid %d\n", idx, + gscan_params->epno_cfg.num_epno_ssid)); + goto exit; + } + + if (gscan_params->epno_cfg.num_epno_ssid > 0) { + i = 0; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + &gscan_params->epno_cfg.epno_ssid_list, list) { + if (i++ == idx) { + memcpy(res->ssid, iter->SSID, iter->SSID_len); + res->ssid_len = iter->SSID_len; + return; + } + } + } +exit: + /* If we are here then there was no match */ + res->ssid[0] = '\0'; + res->ssid_len = 0; + return; +} + +/* Translate HAL flag bitmask to BRCM FW flag bitmask */ +void +dhd_pno_translate_epno_fw_flags(uint32 *flags) +{ + uint32 in_flags, fw_flags = 0; + in_flags = *flags; + + if (in_flags & DHD_EPNO_A_BAND_TRIG) { + fw_flags |= WL_PFN_SSID_A_BAND_TRIG; + } + + if (in_flags & DHD_EPNO_BG_BAND_TRIG) { + fw_flags |= WL_PFN_SSID_BG_BAND_TRIG; + } + + if (!(in_flags & DHD_EPNO_STRICT_MATCH) && + !(in_flags & DHD_EPNO_HIDDEN_SSID)) { + fw_flags |= WL_PFN_SSID_IMPRECISE_MATCH; + } + + if (in_flags & 
DHD_EPNO_SAME_NETWORK) { + fw_flags |= WL_PFN_SSID_SAME_NETWORK; + } + + /* Add any hard coded flags needed */ + fw_flags |= WL_PFN_SUPPRESS_AGING_MASK; + *flags = fw_flags; + + return; +} + +/* Translate HAL auth bitmask to BRCM FW bitmask */ +void +dhd_pno_set_epno_auth_flag(uint32 *wpa_auth) +{ + switch (*wpa_auth) { + case DHD_PNO_AUTH_CODE_OPEN: + *wpa_auth = WPA_AUTH_DISABLED; + break; + case DHD_PNO_AUTH_CODE_PSK: + *wpa_auth = (WPA_AUTH_PSK | WPA2_AUTH_PSK); + break; + case DHD_PNO_AUTH_CODE_EAPOL: + *wpa_auth = ~WPA_AUTH_NONE; + break; + default: + DHD_ERROR(("%s: Unknown auth %d", __FUNCTION__, *wpa_auth)); + *wpa_auth = WPA_AUTH_PFN_ANY; + break; + } + return; +} + +/* Cleanup all results */ +static void +dhd_gscan_clear_all_batch_results(dhd_pub_t *dhd) +{ + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter; + + _pno_state = PNO_GET_PNOSTATE(dhd); + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + iter = gscan_params->gscan_batch_cache; + /* Mark everything as consumed */ + while (iter) { + iter->tot_consumed = iter->tot_count; + iter = iter->next; + } + dhd_gscan_batch_cache_cleanup(dhd); + return; +} + +static int +_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +static int +_dhd_pno_flush_ssid(dhd_pub_t *dhd) +{ + int err; + wl_pfn_t pfn_elem; + memset(&pfn_elem, 0, sizeof(wl_pfn_t)); + pfn_elem.flags = htod32(WL_PFN_FLUSH_ALL_SSIDS); + + err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_elem, sizeof(wl_pfn_t), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", 
__FUNCTION__)); + } + return err; +} + +static bool +is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params) +{ + smp_rmb(); + return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE); +} +#endif /* GSCAN_SUPPORT */ + +static int +_dhd_pno_suspend(dhd_pub_t *dhd) +{ + int err; + int suspend = 1; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err)); + goto exit; + + } + _pno_state->pno_status = DHD_PNO_SUSPEND; +exit: + return err; +} +static int +_dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (enable & 0xfffe) { + DHD_ERROR(("%s invalid value\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + if (!dhd_support_sta_mode(dhd)) { + DHD_ERROR(("PNO is not allowed for non-STA mode")); + err = BCME_BADOPTION; + goto exit; + } + if (enable) { + if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + dhd_is_associated(dhd, 0, NULL)) { + DHD_ERROR(("%s Legacy PNO mode cannot be enabled " + "in assoc mode , ignore it\n", __FUNCTION__)); + err = BCME_BADOPTION; + goto exit; + } + } + /* Enable/Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = (enable)? 
+ DHD_PNO_ENABLED : DHD_PNO_DISABLED; + if (!enable) + _pno_state->pno_mode = DHD_PNO_NONE_MODE; + + DHD_PNO(("%s set pno as %s\n", + __FUNCTION__, enable ? "Enable" : "Disable")); +exit: + return err; +} + +static int +_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode) +{ + int err = BCME_OK; + wl_pfn_param_t pfn_param; + dhd_pno_params_t *_params; + dhd_pno_status_info_t *_pno_state; + bool combined_scan = FALSE; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + memset(&pfn_param, 0, sizeof(pfn_param)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) | + (ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT)); + if (mode == DHD_PNO_LEGACY_MODE) { + /* check and set extra pno params */ + if ((pno_params->params_legacy.pno_repeat != 0) || + (pno_params->params_legacy.pno_freq_expo_max != 0)) { + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat); + pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max); + } + /* set up pno scan fr */ + if (pno_params->params_legacy.scan_fr != 0) + pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr); + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n")); + mode |= DHD_PNO_BATCH_MODE; + combined_scan = TRUE; + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n")); + mode |= DHD_PNO_HOTLIST_MODE; + combined_scan = TRUE; + } +#ifdef GSCAN_SUPPORT + else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n")); + mode |= DHD_PNO_GSCAN_MODE; + } +#endif /* GSCAN_SUPPORT */ + } + if (mode & 
(DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* Scan frequency of 30 sec */ + pfn_param.scan_freq = htod32(30); + /* slow adapt scan is off by default */ + pfn_param.slow_freq = htod32(0); + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + /* Network timeout 60 sec */ + pfn_param.lost_network_timeout = htod32(60); + /* best n = 2 by default */ + pfn_param.bestn = DEFAULT_BESTN; + /* mscan m=0 by default, so not record best networks by default */ + pfn_param.mscan = DEFAULT_MSCAN; + /* default repeat = 10 */ + pfn_param.repeat = DEFAULT_REPEAT; + /* by default, maximum scan interval = 2^2 + * scan_freq when adaptive scan is turned on + */ + pfn_param.exp = DEFAULT_EXP; + if (mode == DHD_PNO_BATCH_MODE) { + /* In case of BATCH SCAN */ + if (pno_params->params_batch.bestn) + pfn_param.bestn = pno_params->params_batch.bestn; + if (pno_params->params_batch.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr); + if (pno_params->params_batch.mscan) + pfn_param.mscan = pno_params->params_batch.mscan; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } else if (mode == DHD_PNO_HOTLIST_MODE) { + /* In case of HOTLIST SCAN */ + if (pno_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + if (combined_scan) { + /* Disable Adaptive Scan */ + pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + pfn_param.repeat = 0; + pfn_param.exp = 0; + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* In case of Legacy PNO + BATCH SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params->params_batch.bestn) + pfn_param.bestn = _params->params_batch.bestn; + if (_params->params_batch.scan_fr) + pfn_param.scan_freq = 
htod32(_params->params_batch.scan_fr); + if (_params->params_batch.mscan) + pfn_param.mscan = _params->params_batch.mscan; + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* In case of Legacy PNO + HOTLIST SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + } + } + } +#ifdef GSCAN_SUPPORT + if (mode & DHD_PNO_GSCAN_MODE) { + uint32 lost_network_timeout; + + pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr); + if (pno_params->params_gscan.mscan) { + pfn_param.bestn = pno_params->params_gscan.bestn; + pfn_param.mscan = pno_params->params_gscan.mscan; + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + pfn_param.repeat = 0; + pfn_param.exp = 0; + pfn_param.slow_freq = 0; + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + dhd_pno_params_t *params; + + params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + + pfn_param.scan_freq = gcd(pno_params->params_gscan.scan_fr, + params->params_legacy.scan_fr); + + if ((params->params_legacy.pno_repeat != 0) || + (params->params_legacy.pno_freq_expo_max != 0)) { + pfn_param.repeat = (uchar) (params->params_legacy.pno_repeat); + pfn_param.exp = (uchar) (params->params_legacy.pno_freq_expo_max); + } + } + + lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq * + pfn_param.scan_freq * + pno_params->params_gscan.lost_ap_window); + if (lost_network_timeout) { + pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout, + GSCAN_MIN_BSSID_TIMEOUT)); + } else { + pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT); + } + } else +#endif /* GSCAN_SUPPORT */ + { + if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) || + 
pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) { + DHD_ERROR(("%s pno freq(%d sec) is not valid \n", + __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); + err = BCME_BADARG; + goto exit; + } + } + + err = dhd_set_rand_mac_oui(dhd); + /* Ignore if chip doesnt support the feature */ + if (err < 0 && err != BCME_UNSUPPORTED) { + DHD_ERROR(("%s : failed to set random mac for PNO scan, %d\n", __FUNCTION__, err)); + goto exit; + } + +#ifdef GSCAN_SUPPORT + if (mode == DHD_PNO_BATCH_MODE || + ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan)) +#else + if (mode == DHD_PNO_BATCH_MODE) +#endif /* GSCAN_SUPPORT */ + { + int _tmp = pfn_param.bestn; + /* set bestn to calculate the max mscan which firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__)); + goto exit; + } + /* get max mscan which the firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", NULL, 0, (char *)&_tmp, sizeof(_tmp), FALSE); + if (err < 0) { + DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__)); + goto exit; + } + pfn_param.mscan = MIN(pfn_param.mscan, _tmp); + DHD_PNO((" returned mscan : %d, set bestn : %d mscan %d\n", _tmp, pfn_param.bestn, + pfn_param.mscan)); + } + err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err)); + goto exit; + } + /* need to return mscan if this is for batch scan instead of err */ + err = (mode == DHD_PNO_BATCH_MODE)? 
pfn_param.mscan : err; +exit: + return err; +} + +static int +_dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head* ssid_list, int nssid) +{ + int err = BCME_OK; + int i = 0, mem_needed; + wl_pfn_t *pfn_elem_buf; + struct dhd_pno_ssid *iter, *next; + + NULL_CHECK(dhd, "dhd is NULL", err); + if (!nssid) { + NULL_CHECK(ssid_list, "ssid list is NULL", err); + return BCME_ERROR; + } + mem_needed = (sizeof(wl_pfn_t) * nssid); + pfn_elem_buf = (wl_pfn_t *) MALLOCZ(dhd->osh, mem_needed); + if (!pfn_elem_buf) { + DHD_ERROR(("%s: Can't malloc %d bytes!\n", __FUNCTION__, mem_needed)); + return BCME_NOMEM; + } + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, ssid_list, list) { + pfn_elem_buf[i].infra = htod32(1); + pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM); + pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth); + pfn_elem_buf[i].flags = htod32(iter->flags); + if (iter->hidden) + pfn_elem_buf[i].flags |= htod32(ENABLE << WL_PFN_HIDDEN_BIT); + /* If a single RSSI threshold is defined, use that */ +#ifdef PNO_MIN_RSSI_TRIGGER + pfn_elem_buf[i].flags |= ((PNO_MIN_RSSI_TRIGGER & 0xFF) << WL_PFN_RSSI_SHIFT); +#else + pfn_elem_buf[i].flags |= ((iter->rssi_thresh & 0xFF) << WL_PFN_RSSI_SHIFT); +#endif /* PNO_MIN_RSSI_TRIGGER */ + memcpy((char *)pfn_elem_buf[i].ssid.SSID, iter->SSID, + iter->SSID_len); + pfn_elem_buf[i].ssid.SSID_len = iter->SSID_len; + DHD_PNO(("%s size = %d hidden = %d flags = %x rssi_thresh %d\n", + iter->SSID, iter->SSID_len, iter->hidden, + iter->flags, iter->rssi_thresh)); + if (++i >= nssid) { + /* shouldn't happen */ + break; + } + } + err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); + } + MFREE(dhd->osh, pfn_elem_buf, mem_needed); + return err; +} + +/* qsort compare function */ +static int 
+_dhd_pno_cmpfunc(const void *a, const void *b) +{ + return (*(const uint16*)a - *(const uint16*)b); +} + +static int +_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan, + uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2) +{ + int err = BCME_OK; + int i = 0, j = 0, k = 0; + uint16 tmp; + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + NULL_CHECK(nchan, "nchan is NULL", err); + NULL_CHECK(chan_list1, "chan_list1 is NULL", err); + NULL_CHECK(chan_list2, "chan_list2 is NULL", err); + /* chan_list1 and chan_list2 should be sorted at first */ + while (i < nchan1 && j < nchan2) { + tmp = chan_list1[i] < chan_list2[j]? + chan_list1[i++] : chan_list2[j++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + } + + while (i < nchan1) { + tmp = chan_list1[i++]; + for (; i < nchan1 && chan_list1[i] == tmp; i++); + d_chan_list[k++] = tmp; + } + + while (j < nchan2) { + tmp = chan_list2[j++]; + for (; j < nchan2 && chan_list2[j] == tmp; j++); + d_chan_list[k++] = tmp; + + } + *nchan = k; + return err; +} + +static int +_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list, + int *nchan, uint8 band, bool skip_dfs) +{ + int err = BCME_OK; + int i, j; + uint32 chan_buf[WL_NUMCHANNELS + 1]; + wl_uint32_list_t *list; + NULL_CHECK(dhd, "dhd is NULL", err); + if (*nchan) { + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + } + memset(&chan_buf, 0, sizeof(chan_buf)); + list = (wl_uint32_list_t *) (void *)chan_buf; + list->count = htod32(WL_NUMCHANNELS); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0); + if (err < 0) { + DHD_ERROR(("failed to get channel list (err: %d)\n", err)); + return err; + } + for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) { + if (IS_2G_CHANNEL(dtoh32(list->element[i]))) { + if (!(band & WLC_BAND_2G)) { + /* Skip, if not 2g */ + continue; + } + /* fall through to include the channel */ + } else if 
(IS_5G_CHANNEL(dtoh32(list->element[i]))) { + bool dfs_channel = is_dfs(dhd, dtoh32(list->element[i])); + if ((skip_dfs && dfs_channel) || + (!(band & WLC_BAND_5G) && !dfs_channel)) { + /* Skip the channel if: + * the DFS bit is NOT set & the channel is a dfs channel + * the band 5G is not set & the channel is a non DFS 5G channel + */ + continue; + } + /* fall through to include the channel */ + } else { + /* Not in range. Bad channel */ + DHD_ERROR(("Not in range. bad channel\n")); + *nchan = 0; + return BCME_BADCHAN; + } + + /* Include the channel */ + d_chan_list[j++] = (uint16) dtoh32(list->element[i]); + } + *nchan = j; + return err; +} + +static int +_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch, + char *buf, int nbufsize) +{ + int err = BCME_OK; + int bytes_written = 0, nreadsize = 0; + int t_delta = 0; + int nleftsize = nbufsize; + uint8 cnt = 0; + char *bp = buf; + char eabuf[ETHER_ADDR_STR_LEN]; +#ifdef PNO_DEBUG + char *_base_bp; + char msg[150]; +#endif // endif + dhd_pno_bestnet_entry_t *iter, *next; + dhd_pno_scan_results_t *siter, *snext; + dhd_pno_best_header_t *phead, *pprev; + NULL_CHECK(params_batch, "params_batch is NULL", err); + if (nbufsize > 0) + NULL_CHECK(buf, "buf is NULL", err); + /* initialize the buffer */ + memset(buf, 0, nbufsize); + DHD_PNO(("%s enter \n", __FUNCTION__)); + /* # of scans */ + if (!params_batch->get_batch.batch_started) { + bp += nreadsize = snprintf(bp, nleftsize, "scancount=%d\n", + params_batch->get_batch.expired_tot_scan_cnt); + nleftsize -= nreadsize; + params_batch->get_batch.batch_started = TRUE; + } + DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt)); + /* preestimate scan count until which scan result this report is going to end */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(siter, snext, + 
¶ms_batch->get_batch.expired_scan_results_list, list) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + phead = siter->bestnetheader; + while (phead != NULL) { + /* if left_size is less than bestheader total size , stop this */ + if (nleftsize <= + (phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD)) + goto exit; + /* increase scan count */ + cnt++; + /* # best of each scan */ + DHD_PNO(("\n\n", cnt - 1, phead->tot_cnt)); + /* attribute of the scan */ + if (phead->reason & PNO_STATUS_ABORT_MASK) { + bp += nreadsize = snprintf(bp, nleftsize, "trunc\n"); + nleftsize -= nreadsize; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + &phead->entry_list, list) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + t_delta = jiffies_to_msecs(jiffies - iter->recorded_time); +#ifdef PNO_DEBUG + _base_bp = bp; + memset(msg, 0, sizeof(msg)); +#endif // endif + /* BSSID info */ + bp += nreadsize = snprintf(bp, nleftsize, "bssid=%s\n", + bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf)); + nleftsize -= nreadsize; + /* SSID */ + bp += nreadsize = snprintf(bp, nleftsize, "ssid=%s\n", iter->SSID); + nleftsize -= nreadsize; + /* channel */ + bp += nreadsize = snprintf(bp, nleftsize, "freq=%d\n", + wf_channel2mhz(iter->channel, + iter->channel <= CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + nleftsize -= nreadsize; + /* RSSI */ + bp += nreadsize = snprintf(bp, nleftsize, "level=%d\n", iter->RSSI); + nleftsize -= nreadsize; + /* add the time consumed in Driver to the timestamp of firmware */ + iter->timestamp += t_delta; + bp += nreadsize = snprintf(bp, nleftsize, + "age=%d\n", iter->timestamp); + nleftsize -= nreadsize; + /* RTT0 */ + bp += nreadsize = snprintf(bp, nleftsize, "dist=%d\n", + (iter->rtt0 == 0)? 
-1 : iter->rtt0); + nleftsize -= nreadsize; + /* RTT1 */ + bp += nreadsize = snprintf(bp, nleftsize, "distSd=%d\n", + (iter->rtt0 == 0)? -1 : iter->rtt1); + nleftsize -= nreadsize; + bp += nreadsize = snprintf(bp, nleftsize, "%s", AP_END_MARKER); + nleftsize -= nreadsize; + list_del(&iter->list); + MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE); +#ifdef PNO_DEBUG + memcpy(msg, _base_bp, bp - _base_bp); + DHD_PNO(("Entry : \n%s", msg)); +#endif // endif + } + bp += nreadsize = snprintf(bp, nleftsize, "%s", SCAN_END_MARKER); + DHD_PNO(("%s", SCAN_END_MARKER)); + nleftsize -= nreadsize; + pprev = phead; + /* reset the header */ + siter->bestnetheader = phead = phead->next; + MFREE(dhd->osh, pprev, BEST_HEADER_SIZE); + + siter->cnt_header--; + } + if (phead == NULL) { + /* we store all entry in this scan , so it is ok to delete */ + list_del(&siter->list); + MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); + } + } +exit: + if (cnt < params_batch->get_batch.expired_tot_scan_cnt) { + DHD_ERROR(("Buffer size is small to save all batch entry," + " cnt : %d (remained_scan_cnt): %d\n", + cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt)); + } + params_batch->get_batch.expired_tot_scan_cnt -= cnt; + /* set FALSE only if the link list is empty after returning the data */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + if (list_empty(¶ms_batch->get_batch.expired_scan_results_list)) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + params_batch->get_batch.batch_started = FALSE; + bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER); + DHD_PNO(("%s", RESULTS_END_MARKER)); + DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__)); + } + /* return used memory in buffer */ + bytes_written = (int32)(bp - buf); + return bytes_written; +} + +static int +_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head 
*head, bool only_last) +{ + int err = BCME_OK; + int removed_scan_cnt = 0; + dhd_pno_scan_results_t *siter, *snext; + dhd_pno_best_header_t *phead, *pprev; + dhd_pno_bestnet_entry_t *iter, *next; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(head, "head is NULL", err); + NULL_CHECK(head->next, "head->next is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(siter, snext, + head, list) { + if (only_last) { + /* in case that we need to delete only last one */ + if (!list_is_last(&siter->list, head)) { + /* skip if the one is not last */ + continue; + } + } + /* delete all data belong if the one is last */ + phead = siter->bestnetheader; + while (phead != NULL) { + removed_scan_cnt++; + list_for_each_entry_safe(iter, next, + &phead->entry_list, list) { + list_del(&iter->list); + MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE); + } + pprev = phead; + phead = phead->next; + MFREE(dhd->osh, pprev, BEST_HEADER_SIZE); + } + if (phead == NULL) { + /* it is ok to delete top node */ + list_del(&siter->list); + MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + return removed_scan_cnt; +} + +static int +_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan) +{ + int err = BCME_OK; + int i = 0; + wl_pfn_cfg_t pfncfg_param; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nchan) { + NULL_CHECK(channel_list, "nchan is NULL", err); + } + if (nchan > WL_NUMCHANNELS) { + return BCME_RANGE; + } + DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan)); + memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t)); + /* Setup default values */ + pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET); + pfncfg_param.channel_num = htod32(0); + + for (i = 0; i < nchan; i++) + pfncfg_param.channel_list[i] = 
channel_list[i]; + + pfncfg_param.channel_num = htod32(nchan); + err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), NULL, 0, + TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +static int +_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_lock(&_pno_state->pno_mutex); + switch (mode) { + case DHD_PNO_LEGACY_MODE: { + struct dhd_pno_ssid *iter, *next; + if (params->params_legacy.nssid > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + ¶ms->params_legacy.ssid_list, list) { + list_del(&iter->list); + MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid)); + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + params->params_legacy.nssid = 0; + params->params_legacy.scan_fr = 0; + params->params_legacy.pno_freq_expo_max = 0; + params->params_legacy.pno_repeat = 0; + params->params_legacy.nchan = 0; + memset(params->params_legacy.chan_list, 0, + sizeof(params->params_legacy.chan_list)); + break; + } + case DHD_PNO_BATCH_MODE: { + params->params_batch.scan_fr = 0; + params->params_batch.mscan = 0; + params->params_batch.nchan = 0; + params->params_batch.rtt = 0; + params->params_batch.bestn = 0; + params->params_batch.nchan = 0; + params->params_batch.band = WLC_BAND_AUTO; + memset(params->params_batch.chan_list, 0, + sizeof(params->params_batch.chan_list)); + params->params_batch.get_batch.batch_started = FALSE; + params->params_batch.get_batch.buf = NULL; + 
params->params_batch.get_batch.bufsize = 0; + params->params_batch.get_batch.reason = 0; + _dhd_pno_clear_all_batch_results(dhd, + ¶ms->params_batch.get_batch.scan_results_list, FALSE); + _dhd_pno_clear_all_batch_results(dhd, + ¶ms->params_batch.get_batch.expired_scan_results_list, FALSE); + params->params_batch.get_batch.tot_scan_cnt = 0; + params->params_batch.get_batch.expired_tot_scan_cnt = 0; + params->params_batch.get_batch.top_node_cnt = 0; + INIT_LIST_HEAD(¶ms->params_batch.get_batch.scan_results_list); + INIT_LIST_HEAD(¶ms->params_batch.get_batch.expired_scan_results_list); + break; + } + case DHD_PNO_HOTLIST_MODE: { + struct dhd_pno_bssid *iter, *next; + if (params->params_hotlist.nbssid > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + ¶ms->params_hotlist.bssid_list, list) { + list_del(&iter->list); + MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid)); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + } + params->params_hotlist.scan_fr = 0; + params->params_hotlist.nbssid = 0; + params->params_hotlist.nchan = 0; + params->params_batch.band = WLC_BAND_AUTO; + memset(params->params_hotlist.chan_list, 0, + sizeof(params->params_hotlist.chan_list)); + break; + } + default: + DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode)); + break; + } + mutex_unlock(&_pno_state->pno_mutex); + return err; +} + +static int +_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nbssid) { + NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err); + } + err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid, + sizeof(wl_pfn_bssid_t) * nbssid, NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} 
+ +int +dhd_pno_stop_for_ssid(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0, cnt = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params = NULL; + wl_pfn_bssid_t *p_pfn_bssid = NULL, *tmp_bssid; + + NULL_CHECK(dhd, "dev is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) { + DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + /* If pno mode is PNO_LEGACY_MODE clear the pno values and unset the DHD_PNO_LEGACY_MODE */ + _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &_params->params_gscan; + if (gscan_params->mscan) { + /* retrieve the batching data from firmware into host */ + err = dhd_wait_batch_results_complete(dhd); + if (err != BCME_OK) + goto exit; + } + /* save current pno_mode before calling dhd_pno_clean */ + mutex_lock(&_pno_state->pno_mutex); + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); + goto exit; + } + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); + /* Restart gscan */ + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + /* restart Batch mode if the batch mode is on */ + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* 
save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + err = BCME_ERROR; + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* restart HOTLIST SCAN */ + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = MALLOCZ(dhd->osh, sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + /* convert dhd_pno_bssid to wl_pfn_bssid */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + cnt = 0; + tmp_bssid = p_pfn_bssid; + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&tmp_bssid->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + tmp_bssid->flags = iter->flags; + if (cnt < _params->params_hotlist.nbssid) { + tmp_bssid++; + cnt++; + } else { + DHD_ERROR(("%s: Allocated insufficient memory\n", + __FUNCTION__)); + break; + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + 
_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + if (p_pfn_bssid) { + MFREE(dhd->osh, p_pfn_bssid, sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid); + } + return err; +} + +int +dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + return (_dhd_pno_enable(dhd, enable)); +} + +static int +dhd_pno_add_to_ssid_list(dhd_pub_t *dhd, struct list_head *ptr, wlc_ssid_ext_t *ssid_list, + int nssid, int *num_ssid_added) +{ + int ret = BCME_OK; + int i; + struct dhd_pno_ssid *_pno_ssid; + + for (i = 0; i < nssid; i++) { + if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s : Invalid SSID length %d\n", + __FUNCTION__, ssid_list[i].SSID_len)); + ret = BCME_ERROR; + goto exit; + } + /* Check for broadcast ssid */ + if (!ssid_list[i].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is illegal for PNO setting\n", i)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid = (struct dhd_pno_ssid *)MALLOCZ(dhd->osh, + sizeof(struct dhd_pno_ssid)); + if (_pno_ssid == NULL) { + DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid->SSID_len = ssid_list[i].SSID_len; + _pno_ssid->hidden = ssid_list[i].hidden; + _pno_ssid->rssi_thresh = ssid_list[i].rssi_thresh; + _pno_ssid->flags = ssid_list[i].flags; + _pno_ssid->wpa_auth = WPA_AUTH_PFN_ANY; + + memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len); + list_add_tail(&_pno_ssid->list, ptr); + } + +exit: + *num_ssid_added = i; + return ret; +} + +int +dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int 
pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_legacy_params *params_legacy; + int err = BCME_OK; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("%s: PNO Not enabled/Not ready\n", __FUNCTION__)); + return BCME_NOTREADY; + } + + if (!dhd_support_sta_mode(dhd)) { + return BCME_BADOPTION; + } + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + params_legacy = &(_params->params_legacy); + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + + if (err < 0) { + DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", + __FUNCTION__, err)); + return err; + } + + INIT_LIST_HEAD(¶ms_legacy->ssid_list); + + if (dhd_pno_add_to_ssid_list(dhd, ¶ms_legacy->ssid_list, ssid_list, + nssid, ¶ms_legacy->nssid) < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + return BCME_ERROR; + } + + DHD_PNO(("%s enter : nssid %d, scan_fr :%d, pno_repeat :%d," + "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, + params_legacy->nssid, scan_fr, pno_repeat, pno_freq_expo_max, nchan)); + + return dhd_pno_set_legacy_pno(dhd, scan_fr, pno_repeat, + pno_freq_expo_max, channel_list, nchan); + +} + +static int +dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat, + int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + uint16 _chan_list[WL_NUMCHANNELS]; + int32 tot_nchan = 0; + int err = BCME_OK; + int i, nssid; + int mode = 0; + struct list_head *ssid_list; + + _pno_state = PNO_GET_PNOSTATE(dhd); + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + /* If GSCAN is also ON will handle this down below */ +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE && + !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#else + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) 
+#endif /* GSCAN_SUPPORT */ + { + DHD_ERROR(("%s : Legacy PNO mode was already started, " + "will disable previous one to start new one\n", __FUNCTION__)); + err = dhd_pno_stop_for_ssid(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n", + __FUNCTION__, err)); + return err; + } + } + _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE; + memset(_chan_list, 0, sizeof(_chan_list)); + tot_nchan = MIN(nchan, WL_NUMCHANNELS); + if (tot_nchan > 0 && channel_list) { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i]; + } +#ifdef GSCAN_SUPPORT + else { + tot_nchan = WL_NUMCHANNELS; + err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan, + (WLC_BAND_2G | WLC_BAND_5G), FALSE); + if (err < 0) { + tot_nchan = 0; + DHD_PNO(("Could not get channel list for PNO SSID\n")); + } else { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i]; + } + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + DHD_PNO(("BATCH SCAN is on progress in firmware\n")); + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* use superset of channel list between two mode */ + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params2->params_batch.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_batch.chan_list[0], + _params2->params_batch.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and 
batch\n", + __FUNCTION__)); + goto exit; + } + } else { + DHD_PNO(("superset channel will use" + " all channels in firmware\n")); + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_hotlist.chan_list[0], + _params2->params_hotlist.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and hotlist\n", + __FUNCTION__)); + goto exit; + } + } + } + } + _params->params_legacy.scan_fr = scan_fr; + _params->params_legacy.pno_repeat = pno_repeat; + _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max; + _params->params_legacy.nchan = tot_nchan; + ssid_list = &_params->params_legacy.ssid_list; + nssid = _params->params_legacy.nssid; + +#ifdef GSCAN_SUPPORT + /* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */ + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + /* ePNO and Legacy PNO do not co-exist */ + if (gscan_params->epno_cfg.num_epno_ssid) { + DHD_PNO(("ePNO and Legacy PNO do not co-exist\n")); + err = BCME_EPERM; + goto exit; + } + DHD_PNO(("GSCAN mode is ON! 
Will restart GSCAN+Legacy PNO\n")); + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) { + DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid)); + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + if (err < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + } + } + return err; +} + +int +dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) +{ + int err = BCME_OK; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0, tot_nchan = 0; + int mode = 0, mscan = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(batch_params, "batch_params is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = 
&_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + _pno_state->pno_mode |= DHD_PNO_BATCH_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } else { + /* batch mode is already started */ + return -EBUSY; + } + _params->params_batch.scan_fr = batch_params->scan_fr; + _params->params_batch.bestn = batch_params->bestn; + _params->params_batch.mscan = (batch_params->mscan)? + batch_params->mscan : DEFAULT_BATCH_MSCAN; + _params->params_batch.nchan = batch_params->nchan; + memcpy(_params->params_batch.chan_list, batch_params->chan_list, + sizeof(_params->params_batch.chan_list)); + + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan; + if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_batch.chan_list[batch_params->nchan], + &rem_nchan, batch_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, batch_params->band)); + goto exit; + } + /* now we need to update nchan because rem_chan has valid channel count */ + _params->params_batch.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_batch.chan_list, _params->params_batch.nchan, + sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_batch.nchan; i++) { + DHD_PNO(("%d ", _params->params_batch.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif // endif + if (_params->params_batch.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list)); + tot_nchan = 
_params->params_batch.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID is on progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset for channelist between two mode */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_batch.chan_list[0], _params->params_batch.nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and batch\n", + __FUNCTION__)); + goto exit; + } + } else { + DHD_PNO(("superset channel will use all channels in firmware\n")); + } + if ((err = _dhd_pno_add_ssid(dhd, &_params2->params_legacy.ssid_list, + _params2->params_legacy.nssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); + goto exit; + } + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) { + DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } else { + /* we need to return mscan */ + mscan = err; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + else { + /* return #max scan firmware can do */ + 
err = mscan; + } + return err; +} + +#ifdef GSCAN_SUPPORT + +static int +dhd_set_epno_params(dhd_pub_t *dhd, wl_ssid_ext_params_t *params, bool set) +{ + wl_pfn_ssid_cfg_t cfg; + int err; + NULL_CHECK(dhd, "dhd is NULL\n", err); + memset(&cfg, 0, sizeof(wl_pfn_ssid_cfg_t)); + cfg.version = WL_PFN_SSID_CFG_VERSION; + + /* If asked to clear params (set == FALSE) just set the CLEAR bit */ + if (!set) + cfg.flags |= WL_PFN_SSID_CFG_CLEAR; + else if (params) + memcpy(&cfg.params, params, sizeof(wl_ssid_ext_params_t)); + err = dhd_iovar(dhd, 0, "pfn_ssid_cfg", (char *)&cfg, + sizeof(wl_pfn_ssid_cfg_t), NULL, 0, TRUE); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute pfn_ssid_cfg %d\n", __FUNCTION__, err)); + } + return err; +} + +int +dhd_pno_flush_fw_epno(dhd_pub_t *dhd) +{ + int err; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + + err = dhd_set_epno_params(dhd, NULL, FALSE); + if (err < 0) { + DHD_ERROR(("failed to set ePNO params %d\n", err)); + return err; + } + err = _dhd_pno_flush_ssid(dhd); + return err; +} + +int +dhd_pno_set_epno(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + + struct dhd_pno_gscan_params *gscan_params; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + if (gscan_params->epno_cfg.num_epno_ssid) { + DHD_PNO(("num_epno_ssid %d\n", gscan_params->epno_cfg.num_epno_ssid)); + if ((err = _dhd_pno_add_ssid(dhd, &gscan_params->epno_cfg.epno_ssid_list, + gscan_params->epno_cfg.num_epno_ssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) to firmware\n", err)); + return err; + } + err = dhd_set_epno_params(dhd, &gscan_params->epno_cfg.params, TRUE); + if (err < 0) { + DHD_ERROR(("failed to set ePNO params %d\n", err)); + } + } + return err; +} + +static void +dhd_pno_reset_cfg_gscan(dhd_pub_t 
*dhd, dhd_pno_params_t *_params, + dhd_pno_status_info_t *_pno_state, uint8 flags) +{ + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (flags & GSCAN_FLUSH_SCAN_CFG) { + _params->params_gscan.bestn = 0; + _params->params_gscan.mscan = 0; + _params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET; + _params->params_gscan.scan_fr = 0; + _params->params_gscan.send_all_results_flag = 0; + memset(_params->params_gscan.channel_bucket, 0, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + _params->params_gscan.nchannel_buckets = 0; + DHD_PNO(("Flush Scan config\n")); + } + if (flags & GSCAN_FLUSH_HOTLIST_CFG) { + struct dhd_pno_bssid *iter, *next; + if (_params->params_gscan.nbssid_hotlist > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + &_params->params_gscan.hotlist_bssid_list, list) { + list_del(&iter->list); + MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid)); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + } + _params->params_gscan.nbssid_hotlist = 0; + DHD_PNO(("Flush Hotlist Config\n")); + } + if (flags & GSCAN_FLUSH_EPNO_CFG) { + dhd_pno_ssid_t *iter, *next; + dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg; + + if (epno_cfg->num_epno_ssid > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + &epno_cfg->epno_ssid_list, list) { + list_del(&iter->list); + MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid)); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + epno_cfg->num_epno_ssid = 0; + } + memset(&epno_cfg->params, 0, sizeof(wl_ssid_ext_params_t)); + DHD_PNO(("Flushed ePNO Config\n")); + } + + return; +} + 
+int +dhd_pno_lock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_lock(&_pno_state->pno_mutex); + return err; +} + +void +dhd_pno_unlock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_unlock(&_pno_state->pno_mutex); + return; +} + +int +dhd_wait_batch_results_complete(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + /* Has the workqueue finished its job already?? */ + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) { + DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__)); + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */ + gscan_results_cache_t *iter; + uint16 num_results = 0; + + mutex_lock(&_pno_state->pno_mutex); + iter = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + iter = iter->next; + } + mutex_unlock(&_pno_state->pno_mutex); + + /* All results consumed/No results cached?? 
+ * Get fresh results from FW + */ + if ((_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) && !num_results) { + DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__)); + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } + } + DHD_PNO(("%s: Wait complete\n", __FUNCTION__)); + return err; +} + +static void * +dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) +{ + gscan_results_cache_t *iter, *results; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + uint16 num_scan_ids = 0, num_results = 0; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + iter = results = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + num_scan_ids++; + iter = iter->next; + } + + *len = ((num_results << 16) | (num_scan_ids)); + return results; +} + +int +dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush) +{ + int err = BCME_OK; + dhd_pno_params_t *_params; + int i; + dhd_pno_status_info_t *_pno_state; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + + switch (type) { + case DHD_PNO_BATCH_SCAN_CFG_ID: + { + gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf; + _params->params_gscan.bestn = ptr->bestn; + _params->params_gscan.mscan = ptr->mscan; + _params->params_gscan.buffer_threshold = ptr->buffer_threshold; + } + break; + case DHD_PNO_GEOFENCE_SCAN_CFG_ID: + { + gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf; + struct dhd_pno_bssid *_pno_bssid; + struct 
bssid_t *bssid_ptr; + int8 flags; + + if (flush) { + dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, + GSCAN_FLUSH_HOTLIST_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_hotlist) { + INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list); + } + + for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) { + _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh, + sizeof(struct dhd_pno_bssid)); + if (!_pno_bssid) { + DHD_ERROR(("_pno_bssid is NULL, cannot kalloc %zd bytes", + sizeof(struct dhd_pno_bssid))); + err = BCME_NOMEM; + goto exit; + } + memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN); + + flags = (int8) bssid_ptr->rssi_reporting_threshold; + _pno_bssid->flags = flags << WL_PFN_RSSI_SHIFT; + list_add_tail(&_pno_bssid->list, + &_params->params_gscan.hotlist_bssid_list); + } + + _params->params_gscan.nbssid_hotlist += ptr->nbssid; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + } + break; + case DHD_PNO_SCAN_CFG_ID: + { + int k; + uint16 band; + gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) { + _params->params_gscan.nchannel_buckets = ptr->nchannel_buckets; + + memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + ch_bucket = _params->params_gscan.channel_bucket; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + band = ch_bucket[i].band; + for (k = 0; k < ptr->channel_bucket[i].num_channels; k++) { + ch_bucket[i].chan_list[k] = + wf_mhz2channel(ptr->channel_bucket[i].chan_list[k], + 0); + } + ch_bucket[i].band = 0; + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... 
+ */ + if (band & GSCAN_BG_BAND_MASK) { + ch_bucket[i].band |= WLC_BAND_2G; + } + if (band & GSCAN_A_BAND_MASK) { + ch_bucket[i].band |= WLC_BAND_5G; + } + if (band & GSCAN_DFS_MASK) { + ch_bucket[i].band |= GSCAN_DFS_MASK; + } + DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band, + ch_bucket[i].report_flag)); + } + + for (i = 0; i < ptr->nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple/ptr->scan_fr; + ch_bucket[i].bucket_max_multiple = + ch_bucket[i].bucket_max_multiple/ptr->scan_fr; + DHD_PNO(("mult %d max_mult %d\n", + ch_bucket[i].bucket_freq_multiple, + ch_bucket[i].bucket_max_multiple)); + } + _params->params_gscan.scan_fr = ptr->scan_fr; + + DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets, + _params->params_gscan.scan_fr)); + } else { + err = BCME_BADARG; + } + } + break; + case DHD_PNO_EPNO_CFG_ID: + if (flush) { + dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, + GSCAN_FLUSH_EPNO_CFG); + } + break; + case DHD_PNO_EPNO_PARAMS_ID: + if (flush) { + memset(&_params->params_gscan.epno_cfg.params, 0, + sizeof(wl_ssid_ext_params_t)); + } + if (buf) { + memcpy(&_params->params_gscan.epno_cfg.params, buf, + sizeof(wl_ssid_ext_params_t)); + } + break; + default: + err = BCME_BADARG; + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } +exit: + mutex_unlock(&_pno_state->pno_mutex); + return err; + +} + +static bool +validate_gscan_params(struct dhd_pno_gscan_params *gscan_params) +{ + unsigned int i, k; + + if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) { + DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is empty\n", + __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets)); + return false; + } + + for (i = 0; i < gscan_params->nchannel_buckets; i++) { + if (!gscan_params->channel_bucket[i].band) { + for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) { + if (gscan_params->channel_bucket[i].chan_list[k] > 
CHANNEL_5G_MAX) { + DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__, + gscan_params->channel_bucket[i].chan_list[k])); + return false; + } + } + } + } + + return true; +} + +static int +dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) +{ + int err = BCME_OK; + int mode, i = 0; + uint16 _chan_list[WL_NUMCHANNELS]; + int tot_nchan = 0; + int num_buckets_to_fw, tot_num_buckets, gscan_param_size; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket = NULL; + wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + dhd_pno_params_t *_params; + bool fw_flushed = FALSE; + + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(gscan_params, "gscan_params is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!validate_gscan_params(gscan_params)) { + DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + + if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state, + _chan_list, &tot_num_buckets, &num_buckets_to_fw))) { + goto exit; + } + + mutex_lock(&_pno_state->pno_mutex); + /* Clear any pre-existing results in our cache + * not consumed by framework + */ + dhd_gscan_clear_all_batch_results(dhd); + if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) { + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + mutex_unlock(&_pno_state->pno_mutex); + goto exit; + } + fw_flushed = TRUE; + /* restore the previous mode */ + 
_pno_state->pno_mode = mode; + } + _pno_state->pno_mode |= DHD_PNO_GSCAN_MODE; + mutex_unlock(&_pno_state->pno_mutex); + + if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + !gscan_params->epno_cfg.num_epno_ssid) { + struct dhd_pno_legacy_params *params_legacy; + params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + + if ((err = _dhd_pno_add_ssid(dhd, ¶ms_legacy->ssid_list, + params_legacy->nssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); + goto exit; + } + } + + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + + gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) + + (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t); + pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOCZ(dhd->osh, gscan_param_size); + + if (!pfn_gscan_cfg_t) { + DHD_ERROR(("%s: failed to malloc memory of size %d\n", + __FUNCTION__, gscan_param_size)); + err = BCME_NOMEM; + goto exit; + } + + pfn_gscan_cfg_t->version = WL_GSCAN_CFG_VERSION; + if (gscan_params->mscan) + pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold; + else + pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET; + + pfn_gscan_cfg_t->flags = + (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK); + pfn_gscan_cfg_t->flags |= GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK; + pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw; + pfn_gscan_cfg_t->retry_threshold = GSCAN_RETRY_THRESHOLD; + + for (i = 0; i < num_buckets_to_fw; i++) { + pfn_gscan_cfg_t->channel_bucket[i].bucket_end_index = + ch_bucket[i].bucket_end_index; + pfn_gscan_cfg_t->channel_bucket[i].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple; + pfn_gscan_cfg_t->channel_bucket[i].max_freq_multiple = + ch_bucket[i].max_freq_multiple; + pfn_gscan_cfg_t->channel_bucket[i].repeat = + ch_bucket[i].repeat; + pfn_gscan_cfg_t->channel_bucket[i].flag = 
+ ch_bucket[i].flag; + } + + tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1; + DHD_PNO(("Total channel num %d total ch_buckets %d ch_buckets_to_fw %d \n", tot_nchan, + tot_num_buckets, num_buckets_to_fw)); + + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + + if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) { + DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + /* Reprogram ePNO cfg from dhd cache if FW has been flushed */ + if (fw_flushed) { + dhd_pno_set_epno(dhd); + } + + if (gscan_params->nbssid_hotlist) { + struct dhd_pno_bssid *iter, *next; + wl_pfn_bssid_t *ptr; + p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh, + sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_NOMEM; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + ptr = p_pfn_bssid; + /* convert dhd_pno_bssid to wl_pfn_bssid */ + DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist)); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + &gscan_params->hotlist_bssid_list, list) { + char buffer_hotlist[64]; + memcpy(&ptr->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + BCM_REFERENCE(buffer_hotlist); + DHD_PNO(("%s\n", bcm_ether_ntoa(&ptr->macaddr, buffer_hotlist))); + ptr->flags = iter->flags; + ptr++; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist); + if (err < 0) { + DHD_ERROR(("%s : failed to 
call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) { + DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err)); + } + +exit: + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE; + } + } + MFREE(dhd->osh, p_pfn_bssid, + sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist); + if (pfn_gscan_cfg_t) { + MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size); + } + if (ch_bucket) { + MFREE(dhd->osh, ch_bucket, + (tot_num_buckets * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); + } + return err; + +} + +static wl_pfn_gscan_ch_bucket_cfg_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, + dhd_pno_status_info_t *_pno_state, + uint16 *chan_list, + uint32 *num_buckets, + uint32 *num_buckets_to_fw) +{ + int i, num_channels, err, nchan = WL_NUMCHANNELS, ch_cnt; + uint16 *ptr = chan_list, max; + wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket; + dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + bool is_pno_legacy_running; + dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket; + + /* ePNO and Legacy PNO do not co-exist */ + is_pno_legacy_running = ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + !_params->params_gscan.epno_cfg.num_epno_ssid); + + if (is_pno_legacy_running) + *num_buckets = _params->params_gscan.nchannel_buckets + 1; + else + *num_buckets = _params->params_gscan.nchannel_buckets; + + *num_buckets_to_fw = 0; + + ch_bucket = (wl_pfn_gscan_ch_bucket_cfg_t *) MALLOC(dhd->osh, + ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); + + if (!ch_bucket) { + DHD_ERROR(("%s: failed to malloc memory of size %zd\n", + __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); + *num_buckets_to_fw = *num_buckets = 0; + 
return NULL; + } + + max = gscan_buckets[0].bucket_freq_multiple; + num_channels = 0; + /* nchan is the remaining space left in chan_list buffer + * So any overflow list of channels is ignored + */ + for (i = 0; i < _params->params_gscan.nchannel_buckets && nchan; i++) { + if (!gscan_buckets[i].band) { + ch_cnt = MIN(gscan_buckets[i].num_channels, (uint8)nchan); + num_channels += ch_cnt; + memcpy(ptr, gscan_buckets[i].chan_list, + ch_cnt * sizeof(uint16)); + ptr = ptr + ch_cnt; + } else { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, ptr, + &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK), + !(gscan_buckets[i].band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, gscan_buckets[i].band)); + MFREE(dhd->osh, ch_bucket, + ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + num_channels += nchan; + ptr = ptr + nchan; + } + + ch_bucket[i].bucket_end_index = num_channels - 1; + ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple; + ch_bucket[i].repeat = gscan_buckets[i].repeat; + ch_bucket[i].max_freq_multiple = gscan_buckets[i].bucket_max_multiple; + ch_bucket[i].flag = gscan_buckets[i].report_flag; + /* HAL and FW interpretations are opposite for this bit */ + ch_bucket[i].flag ^= DHD_PNO_REPORT_NO_BATCH; + if (max < gscan_buckets[i].bucket_freq_multiple) + max = gscan_buckets[i].bucket_freq_multiple; + nchan = WL_NUMCHANNELS - num_channels; + *num_buckets_to_fw = *num_buckets_to_fw + 1; + DHD_PNO(("end_idx %d freq_mult - %d\n", + ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple)); + } + + _params->params_gscan.max_ch_bucket_freq = max; + /* Legacy PNO maybe running, which means we need to create a legacy PNO bucket + * Get GCF of Legacy PNO and Gscan scanfreq + */ + if (is_pno_legacy_running) { + dhd_pno_params_t *_params1 = 
&_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + uint16 *legacy_chan_list = _params1->params_legacy.chan_list; + uint16 common_freq; + uint32 legacy_bucket_idx = _params->params_gscan.nchannel_buckets; + /* If no space is left then only gscan buckets will be sent to FW */ + if (nchan) { + common_freq = gcd(_params->params_gscan.scan_fr, + _params1->params_legacy.scan_fr); + max = gscan_buckets[0].bucket_freq_multiple; + /* GSCAN buckets */ + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr; + ch_bucket[i].bucket_freq_multiple /= common_freq; + if (max < gscan_buckets[i].bucket_freq_multiple) + max = gscan_buckets[i].bucket_freq_multiple; + } + /* Legacy PNO bucket */ + ch_bucket[legacy_bucket_idx].bucket_freq_multiple = + _params1->params_legacy.scan_fr; + ch_bucket[legacy_bucket_idx].bucket_freq_multiple /= + common_freq; + _params->params_gscan.max_ch_bucket_freq = MAX(max, + ch_bucket[legacy_bucket_idx].bucket_freq_multiple); + ch_bucket[legacy_bucket_idx].flag = CH_BUCKET_REPORT_REGULAR; + /* Now add channels to the legacy scan bucket */ + for (i = 0; i < _params1->params_legacy.nchan && nchan; i++, nchan--) { + ptr[i] = legacy_chan_list[i]; + num_channels++; + } + ch_bucket[legacy_bucket_idx].bucket_end_index = num_channels - 1; + *num_buckets_to_fw = *num_buckets_to_fw + 1; + DHD_PNO(("end_idx %d freq_mult - %d\n", + ch_bucket[legacy_bucket_idx].bucket_end_index, + ch_bucket[legacy_bucket_idx].bucket_freq_multiple)); + } + } + return ch_bucket; +} + +static int +dhd_pno_stop_for_gscan(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode; + dhd_pno_status_info_t *_pno_state; + + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + 
goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit; + } + if (_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan.mscan) { + /* retrieve the batching data from firmware into host */ + err = dhd_wait_batch_results_complete(dhd); + if (err != BCME_OK) + goto exit; + } + mutex_lock(&_pno_state->pno_mutex); + mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); + return err; + } + _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); + + /* Reprogram Legacy PNO if it was running */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *params_legacy; + uint16 chan_list[WL_NUMCHANNELS]; + + params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + + DHD_PNO(("Restarting Legacy PNO SSID scan...\n")); + memcpy(chan_list, params_legacy->chan_list, + (params_legacy->nchan * sizeof(uint16))); + err = dhd_pno_set_legacy_pno(dhd, params_legacy->scan_fr, + params_legacy->pno_repeat, params_legacy->pno_freq_expo_max, + chan_list, params_legacy->nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + } + +exit: + return err; +} + +int +dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush)); + + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + 
gscan_params = ¶ms->params_gscan; + + if (run) { + err = dhd_pno_set_for_gscan(dhd, gscan_params); + } else { + if (flush) { + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(dhd, params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } + /* Need to stop all gscan */ + err = dhd_pno_stop_for_gscan(dhd); + } + + return err; +} + +int +dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + uint8 old_flag; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + mutex_lock(&_pno_state->pno_mutex); + + old_flag = gscan_params->send_all_results_flag; + gscan_params->send_all_results_flag = (uint8) real_time_flag; + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if (old_flag != gscan_params->send_all_results_flag) { + wl_pfn_gscan_cfg_t gscan_cfg; + + gscan_cfg.version = WL_GSCAN_CFG_VERSION; + gscan_cfg.flags = (gscan_params->send_all_results_flag & + GSCAN_SEND_ALL_RESULTS_MASK); + gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK; + + if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg, + sizeof(wl_pfn_gscan_cfg_t))) < 0) { + DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + } else { + DHD_PNO(("No change in flag - %d\n", old_flag)); + } + } else { + DHD_PNO(("Gscan not started\n")); + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + return err; +} + +/* Cleanup any consumed results + * Return TRUE if all results 
consumed else FALSE + */ +int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd) +{ + int ret = 0; + dhd_pno_params_t *params; + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter, *tmp; + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + iter = gscan_params->gscan_batch_cache; + + while (iter) { + if (iter->tot_consumed == iter->tot_count) { + tmp = iter->next; + MFREE(dhd->osh, iter, + ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + iter = tmp; + } else + break; + } + gscan_params->gscan_batch_cache = iter; + ret = (iter == NULL); + return ret; +} + +static int +_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 timestamp = 0, ts = 0, i, j, timediff; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + wl_pfn_lnet_info_v1_t *plnetinfo; + wl_pfn_lnet_info_v2_t *plnetinfo_v2; + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_lscanresults_v1_t *plbestnet_v1 = NULL; + wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL; + gscan_results_cache_t *iter, *tail; + wifi_gscan_result_t *result; + uint8 *nAPs_per_scan = NULL; + uint8 num_scans_in_cur_iter; + uint16 count; + uint16 fwcount; + uint16 fwstatus = PFN_INCOMPLETE; + struct timespec tm_spec; + + /* Static asserts in _dhd_pno_get_for_batch() below guarantee the v1 and v2 + * net_info and subnet_info structures are compatible in size and SSID offset, + * allowing v1 to be safely used in the code below except for lscanresults + * fields themselves (status, count, offset to netinfo). 
+ */ + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s: GSCAN is not enabled\n", __FUNCTION__)); + goto exit; + } + gscan_params = ¶ms->params_gscan; + nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan); + + if (!nAPs_per_scan) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__, + gscan_params->mscan)); + err = BCME_NOMEM; + goto exit; + } + + plbestnet_v1 = (wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + if (!plbestnet_v1) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__, + (int)PNO_BESTNET_LEN)); + err = BCME_NOMEM; + goto exit; + } + plbestnet_v2 = (wl_pfn_lscanresults_v2_t *)plbestnet_v1; + + mutex_lock(&_pno_state->pno_mutex); + + dhd_gscan_clear_all_batch_results(dhd); + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit_mutex_unlock; + } + + timediff = gscan_params->scan_fr * 1000; + timediff = timediff >> 1; + + /* Ok, now lets start getting results from the FW */ + tail = gscan_params->gscan_batch_cache; + do { + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN, + FALSE); + if (err < 0) { + DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + get_monotonic_boottime(&tm_spec); + + if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) { + fwstatus = plbestnet_v1->status; + fwcount = plbestnet_v1->count; + plnetinfo = &plbestnet_v1->netinfo[0]; + + DHD_PNO(("ver %d, status : %d, count 
%d\n", + plbestnet_v1->version, fwstatus, fwcount)); + + if (fwcount == 0) { + DHD_PNO(("No more batch results\n")); + goto exit_mutex_unlock; + } + if (fwcount > BESTN_MAX) { + DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n", + __FUNCTION__, fwcount, (int)BESTN_MAX)); + /* Process only BESTN_MAX number of results per batch */ + fwcount = BESTN_MAX; + } + num_scans_in_cur_iter = 0; + + timestamp = plnetinfo->timestamp; + /* find out how many scans' results did we get in + * this batch of FW results + */ + for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo++) { + /* Unlikely to happen, but just in case the results from + * FW doesnt make sense..... Assume its part of one single scan + */ + if (num_scans_in_cur_iter >= gscan_params->mscan) { + num_scans_in_cur_iter = 0; + count = fwcount; + break; + } + if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + count = 0; + num_scans_in_cur_iter++; + } + timestamp = plnetinfo->timestamp; + } + if (num_scans_in_cur_iter < gscan_params->mscan) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + } + + DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); + /* reset plnetinfo to the first item for the next loop */ + plnetinfo -= i; + + for (i = 0; i < num_scans_in_cur_iter; i++) { + iter = (gscan_results_cache_t *) + MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) * + sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + if (!iter) { + DHD_ERROR(("%s :Out of memory!! 
Cant malloc %d bytes\n", + __FUNCTION__, gscan_params->mscan)); + err = BCME_NOMEM; + goto exit_mutex_unlock; + } + /* Need this check because the new set of results from FW + * maybe a continuation of previous sets' scan results + */ + if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) { + iter->scan_id = ++gscan_params->scan_id; + } else { + iter->scan_id = gscan_params->scan_id; + } + DHD_PNO(("scan_id %d tot_count %d \n", + gscan_params->scan_id, nAPs_per_scan[i])); + iter->tot_count = nAPs_per_scan[i]; + iter->tot_consumed = 0; + iter->flag = 0; + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + DHD_PNO(("This scan is aborted\n")); + iter->flag = (ENABLE << PNO_STATUS_ABORT); + } else if (gscan_params->reason) { + iter->flag = (ENABLE << gscan_params->reason); + } + + if (!tail) { + gscan_params->gscan_batch_cache = iter; + } else { + tail->next = iter; + } + tail = iter; + iter->next = NULL; + for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) { + result = &iter->results[j]; + + result->channel = + wf_channel2mhz(plnetinfo->pfnsubnet.channel, + (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) plnetinfo->RSSI; + result->beacon_period = 0; + result->capability = 0; + result->rtt = (uint64) plnetinfo->rtt0; + result->rtt_sd = (uint64) plnetinfo->rtt1; + result->ts = convert_fw_rel_time_to_systime(&tm_spec, + plnetinfo->timestamp); + ts = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d\n", + __FUNCTION__, + plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(result->ssid, plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + + DHD_PNO(("\tSSID : ")); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(result->macaddr.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", + plnetinfo->rtt0, plnetinfo->rtt1)); + + } + } + + } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) { + fwstatus = plbestnet_v2->status; + fwcount = plbestnet_v2->count; + plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0]; + + DHD_PNO(("ver %d, status : %d, count %d\n", + plbestnet_v2->version, fwstatus, fwcount)); + + if (fwcount == 0) { + DHD_PNO(("No more batch results\n")); + goto exit_mutex_unlock; + } + if (fwcount > BESTN_MAX) { + DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n", + __FUNCTION__, fwcount, (int)BESTN_MAX)); + /* Process only BESTN_MAX number of results per batch */ + fwcount = BESTN_MAX; + } + num_scans_in_cur_iter = 0; + + timestamp = plnetinfo_v2->timestamp; + /* find out how many scans' results did we get + * in this batch of FW results + */ + for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo_v2++) { + /* Unlikely to happen, but just in case the 
results from + * FW doesnt make sense..... Assume its part of one single scan + */ + if (num_scans_in_cur_iter >= gscan_params->mscan) { + num_scans_in_cur_iter = 0; + count = fwcount; + break; + } + if (TIME_DIFF_MS(timestamp, plnetinfo_v2->timestamp) > timediff) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + count = 0; + num_scans_in_cur_iter++; + } + timestamp = plnetinfo_v2->timestamp; + } + if (num_scans_in_cur_iter < gscan_params->mscan) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + } + + DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); + /* reset plnetinfo to the first item for the next loop */ + plnetinfo_v2 -= i; + + for (i = 0; i < num_scans_in_cur_iter; i++) { + iter = (gscan_results_cache_t *) + MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) * + sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + if (!iter) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", + __FUNCTION__, gscan_params->mscan)); + err = BCME_NOMEM; + goto exit_mutex_unlock; + } + /* Need this check because the new set of results from FW + * maybe a continuation of previous sets' scan results + */ + if (TIME_DIFF_MS(ts, plnetinfo_v2->timestamp) > timediff) { + iter->scan_id = ++gscan_params->scan_id; + } else { + iter->scan_id = gscan_params->scan_id; + } + DHD_PNO(("scan_id %d tot_count %d ch_bucket %x\n", + gscan_params->scan_id, nAPs_per_scan[i], + plbestnet_v2->scan_ch_buckets[i])); + iter->tot_count = nAPs_per_scan[i]; + iter->scan_ch_bucket = plbestnet_v2->scan_ch_buckets[i]; + iter->tot_consumed = 0; + iter->flag = 0; + if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) { + DHD_PNO(("This scan is aborted\n")); + iter->flag = (ENABLE << PNO_STATUS_ABORT); + } else if (gscan_params->reason) { + iter->flag = (ENABLE << gscan_params->reason); + } + + if (!tail) { + gscan_params->gscan_batch_cache = iter; + } else { + tail->next = iter; + } + tail = iter; + iter->next = NULL; + for (j = 0; j < nAPs_per_scan[i]; j++, 
plnetinfo_v2++) { + result = &iter->results[j]; + + result->channel = + wf_channel2mhz(plnetinfo_v2->pfnsubnet.channel, + (plnetinfo_v2->pfnsubnet.channel <= + CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) plnetinfo_v2->RSSI; + /* Info not available & not expected */ + result->beacon_period = 0; + result->capability = 0; + result->rtt = (uint64) plnetinfo_v2->rtt0; + result->rtt_sd = (uint64) plnetinfo_v2->rtt1; + result->ts = convert_fw_rel_time_to_systime(&tm_spec, + plnetinfo_v2->timestamp); + ts = plnetinfo_v2->timestamp; + if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d\n", + __FUNCTION__, + plnetinfo_v2->pfnsubnet.SSID_len)); + plnetinfo_v2->pfnsubnet.SSID_len = + DOT11_MAX_SSID_LEN; + } + memcpy(result->ssid, plnetinfo_v2->pfnsubnet.u.SSID, + plnetinfo_v2->pfnsubnet.SSID_len); + result->ssid[plnetinfo_v2->pfnsubnet.SSID_len] = '\0'; + memcpy(&result->macaddr, &plnetinfo_v2->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + + DHD_PNO(("\tSSID : ")); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(result->macaddr.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo_v2->pfnsubnet.channel, + plnetinfo_v2->RSSI, plnetinfo_v2->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", + plnetinfo_v2->rtt0, plnetinfo_v2->rtt1)); + + } + } + + } else { + err = BCME_VERSION; + DHD_ERROR(("bestnet fw version %d not supported\n", + plbestnet_v1->version)); + goto exit_mutex_unlock; + } + } while (fwstatus == PFN_INCOMPLETE); + +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE; + smp_wmb(); + wake_up_interruptible(&_pno_state->batch_get_wait); + if (nAPs_per_scan) { + MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan * sizeof(uint8)); + } + if (plbestnet_v1) { + MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN); + } + DHD_PNO(("Batch retrieval done!\n")); + return 
err; +} +#endif /* GSCAN_SUPPORT */ + +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +void * +dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + void *ret = NULL; + dhd_pno_gscan_capabilities_t *ptr; + dhd_pno_ssid_t *ssid_elem; + dhd_pno_params_t *_params; + dhd_epno_ssid_cfg_t *epno_cfg; + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__)); + return NULL; + } + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + if (!len) { + DHD_ERROR(("%s: len is NULL\n", __FUNCTION__)); + return NULL; + } + + switch (type) { + case DHD_PNO_GET_CAPABILITIES: + ptr = (dhd_pno_gscan_capabilities_t *) + MALLOCZ(dhd->osh, sizeof(dhd_pno_gscan_capabilities_t)); + if (!ptr) + break; + /* Hardcoding these values for now, need to get + * these values from FW, will change in a later check-in + */ + ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE; + ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; + ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN; + ptr->max_scan_reporting_threshold = 100; + ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM; + ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID; + ptr->max_white_list_ssid = MAX_WHITELIST_SSID; + ret = (void *)ptr; + *len = sizeof(dhd_pno_gscan_capabilities_t); + break; +#ifdef GSCAN_SUPPORT + case DHD_PNO_GET_BATCH_RESULTS: + ret = dhd_get_gscan_batch_results(dhd, len); + break; +#endif /* GSCAN_SUPPORT */ + case DHD_PNO_GET_CHANNEL_LIST: + if (info) { + uint16 ch_list[WL_NUMCHANNELS]; + uint32 *p, mem_needed, i; + int32 err, nchan = WL_NUMCHANNELS; + uint32 *gscan_band = (uint32 *) info; + uint8 band = 0; + + /* No band specified?, nothing to do */ + if ((*gscan_band & GSCAN_BAND_MASK) == 0) { + DHD_PNO(("No band specified\n")); + *len = 0; + break; + } + + /* HAL and DHD use different bits for 2.4G and + * 5G 
in bitmap. Hence translating it here... + */ + if (*gscan_band & GSCAN_BG_BAND_MASK) { + band |= WLC_BAND_2G; + } + if (*gscan_band & GSCAN_A_BAND_MASK) { + band |= WLC_BAND_5G; + } + + err = _dhd_pno_get_channels(dhd, ch_list, &nchan, + (band & GSCAN_ABG_BAND_MASK), + !(*gscan_band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list\n", + __FUNCTION__)); + *len = 0; + } else { + mem_needed = sizeof(uint32) * nchan; + p = (uint32 *)MALLOCZ(dhd->osh, mem_needed); + if (!p) { + DHD_ERROR(("%s: Unable to malloc %d bytes\n", + __FUNCTION__, mem_needed)); + break; + } + for (i = 0; i < nchan; i++) { + p[i] = wf_channel2mhz(ch_list[i], + (ch_list[i] <= CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + } + ret = p; + *len = mem_needed; + } + } else { + *len = 0; + DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__)); + } + break; + case DHD_PNO_GET_NEW_EPNO_SSID_ELEM: + epno_cfg = &_params->params_gscan.epno_cfg; + if (epno_cfg->num_epno_ssid >= + MAX_EPNO_SSID_NUM) { + DHD_ERROR(("Excessive number of ePNO SSIDs programmed %d\n", + epno_cfg->num_epno_ssid)); + return NULL; + } + if (!epno_cfg->num_epno_ssid) { + INIT_LIST_HEAD(&epno_cfg->epno_ssid_list); + } + ssid_elem = MALLOCZ(dhd->osh, sizeof(dhd_pno_ssid_t)); + if (!ssid_elem) { + DHD_ERROR(("EPNO ssid: cannot alloc %zd bytes", + sizeof(dhd_pno_ssid_t))); + return NULL; + } + epno_cfg->num_epno_ssid++; + list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list); + ret = ssid_elem; + *len = sizeof(dhd_pno_ssid_t); + break; + default: + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } + + return ret; + +} +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +static int +_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + int i, j; + uint32 timestamp = 0; + dhd_pno_params_t *_params = NULL; + dhd_pno_status_info_t *_pno_state = NULL; + wl_pfn_lscanresults_v1_t *plbestnet_v1 = 
NULL; + wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL; + wl_pfn_lnet_info_v1_t *plnetinfo; + wl_pfn_lnet_info_v2_t *plnetinfo_v2; + dhd_pno_bestnet_entry_t *pbestnet_entry; + dhd_pno_best_header_t *pbestnetheader = NULL; + dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext; + bool allocate_header = FALSE; + uint16 fwstatus = PFN_INCOMPLETE; + uint16 fwcount; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + /* The static asserts below guarantee the v1 and v2 net_info and subnet_info + * structures are compatible in size and SSID offset, allowing v1 to be safely + * used in the code below except for lscanresults fields themselves + * (status, count, offset to netinfo). + */ + STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t)); + STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t)); + STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t)); + STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) == + OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID)); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit_no_unlock; + } + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit_no_unlock; + } + + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + goto exit_no_unlock; + } + mutex_lock(&_pno_state->pno_mutex); + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (buf && bufsize) { + if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) { + /* need to check whether we have cashed data or not */ + DHD_PNO(("%s: have cashed batching data in Driver\n", + __FUNCTION__)); + /* convert to results format */ + goto convert_format; + } else { + /* 
this is a first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + goto convert_format; + } + } + } + /* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */ + pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE); + if (pscan_results == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n")); + goto exit; + } + pscan_results->bestnetheader = NULL; + pscan_results->cnt_header = 0; + /* add the element into list unless total node cnt is less than MAX_NODE_ CNT */ + if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) { + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + _params->params_batch.get_batch.top_node_cnt++; + } else { + int _removed_scan_cnt; + /* remove oldest one and add new one */ + DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__)); + _removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd, + &_params->params_batch.get_batch.scan_results_list, TRUE); + _params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt; + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + + } + + plbestnet_v1 = 
(wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + NULL_CHECK(plbestnet_v1, "failed to allocate buffer for bestnet", err); + plbestnet_v2 = (wl_pfn_lscanresults_v2_t*)plbestnet_v1; + + DHD_PNO(("%s enter\n", __FUNCTION__)); + do { + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN, + FALSE); + if (err < 0) { + if (err == BCME_EPERM) { + DHD_ERROR(("we cannot get the batching data " + "during scanning in firmware, try again\n,")); + msleep(500); + continue; + } else { + DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) { + fwstatus = plbestnet_v1->status; + fwcount = plbestnet_v1->count; + plnetinfo = &plbestnet_v1->netinfo[0]; + if (fwcount == 0) { + DHD_PNO(("No more batch results\n")); + goto exit; + } + if (fwcount > BESTN_MAX) { + DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n", + __FUNCTION__, fwcount, (int)BESTN_MAX)); + /* Process only BESTN_MAX number of results per batch */ + fwcount = BESTN_MAX; + } + for (i = 0; i < fwcount; i++) { + pbestnet_entry = (dhd_pno_bestnet_entry_t *) + MALLOC(dhd->osh, BESTNET_ENTRY_SIZE); + if (pbestnet_entry == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE); + /* record the current time */ + pbestnet_entry->recorded_time = jiffies; + /* create header for the first entry */ + allocate_header = (i == 0)? 
TRUE : FALSE; + /* check whether the new generation is started or not */ + if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp) + > TIME_MIN_DIFF)) + allocate_header = TRUE; + timestamp = plnetinfo->timestamp; + if (allocate_header) { + pbestnetheader = (dhd_pno_best_header_t *) + MALLOC(dhd->osh, BEST_HEADER_SIZE); + if (pbestnetheader == NULL) { + err = BCME_NOMEM; + if (pbestnet_entry) + MFREE(dhd->osh, pbestnet_entry, + BESTNET_ENTRY_SIZE); + DHD_ERROR(("failed to allocate" + " dhd_pno_bestnet_entry\n")); + goto exit; + } + /* increase total cnt of bestnet header */ + pscan_results->cnt_header++; + /* need to record the reason to call dhd_pno_get_for_bach */ + if (reason) + pbestnetheader->reason = (ENABLE << reason); + memset(pbestnetheader, 0, BEST_HEADER_SIZE); + /* initialize the head of linked list */ + INIT_LIST_HEAD(&(pbestnetheader->entry_list)); + /* link the pbestnet heaer into existed list */ + if (pscan_results->bestnetheader == NULL) + /* In case of header */ + pscan_results->bestnetheader = pbestnetheader; + else { + dhd_pno_best_header_t *head = + pscan_results->bestnetheader; + pscan_results->bestnetheader = pbestnetheader; + pbestnetheader->next = head; + } + } + pbestnet_entry->channel = plnetinfo->pfnsubnet.channel; + pbestnet_entry->RSSI = plnetinfo->RSSI; + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + /* if RSSI is positive value, we assume that + * this scan is aborted by other scan + */ + DHD_PNO(("This scan is aborted\n")); + pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT); + } + pbestnet_entry->rtt0 = plnetinfo->rtt0; + pbestnet_entry->rtt1 = plnetinfo->rtt1; + pbestnet_entry->timestamp = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length" + " %d: trimming it to max\n", + __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len; + 
memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID, + pbestnet_entry->SSID_len); + memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + /* add the element into list */ + list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list); + /* increase best entry count */ + pbestnetheader->tot_cnt++; + pbestnetheader->tot_size += BESTNET_ENTRY_SIZE; + DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); + DHD_PNO(("\tSSID : ")); + for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++) + DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j])); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(plnetinfo->pfnsubnet.BSSID.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, + plnetinfo->rtt1)); + plnetinfo++; + } + } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) { + fwstatus = plbestnet_v2->status; + fwcount = plbestnet_v2->count; + plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0]; + if (fwcount == 0) { + DHD_PNO(("No more batch results\n")); + goto exit; + } + if (fwcount > BESTN_MAX) { + DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n", + __FUNCTION__, fwcount, (int)BESTN_MAX)); + /* Process only BESTN_MAX number of results per batch */ + fwcount = BESTN_MAX; + } + DHD_PNO(("ver %d, status : %d, count %d\n", + plbestnet_v2->version, fwstatus, fwcount)); + + for (i = 0; i < fwcount; i++) { + pbestnet_entry = (dhd_pno_bestnet_entry_t *) + MALLOC(dhd->osh, BESTNET_ENTRY_SIZE); + if (pbestnet_entry == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE); + /* record the current time */ + pbestnet_entry->recorded_time = jiffies; + /* create header for the first entry */ + allocate_header = (i == 0)? 
TRUE : FALSE; + /* check whether the new generation is started or not */ + if (timestamp && (TIME_DIFF(timestamp, plnetinfo_v2->timestamp) + > TIME_MIN_DIFF)) + allocate_header = TRUE; + timestamp = plnetinfo_v2->timestamp; + if (allocate_header) { + pbestnetheader = (dhd_pno_best_header_t *) + MALLOC(dhd->osh, BEST_HEADER_SIZE); + if (pbestnetheader == NULL) { + err = BCME_NOMEM; + if (pbestnet_entry) + MFREE(dhd->osh, pbestnet_entry, + BESTNET_ENTRY_SIZE); + DHD_ERROR(("failed to allocate" + " dhd_pno_bestnet_entry\n")); + goto exit; + } + /* increase total cnt of bestnet header */ + pscan_results->cnt_header++; + /* need to record the reason to call dhd_pno_get_for_bach */ + if (reason) + pbestnetheader->reason = (ENABLE << reason); + memset(pbestnetheader, 0, BEST_HEADER_SIZE); + /* initialize the head of linked list */ + INIT_LIST_HEAD(&(pbestnetheader->entry_list)); + /* link the pbestnet heaer into existed list */ + if (pscan_results->bestnetheader == NULL) + /* In case of header */ + pscan_results->bestnetheader = pbestnetheader; + else { + dhd_pno_best_header_t *head = + pscan_results->bestnetheader; + pscan_results->bestnetheader = pbestnetheader; + pbestnetheader->next = head; + } + } + /* fills the best network info */ + pbestnet_entry->channel = plnetinfo_v2->pfnsubnet.channel; + pbestnet_entry->RSSI = plnetinfo_v2->RSSI; + if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) { + /* if RSSI is positive value, we assume that + * this scan is aborted by other scan + */ + DHD_PNO(("This scan is aborted\n")); + pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT); + } + pbestnet_entry->rtt0 = plnetinfo_v2->rtt0; + pbestnet_entry->rtt1 = plnetinfo_v2->rtt1; + pbestnet_entry->timestamp = plnetinfo_v2->timestamp; + if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length" + " %d: trimming it to max\n", + __FUNCTION__, plnetinfo_v2->pfnsubnet.SSID_len)); + plnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + 
pbestnet_entry->SSID_len = plnetinfo_v2->pfnsubnet.SSID_len; + memcpy(pbestnet_entry->SSID, plnetinfo_v2->pfnsubnet.u.SSID, + pbestnet_entry->SSID_len); + memcpy(&pbestnet_entry->BSSID, &plnetinfo_v2->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + /* add the element into list */ + list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list); + /* increase best entry count */ + pbestnetheader->tot_cnt++; + pbestnetheader->tot_size += BESTNET_ENTRY_SIZE; + DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); + DHD_PNO(("\tSSID : ")); + for (j = 0; j < plnetinfo_v2->pfnsubnet.SSID_len; j++) + DHD_PNO(("%c", plnetinfo_v2->pfnsubnet.u.SSID[j])); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: "MACDBG"\n", + MAC2STRDBG(plnetinfo_v2->pfnsubnet.BSSID.octet))); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo_v2->pfnsubnet.channel, + plnetinfo_v2->RSSI, plnetinfo_v2->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo_v2->rtt0, + plnetinfo_v2->rtt1)); + plnetinfo_v2++; + } + } else { + err = BCME_VERSION; + DHD_ERROR(("bestnet fw version %d not supported\n", + plbestnet_v1->version)); + goto exit; + } + } while (fwstatus != PFN_COMPLETE); + + if (pscan_results->cnt_header == 0) { + /* In case that we didn't get any data from the firmware + * Remove the current scan_result list from get_bach.scan_results_list. 
+ */ + DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n")); + list_del(&pscan_results->list); + MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE); + _params->params_batch.get_batch.top_node_cnt--; + } else { + /* increase total scan count using current scan count */ + _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; + } + + if (buf && bufsize) { + /* This is a first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + /* reset global values after moving to expired list */ + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + } +convert_format: + err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize); + if (err < 0) { + DHD_ERROR(("failed to convert the data into upper layer format\n")); + goto exit; + } + } +exit: + if (plbestnet_v1) + MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN); + if (_params) { + _params->params_batch.get_batch.buf = NULL; + _params->params_batch.get_batch.bufsize = 0; + _params->params_batch.get_batch.bytes_written = err; + } + mutex_unlock(&_pno_state->pno_mutex); +exit_no_unlock: + if (waitqueue_active(&_pno_state->get_batch_done.wait)) + complete(&_pno_state->get_batch_done); + return err; +} + +static void +_dhd_pno_get_batch_handler(struct work_struct *work) +{ 
+ dhd_pno_status_info_t *_pno_state; + dhd_pub_t *dhd; + struct dhd_pno_batch_params *params_batch; + DHD_PNO(("%s enter\n", __FUNCTION__)); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + _pno_state = container_of(work, struct dhd_pno_status_info, work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + dhd = _pno_state->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef GSCAN_SUPPORT + _dhd_pno_get_gscan_batch_from_fw(dhd); +#endif /* GSCAN_SUPPORT */ + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + + _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf, + params_batch->get_batch.bufsize, params_batch->get_batch.reason); + } +} + +int +dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + char *pbuf = buf; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_batch_params *params_batch; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + gscan_params->reason = reason; + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + 
is_batch_retrieval_complete(gscan_params), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } else +#endif // endif + { + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + memset(pbuf, 0, bufsize); + pbuf += snprintf(pbuf, bufsize, "scancount=%d\n", 0); + snprintf(pbuf, bufsize, "%s", RESULTS_END_MARKER); + err = strlen(buf); + goto exit; + } + params_batch->get_batch.buf = buf; + params_batch->get_batch.bufsize = bufsize; + params_batch->get_batch.reason = reason; + params_batch->get_batch.bytes_written = 0; + schedule_work(&_pno_state->work); + wait_for_completion(&_pno_state->get_batch_done); + } + +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#endif // endif + err = params_batch->get_batch.bytes_written; +exit: + return err; +} + +int +dhd_pno_stop_for_batch(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode = 0; + int i = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("Gscan is ongoing, nothing to stop here\n")); + return err; + } +#endif // endif + + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) { + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed 
to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + _pno_state->pno_mode = mode; + /* restart Legacy PNO if the Legacy PNO is on */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr, + _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, + _params_legacy->chan_list, _params_legacy->nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh, + sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + i = 0; + /* convert dhd_pno_bssid to wl_pfn_bssid */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid[i].flags = iter->flags; + i++; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + 
DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + MFREE(dhd->osh, p_pfn_bssid, + sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid); + return err; +} + +int +dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) +{ + int err = BCME_OK; + int i; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0; + int tot_nchan = 0; + int mode = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + struct dhd_pno_bssid *_pno_bssid; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(hotlist_params, "hotlist_params is NULL", err); + NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + _pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } + _params->params_batch.nchan = hotlist_params->nchan; + _params->params_batch.scan_fr = hotlist_params->scan_fr; + if (hotlist_params->nchan) + memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list, + sizeof(_params->params_hotlist.chan_list)); + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - 
hotlist_params->nchan; + if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_hotlist.chan_list[hotlist_params->nchan], + &rem_nchan, hotlist_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, hotlist_params->band)); + goto exit; + } + /* now we need to update nchan because rem_chan has valid channel count */ + _params->params_hotlist.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan, + sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + int i; + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_batch.nchan; i++) { + DHD_PNO(("%d ", _params->params_batch.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif // endif + if (_params->params_hotlist.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, _params->params_hotlist.chan_list, + sizeof(_chan_list)); + tot_nchan = _params->params_hotlist.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID is on progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset for channelist between two mode */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && + _params->params_hotlist.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_hotlist.chan_list[0], + _params->params_hotlist.nchan); + if (err < 0) { + 
DHD_ERROR(("%s : failed to merge channel list" + "between legacy and hotlist\n", + __FUNCTION__)); + goto exit; + } + } + + } + + INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list)); + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) { + DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + for (i = 0; i < hotlist_params->nbssid; i++) { + _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh, + sizeof(struct dhd_pno_bssid)); + NULL_CHECK(_pno_bssid, "_pfn_bssid is NULL", err); + memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN); + _pno_bssid->flags = p_pfn_bssid[i].flags; + list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list); + } + _params->params_hotlist.nbssid = hotlist_params->nbssid; + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + return err; +} + +int +dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & 
DHD_PNO_HOTLIST_MODE)) { + DHD_ERROR(("%s : Hotlist MODE is not enabled\n", + __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previous pno mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + /* restart Legacy PNO Scan */ + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr, + _params_legacy->pno_repeat, _params_legacy->pno_freq_expo_max, + _params_legacy->chan_list, _params_legacy->nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* restart Batching Scan */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + return err; +} + +#ifdef GSCAN_SUPPORT +int +dhd_retreive_batch_scan_results(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_batch_params *params_batch; + + 
NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) { + DHD_PNO(("Retreive batch results\n")); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + _params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS; + smp_wmb(); + schedule_work(&_pno_state->work); + } else { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval" + "already in progress, will skip\n", __FUNCTION__)); + err = BCME_ERROR; + } + + return err; +} + +void +dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type) +{ + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + gscan_results_cache_t *iter, *tmp; + + if (!_pno_state) { + return; + } + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (type == HOTLIST_FOUND) { + iter = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = NULL; + } else { + iter = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = NULL; + } + + while (iter) { + tmp = iter->next; + MFREE(dhd->osh, iter, + ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t)); + iter = tmp; + } + + return; +} + +void * +dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len, int *size) +{ + wl_bss_info_t *bi = NULL; + wl_gscan_result_t *gscan_result; + wifi_gscan_full_result_t *result = NULL; + u32 bi_length = 0; + uint8 channel; + uint32 mem_needed; + struct timespec ts; + u32 bi_ie_length = 0; + u32 bi_ie_offset = 0; + + *size = 0; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic 
push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + gscan_result = (wl_gscan_result_t *)data; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + if (!gscan_result) { + DHD_ERROR(("Invalid gscan result (NULL pointer)\n")); + goto exit; + } + + if ((len < sizeof(*gscan_result)) || + (len < dtoh32(gscan_result->buflen)) || + (dtoh32(gscan_result->buflen) > + (sizeof(*gscan_result) + WL_SCAN_IE_LEN_MAX))) { + DHD_ERROR(("%s: invalid gscan buflen:%u\n", __FUNCTION__, + dtoh32(gscan_result->buflen))); + goto exit; + } + + bi = &gscan_result->bss_info[0].info; + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(gscan_result->buflen) - + WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) { + DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + bi_ie_offset = dtoh32(bi->ie_offset); + bi_ie_length = dtoh32(bi->ie_length); + if ((bi_ie_offset + bi_ie_length) > bi_length) { + DHD_ERROR(("%s: Invalid ie_length:%u or ie_offset:%u\n", + __FUNCTION__, bi_ie_length, bi_ie_offset)); + goto exit; + } + if (bi->SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length:%u\n", __FUNCTION__, bi->SSID_len)); + goto exit; + } + + mem_needed = OFFSETOF(wifi_gscan_full_result_t, ie_data) + bi->ie_length; + result = (wifi_gscan_full_result_t *)MALLOC(dhd->osh, mem_needed); + if (!result) { + DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n", + __FUNCTION__, mem_needed)); + goto exit; + } + + result->scan_ch_bucket = gscan_result->scan_ch_bucket; + memcpy(result->fixed.ssid, bi->SSID, bi->SSID_len); + result->fixed.ssid[bi->SSID_len] = '\0'; + channel = wf_chspec_ctlchan(bi->chanspec); + result->fixed.channel = wf_channel2mhz(channel, + (channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->fixed.rssi = (int32) bi->RSSI; + result->fixed.rtt = 0; + result->fixed.rtt_sd = 0; + get_monotonic_boottime(&ts); + result->fixed.ts = (uint64) TIMESPEC_TO_US(ts); + result->fixed.beacon_period = dtoh16(bi->beacon_period); + result->fixed.capability = dtoh16(bi->capability); + result->ie_length = bi_ie_length; + memcpy(&result->fixed.macaddr, &bi->BSSID, ETHER_ADDR_LEN); + memcpy(result->ie_data, ((uint8 *)bi + bi_ie_offset), bi_ie_length); + *size = mem_needed; +exit: + return result; +} + +void * +dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, uint32 event, int *size) +{ + dhd_epno_results_t *results = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + uint32 count, mem_needed = 0, i; + uint8 ssid[DOT11_MAX_SSID_LEN + 1]; + struct ether_addr *bssid; + + *size = 0; + if (!_pno_state) + return NULL; + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + wl_pfn_scanresults_v1_t *pfn_result = (wl_pfn_scanresults_v1_t *)data; + wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data; + wl_pfn_net_info_v1_t *net; + wl_pfn_net_info_v2_t *net_v2; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + if (pfn_result->version == PFN_SCANRESULT_VERSION_V1) { + /* Check if count of pfn results is corrupted */ + if (pfn_result->count > EVENT_MAX_NETCNT_V1) { + DHD_ERROR(("%s event %d: pfn results count %d" + "exceeds the max limit\n", + __FUNCTION__, event, pfn_result->count)); + return NULL; + } + count = pfn_result->count; + mem_needed = sizeof(dhd_epno_results_t) * count; + results = (dhd_epno_results_t *)MALLOC(dhd->osh, 
mem_needed); + if (!results) { + DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__, + mem_needed)); + return NULL; + } + for (i = 0; i < count; i++) { + net = &pfn_result->netinfo[i]; + results[i].rssi = net->RSSI; + results[i].channel = wf_channel2mhz(net->pfnsubnet.channel, + (net->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + results[i].flags = (event == WLC_E_PFN_NET_FOUND) ? + WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST; + results[i].ssid_len = min(net->pfnsubnet.SSID_len, + (uint8)DOT11_MAX_SSID_LEN); + bssid = &results[i].bssid; + memcpy(bssid, &net->pfnsubnet.BSSID, ETHER_ADDR_LEN); + if (!net->pfnsubnet.SSID_len) { + DHD_ERROR(("%s: Gscan results indexing is not" + " supported in version 1 \n", __FUNCTION__)); + MFREE(dhd->osh, results, mem_needed); + return NULL; + } else { + memcpy(results[i].ssid, net->pfnsubnet.SSID, + results[i].ssid_len); + } + memcpy(ssid, results[i].ssid, results[i].ssid_len); + ssid[results[i].ssid_len] = '\0'; + DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n", + ssid, MAC2STRDBG(bssid->octet), results[i].channel, + results[i].rssi, results[i].flags)); + } + } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) { + /* Check if count of pfn results is corrupted */ + if (pfn_result_v2->count > EVENT_MAX_NETCNT_V2) { + DHD_ERROR(("%s event %d: pfn results count %d" + "exceeds the max limit\n", + __FUNCTION__, event, pfn_result_v2->count)); + return NULL; + } + count = pfn_result_v2->count; + mem_needed = sizeof(dhd_epno_results_t) * count; + results = (dhd_epno_results_t *)MALLOC(dhd->osh, mem_needed); + if (!results) { + DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__, + mem_needed)); + return NULL; + } + for (i = 0; i < count; i++) { + net_v2 = &pfn_result_v2->netinfo[i]; + results[i].rssi = net_v2->RSSI; + results[i].channel = wf_channel2mhz(net_v2->pfnsubnet.channel, + (net_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + results[i].flags = (event == WLC_E_PFN_NET_FOUND) ? + WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST; + results[i].ssid_len = min(net_v2->pfnsubnet.SSID_len, + (uint8)DOT11_MAX_SSID_LEN); + bssid = &results[i].bssid; + memcpy(bssid, &net_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN); + if (!net_v2->pfnsubnet.SSID_len) { + dhd_pno_idx_to_ssid(gscan_params, &results[i], + net_v2->pfnsubnet.u.index); + } else { + memcpy(results[i].ssid, net_v2->pfnsubnet.u.SSID, + results[i].ssid_len); + } + memcpy(ssid, results[i].ssid, results[i].ssid_len); + ssid[results[i].ssid_len] = '\0'; + DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n", + ssid, MAC2STRDBG(bssid->octet), results[i].channel, + results[i].rssi, results[i].flags)); + } + } else { + DHD_ERROR(("%s event %d: Incorrect version %d , not supported\n", + __FUNCTION__, event, pfn_result->version)); + return NULL; + } + } + *size = mem_needed; + return results; +} + +void * +dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type, u32 *buf_len) +{ + void *ptr = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + wl_pfn_scanresults_v1_t *results_v1 = (wl_pfn_scanresults_v1_t *)event_data; + wl_pfn_scanresults_v2_t *results_v2 = (wl_pfn_scanresults_v2_t *)event_data; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + wifi_gscan_result_t *hotlist_found_array; + wl_pfn_net_info_v1_t *pnetinfo; + wl_pfn_net_info_v2_t *pnetinfo_v2; + gscan_results_cache_t *gscan_hotlist_cache; + uint32 malloc_size = 0, i, total = 0; + struct timespec tm_spec; + uint16 fwstatus; + uint16 fwcount; + + /* Static asserts in _dhd_pno_get_for_batch() above guarantee the v1 and v2 + * net_info 
and subnet_info structures are compatible in size and SSID offset, + * allowing v1 to be safely used in the code below except for lscanresults + * fields themselves (status, count, offset to netinfo). + */ + + *buf_len = 0; + if (results_v1->version == PFN_SCANRESULTS_VERSION_V1) { + fwstatus = results_v1->status; + fwcount = results_v1->count; + pnetinfo = &results_v1->netinfo[0]; + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V1)) { + DHD_ERROR(("%s: wrong v1 fwcount:%d\n", __FUNCTION__, fwcount)); + *send_evt_bytes = 0; + return ptr; + } + + get_monotonic_boottime(&tm_spec); + malloc_size = sizeof(gscan_results_cache_t) + + ((fwcount - 1) * sizeof(wifi_gscan_result_t)); + gscan_hotlist_cache = (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size); + if (!gscan_hotlist_cache) { + DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size)); + *send_evt_bytes = 0; + return ptr; + } + + *buf_len = malloc_size; + if (type == HOTLIST_FOUND) { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = gscan_hotlist_cache; + DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount)); + } else { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = gscan_hotlist_cache; + DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount)); + } + + gscan_hotlist_cache->tot_count = fwcount; + gscan_hotlist_cache->tot_consumed = 0; + + for (i = 0; i < fwcount; i++, pnetinfo++) { + hotlist_found_array = &gscan_hotlist_cache->results[i]; + memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t)); + hotlist_found_array->channel = wf_channel2mhz(pnetinfo->pfnsubnet.channel, + (pnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + hotlist_found_array->rssi = (int32) pnetinfo->RSSI; + + hotlist_found_array->ts = + convert_fw_rel_time_to_systime(&tm_spec, + (pnetinfo->timestamp * 1000)); + if (pnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", + pnetinfo->pfnsubnet.SSID_len)); + pnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(hotlist_found_array->ssid, pnetinfo->pfnsubnet.SSID, + pnetinfo->pfnsubnet.SSID_len); + hotlist_found_array->ssid[pnetinfo->pfnsubnet.SSID_len] = '\0'; + + memcpy(&hotlist_found_array->macaddr, &pnetinfo->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + DHD_PNO(("\t%s "MACDBG" rssi %d\n", + hotlist_found_array->ssid, + MAC2STRDBG(hotlist_found_array->macaddr.octet), + hotlist_found_array->rssi)); + } + } else if (results_v2->version == PFN_SCANRESULTS_VERSION_V2) { + fwstatus = results_v2->status; + fwcount = results_v2->count; + pnetinfo_v2 = (wl_pfn_net_info_v2_t*)&results_v2->netinfo[0]; + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V2)) { + DHD_ERROR(("%s: wrong v2 fwcount:%d\n", __FUNCTION__, fwcount)); + *send_evt_bytes = 0; + return ptr; + } + + get_monotonic_boottime(&tm_spec); + malloc_size = sizeof(gscan_results_cache_t) + + ((fwcount - 1) * sizeof(wifi_gscan_result_t)); + gscan_hotlist_cache = + (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size); + if (!gscan_hotlist_cache) { + DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size)); + *send_evt_bytes = 0; + return ptr; + } + *buf_len = malloc_size; + if (type == HOTLIST_FOUND) { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = gscan_hotlist_cache; + DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount)); + } else { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = 
gscan_hotlist_cache; + DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount)); + } + + gscan_hotlist_cache->tot_count = fwcount; + gscan_hotlist_cache->tot_consumed = 0; + gscan_hotlist_cache->scan_ch_bucket = results_v2->scan_ch_bucket; + + for (i = 0; i < fwcount; i++, pnetinfo_v2++) { + hotlist_found_array = &gscan_hotlist_cache->results[i]; + memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t)); + hotlist_found_array->channel = + wf_channel2mhz(pnetinfo_v2->pfnsubnet.channel, + (pnetinfo_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + hotlist_found_array->rssi = (int32) pnetinfo_v2->RSSI; + + hotlist_found_array->ts = + convert_fw_rel_time_to_systime(&tm_spec, + (pnetinfo_v2->timestamp * 1000)); + if (pnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", + pnetinfo_v2->pfnsubnet.SSID_len)); + pnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(hotlist_found_array->ssid, pnetinfo_v2->pfnsubnet.u.SSID, + pnetinfo_v2->pfnsubnet.SSID_len); + hotlist_found_array->ssid[pnetinfo_v2->pfnsubnet.SSID_len] = '\0'; + + memcpy(&hotlist_found_array->macaddr, &pnetinfo_v2->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + DHD_PNO(("\t%s "MACDBG" rssi %d\n", + hotlist_found_array->ssid, + MAC2STRDBG(hotlist_found_array->macaddr.octet), + hotlist_found_array->rssi)); + } + } else { + DHD_ERROR(("%s: event version %d not supported\n", + __FUNCTION__, results_v1->version)); + *send_evt_bytes = 0; + return ptr; + } + if (fwstatus == PFN_COMPLETE) { + ptr = (void *) gscan_hotlist_cache; + while (gscan_hotlist_cache) { + total += gscan_hotlist_cache->tot_count; + gscan_hotlist_cache = gscan_hotlist_cache->next; + } + *send_evt_bytes = total * sizeof(wifi_gscan_result_t); + } + + return ptr; +} +#endif /* GSCAN_SUPPORT */ + +int +dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + uint event_type; + 
dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + event_type = ntoh32(event->event_type); + DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type)); + switch (event_type) { + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BSSID_NET_LOST: + /* TODO : need to implement event logic using generic netlink */ + break; + case WLC_E_PFN_BEST_BATCHING: +#ifndef GSCAN_SUPPORT + { + struct dhd_pno_batch_params *params_batch; + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (!waitqueue_active(&_pno_state->get_batch_done.wait)) { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__)); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + schedule_work(&_pno_state->work); + } else + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING" + "will skip this event\n", __FUNCTION__)); + break; + } +#else + break; +#endif /* !GSCAN_SUPPORT */ + default: + DHD_ERROR(("unknown event : %d\n", event_type)); + } +exit: + return err; +} + +int dhd_pno_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + char *buf = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + UNUSED_PARAMETER(_dhd_pno_suspend); + if (dhd->pno_state) + goto exit; + dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t)); + NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err); + memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t)); + /* need to check whether current firmware support batching and hotlist scan */ + _pno_state = PNO_GET_PNOSTATE(dhd); + _pno_state->wls_supported = TRUE; + _pno_state->dhd = dhd; + 
mutex_init(&_pno_state->pno_mutex); + INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler); + init_completion(&_pno_state->get_batch_done); +#ifdef GSCAN_SUPPORT + init_waitqueue_head(&_pno_state->batch_get_wait); +#endif /* GSCAN_SUPPORT */ + buf = MALLOC(dhd->osh, WLC_IOCTL_SMLEN); + if (!buf) { + DHD_ERROR((":%s buf alloc err.\n", __FUNCTION__)); + return BCME_NOMEM; + } + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, buf, WLC_IOCTL_SMLEN, + FALSE); + if (err == BCME_UNSUPPORTED) { + _pno_state->wls_supported = FALSE; + DHD_INFO(("Current firmware doesn't support" + " Android Location Service\n")); + } else { + DHD_ERROR(("%s: Support Android Location Service\n", + __FUNCTION__)); + } +exit: + MFREE(dhd->osh, buf, WLC_IOCTL_SMLEN); + return err; +} + +int dhd_pno_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + NULL_CHECK(_pno_state, "pno_state is NULL", err); + /* may need to free legacy ssid_list */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + /* clear resource if the BATCH MODE is on */ + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + } + cancel_work_sync(&_pno_state->work); + MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t)); + dhd->pno_state = NULL; + return err; +} +#endif 
/* PNO_SUPPORT */ diff --git a/bcmdhd.100.10.315.x/dhd_pno.h b/bcmdhd.100.10.315.x/dhd_pno.h new file mode 100644 index 0000000..66c41b6 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_pno.h @@ -0,0 +1,572 @@ +/* + * Header file of Broadcom Dongle Host Driver (DHD) + * Prefered Network Offload code and Wi-Fi Location Service(WLS) code. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pno.h 722186 2017-09-19 07:03:42Z $ + */ + +#ifndef __DHD_PNO_H__ +#define __DHD_PNO_H__ + +#if defined(PNO_SUPPORT) +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION '1' +#define PNO_TLV_SUBTYPE_LEGACY_PNO '2' +#define PNO_TLV_RESERVED '0' + +#define PNO_BATCHING_SET "SET" +#define PNO_BATCHING_GET "GET" +#define PNO_BATCHING_STOP "STOP" + +#define PNO_PARAMS_DELIMETER " " +#define PNO_PARAM_CHANNEL_DELIMETER "," +#define PNO_PARAM_VALUE_DELLIMETER '=' +#define PNO_PARAM_SCANFREQ "SCANFREQ" +#define PNO_PARAM_BESTN "BESTN" +#define PNO_PARAM_MSCAN "MSCAN" +#define PNO_PARAM_CHANNEL "CHANNEL" +#define PNO_PARAM_RTT "RTT" + +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' + +#define MAXNUM_SSID_PER_ADD 16 +#define MAXNUM_PNO_PARAMS 2 +#define PNO_TLV_COMMON_LENGTH 1 +#define DEFAULT_BATCH_MSCAN 16 + +#define RESULTS_END_MARKER "----\n" +#define SCAN_END_MARKER "####\n" +#define AP_END_MARKER "====\n" +#define PNO_RSSI_MARGIN_DBM 30 + +#define CSCAN_COMMAND "CSCAN " +#define CSCAN_TLV_PREFIX 'S' +#define CSCAN_TLV_VERSION 1 +#define CSCAN_TLV_SUBVERSION 0 +#define CSCAN_TLV_TYPE_SSID_IE 'S' +#define CSCAN_TLV_TYPE_CHANNEL_IE 'C' +#define CSCAN_TLV_TYPE_NPROBE_IE 'N' +#define CSCAN_TLV_TYPE_ACTIVE_IE 'A' +#define CSCAN_TLV_TYPE_PASSIVE_IE 'P' +#define CSCAN_TLV_TYPE_HOME_IE 'H' +#define CSCAN_TLV_TYPE_STYPE_IE 'T' + +#define WL_SCAN_PARAMS_SSID_MAX 10 +#define GET_SSID "SSID=" +#define GET_CHANNEL "CH=" +#define GET_NPROBE "NPROBE=" +#define GET_ACTIVE_ASSOC_DWELL "ACTIVE=" +#define GET_PASSIVE_ASSOC_DWELL "PASSIVE=" +#define GET_HOME_DWELL "HOME=" +#define GET_SCAN_TYPE "TYPE=" + +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +#define GSCAN_MAX_CH_BUCKETS 8 +#define GSCAN_MAX_CHANNELS_IN_BUCKET 32 +#define GSCAN_MAX_AP_CACHE_PER_SCAN 32 +#define GSCAN_MAX_AP_CACHE 320 +#define GSCAN_BG_BAND_MASK (1 << 0) +#define GSCAN_A_BAND_MASK (1 
<< 1) +#define GSCAN_DFS_MASK (1 << 2) +#define GSCAN_ABG_BAND_MASK (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK) +#define GSCAN_BAND_MASK (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK) + +#define GSCAN_FLUSH_HOTLIST_CFG (1 << 0) +#define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1) +#define GSCAN_FLUSH_SCAN_CFG (1 << 2) +#define GSCAN_FLUSH_EPNO_CFG (1 << 3) +#define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \ + GSCAN_FLUSH_SIGNIFICANT_CFG | \ + GSCAN_FLUSH_HOTLIST_CFG | \ + GSCAN_FLUSH_EPNO_CFG) +#define DHD_EPNO_HIDDEN_SSID (1 << 0) +#define DHD_EPNO_A_BAND_TRIG (1 << 1) +#define DHD_EPNO_BG_BAND_TRIG (1 << 2) +#define DHD_EPNO_STRICT_MATCH (1 << 3) +#define DHD_EPNO_SAME_NETWORK (1 << 4) +#define DHD_PNO_USE_SSID (DHD_EPNO_HIDDEN_SSID | DHD_EPNO_STRICT_MATCH) + +/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */ +#define GSCAN_BATCH_RETRIEVAL_COMPLETE 0 +#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1 +#define GSCAN_BATCH_NO_THR_SET 101 +#define GSCAN_LOST_AP_WINDOW_DEFAULT 4 +#define GSCAN_MIN_BSSID_TIMEOUT 90 +#define GSCAN_BATCH_GET_MAX_WAIT 500 +#define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF +#define GSCAN_RETRY_THRESHOLD 3 + +#define MAX_EPNO_SSID_NUM 64 +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +enum scan_status { + /* SCAN ABORT by other scan */ + PNO_STATUS_ABORT, + /* RTT is presence or not */ + PNO_STATUS_RTT_PRESENCE, + /* Disable PNO by Driver */ + PNO_STATUS_DISABLE, + /* NORMAL BATCHING GET */ + PNO_STATUS_NORMAL, + /* WLC_E_PFN_BEST_BATCHING */ + PNO_STATUS_EVENT, + PNO_STATUS_MAX +}; +#define PNO_STATUS_ABORT_MASK 0x0001 +#define PNO_STATUS_RTT_MASK 0x0002 +#define PNO_STATUS_DISABLE_MASK 0x0004 +#define PNO_STATUS_OOM_MASK 0x0010 + +enum index_mode { + INDEX_OF_LEGACY_PARAMS, + INDEX_OF_BATCH_PARAMS, + INDEX_OF_HOTLIST_PARAMS, + /* GSCAN includes hotlist scan and they do not run + * independent of each other + */ + INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS, + INDEX_MODE_MAX +}; +enum dhd_pno_status { + DHD_PNO_DISABLED, + DHD_PNO_ENABLED, + 
DHD_PNO_SUSPEND +}; +typedef struct cmd_tlv { + char prefix; + char version; + char subtype; + char reserved; +} cmd_tlv_t; +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +typedef enum { + WIFI_BAND_UNSPECIFIED, + WIFI_BAND_BG = 1, /* 2.4 GHz */ + WIFI_BAND_A = 2, /* 5 GHz without DFS */ + WIFI_BAND_A_DFS = 4, /* 5 GHz DFS only */ + WIFI_BAND_A_WITH_DFS = 6, /* 5 GHz with DFS */ + WIFI_BAND_ABG = 3, /* 2.4 GHz + 5 GHz; no DFS */ + WIFI_BAND_ABG_WITH_DFS = 7, /* 2.4 GHz + 5 GHz with DFS */ +} gscan_wifi_band_t; + +typedef enum { + HOTLIST_LOST, + HOTLIST_FOUND +} hotlist_type_t; + +typedef enum dhd_pno_gscan_cmd_cfg { + DHD_PNO_BATCH_SCAN_CFG_ID = 0, + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + DHD_PNO_SCAN_CFG_ID, + DHD_PNO_GET_CAPABILITIES, + DHD_PNO_GET_BATCH_RESULTS, + DHD_PNO_GET_CHANNEL_LIST, + DHD_PNO_GET_NEW_EPNO_SSID_ELEM, + DHD_PNO_EPNO_CFG_ID, + DHD_PNO_GET_AUTOJOIN_CAPABILITIES, + DHD_PNO_EPNO_PARAMS_ID +} dhd_pno_gscan_cmd_cfg_t; + +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)), + /* Wi-Fi Google Android SCAN Mode */ + DHD_PNO_GSCAN_MODE = (1 << (3)) +} dhd_pno_mode_t; +#else +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)) +} dhd_pno_mode_t; +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +typedef struct dhd_pno_ssid { + bool hidden; + int8 rssi_thresh; + uint8 dummy; + uint16 SSID_len; + uint32 flags; + int32 wpa_auth; + uchar SSID[DOT11_MAX_SSID_LEN]; + struct list_head list; +} dhd_pno_ssid_t; + +struct dhd_pno_bssid { + struct ether_addr macaddr; + /* Bit4: 
suppress_lost, Bit3: suppress_found */ + uint16 flags; + struct list_head list; +}; + +typedef struct dhd_pno_bestnet_entry { + struct ether_addr BSSID; + uint8 SSID_len; + uint8 SSID[DOT11_MAX_SSID_LEN]; + int8 RSSI; + uint8 channel; + uint32 timestamp; + uint16 rtt0; /* distance_cm based on RTT */ + uint16 rtt1; /* distance_cm based on sample standard deviation */ + unsigned long recorded_time; + struct list_head list; +} dhd_pno_bestnet_entry_t; +#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t)) + +typedef struct dhd_pno_bestnet_header { + struct dhd_pno_bestnet_header *next; + uint8 reason; + uint32 tot_cnt; + uint32 tot_size; + struct list_head entry_list; +} dhd_pno_best_header_t; +#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t)) + +typedef struct dhd_pno_scan_results { + dhd_pno_best_header_t *bestnetheader; + uint8 cnt_header; + struct list_head list; +} dhd_pno_scan_results_t; +#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t)) + +struct dhd_pno_get_batch_info { + /* info related to get batch */ + char *buf; + bool batch_started; + uint32 tot_scan_cnt; + uint32 expired_tot_scan_cnt; + uint32 top_node_cnt; + uint32 bufsize; + uint32 bytes_written; + int reason; + struct list_head scan_results_list; + struct list_head expired_scan_results_list; +}; +struct dhd_pno_legacy_params { + uint16 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + int pno_repeat; + int pno_freq_expo_max; + int nssid; + struct list_head ssid_list; +}; +struct dhd_pno_batch_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 band; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 rtt; + struct dhd_pno_get_batch_info get_batch; +}; +struct dhd_pno_hotlist_params { + uint8 band; + int32 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 nbssid; + struct list_head bssid_list; +}; +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +#define DHD_PNO_REPORT_NO_BATCH (1 << 2) + +typedef struct 
dhd_pno_gscan_channel_bucket { + uint16 bucket_freq_multiple; + /* band = 1 All bg band channels, + * band = 2 All a band channels, + * band = 0 chan_list channels + */ + uint16 band; + uint8 report_flag; + uint8 num_channels; + uint16 repeat; + uint16 bucket_max_multiple; + uint16 chan_list[GSCAN_MAX_CHANNELS_IN_BUCKET]; +} dhd_pno_gscan_channel_bucket_t; + +#define DHD_PNO_AUTH_CODE_OPEN 1 /* Open */ +#define DHD_PNO_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */ +#define DHD_PNO_AUTH_CODE_EAPOL 4 /* any EAPOL */ + +#define DHD_EPNO_DEFAULT_INDEX 0xFFFFFFFF + +typedef struct dhd_epno_params { + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + int8 rssi_thresh; + uint8 flags; + uint8 auth; + /* index required only for visble ssid */ + uint32 index; + struct list_head list; +} dhd_epno_params_t; + +typedef struct dhd_epno_results { + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + int8 rssi; + uint16 channel; + uint16 flags; + struct ether_addr bssid; +} dhd_epno_results_t; + +typedef struct wifi_gscan_result { + uint64 ts; /* Time of discovery */ + char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */ + struct ether_addr macaddr; /* BSSID */ + uint32 channel; /* channel frequency in MHz */ + int32 rssi; /* in db */ + uint64 rtt; /* in nanoseconds */ + uint64 rtt_sd; /* standard deviation in rtt */ + uint16 beacon_period; /* units are Kusec */ + uint16 capability; /* Capability information */ + uint32 pad; +} wifi_gscan_result_t; + +typedef struct wifi_gscan_full_result { + wifi_gscan_result_t fixed; + uint32 scan_ch_bucket; + uint32 ie_length; /* byte length of Information Elements */ + char ie_data[1]; /* IE data to follow */ +} wifi_gscan_full_result_t; + +typedef struct gscan_results_cache { + struct gscan_results_cache *next; + uint8 scan_id; + uint8 flag; + uint8 tot_count; + uint8 tot_consumed; + uint32 scan_ch_bucket; + wifi_gscan_result_t results[1]; +} gscan_results_cache_t; + +typedef struct dhd_pno_gscan_capabilities { + int max_scan_cache_size; + int 
max_scan_buckets; + int max_ap_cache_per_scan; + int max_rssi_sample_size; + int max_scan_reporting_threshold; + int max_hotlist_aps; + int max_significant_wifi_change_aps; + int max_epno_ssid_crc32; + int max_epno_hidden_ssid; + int max_white_list_ssid; +} dhd_pno_gscan_capabilities_t; + +typedef struct dhd_epno_ssid_cfg { + wl_ssid_ext_params_t params; + uint32 num_epno_ssid; + struct list_head epno_ssid_list; +} dhd_epno_ssid_cfg_t; + +struct dhd_pno_gscan_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; + uint8 lost_ap_window; + uint8 nchannel_buckets; + uint8 reason; + uint8 get_batch_flag; + uint8 send_all_results_flag; + uint16 max_ch_bucket_freq; + gscan_results_cache_t *gscan_batch_cache; + gscan_results_cache_t *gscan_hotlist_found; + gscan_results_cache_t*gscan_hotlist_lost; + uint16 nbssid_hotlist; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; + struct list_head hotlist_bssid_list; + dhd_epno_ssid_cfg_t epno_cfg; + uint32 scan_id; +}; + +typedef struct gscan_scan_params { + int32 scan_fr; + uint16 nchannel_buckets; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; +} gscan_scan_params_t; + +typedef struct gscan_batch_params { + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; +} gscan_batch_params_t; + +struct bssid_t { + struct ether_addr macaddr; + int16 rssi_reporting_threshold; /* 0 -> no reporting threshold */ +}; + +typedef struct gscan_hotlist_scan_params { + uint16 lost_ap_window; /* number of scans to declare LOST */ + uint16 nbssid; /* number of bssids */ + struct bssid_t bssid[1]; /* n bssids to follow */ +} gscan_hotlist_scan_params_t; + +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +typedef union dhd_pno_params { + struct dhd_pno_legacy_params params_legacy; + struct dhd_pno_batch_params params_batch; + struct dhd_pno_hotlist_params params_hotlist; +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) + struct 
dhd_pno_gscan_params params_gscan; +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +} dhd_pno_params_t; + +typedef struct dhd_pno_status_info { + dhd_pub_t *dhd; + struct work_struct work; + struct mutex pno_mutex; +#ifdef GSCAN_SUPPORT + wait_queue_head_t batch_get_wait; +#endif /* GSCAN_SUPPORT */ + struct completion get_batch_done; + bool wls_supported; /* wifi location service supported or not */ + enum dhd_pno_status pno_status; + enum dhd_pno_mode pno_mode; + dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX]; + struct list_head head_list; +} dhd_pno_status_info_t; + +/* wrapper functions */ +extern int +dhd_dev_pno_enable(struct net_device *dev, int enable); + +extern int +dhd_dev_pno_stop_for_ssid(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int +dhd_dev_pno_set_for_batch(struct net_device *dev, + struct dhd_pno_batch_params *batch_params); + +extern int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize); + +extern int +dhd_dev_pno_stop_for_batch(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); +extern bool dhd_dev_is_legacy_pno_enabled(struct net_device *dev); +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +extern void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +#ifdef GSCAN_SUPPORT +extern int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush); +int dhd_dev_pno_lock_access_batch_results(struct net_device *dev); +void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev); +extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool 
run, bool flush); +extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time); +int dhd_retreive_batch_scan_results(dhd_pub_t *dhd); +extern void * dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len); +void * dhd_dev_process_full_gscan_result(struct net_device *dev, + const void *data, uint32 len, int *send_evt_bytes); +extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev); +extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type); +extern int dhd_dev_wait_batch_results_complete(struct net_device *dev); +extern void * dhd_dev_process_epno_result(struct net_device *dev, + const void *data, uint32 event, int *send_evt_bytes); +extern int dhd_dev_set_epno(struct net_device *dev); +extern int dhd_dev_flush_fw_epno(struct net_device *dev); +#endif /* GSCAN_SUPPORT */ +/* dhd pno fuctions */ +extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd); +extern int dhd_pno_enable(dhd_pub_t *dhd, int enable); +extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params); + +extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason); + +extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd); + +extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); + +extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd); + +extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); +extern int dhd_pno_init(dhd_pub_t *dhd); +extern int dhd_pno_deinit(dhd_pub_t *dhd); +extern bool dhd_is_pno_supported(dhd_pub_t *dhd); +extern bool dhd_is_legacy_pno_enabled(dhd_pub_t *dhd); +#if defined(GSCAN_SUPPORT) || 
defined(DHD_GET_VALID_CHANNELS) +extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +#ifdef GSCAN_SUPPORT +extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush); +extern int dhd_pno_lock_batch_results(dhd_pub_t *dhd); +extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd); +extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush); +extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag); +extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf); +extern int dhd_dev_retrieve_batch_scan(struct net_device *dev); +extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type, u32 *buf_len); +extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data, + uint32 len, int *send_evt_bytes); +extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd); +extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type); +extern int dhd_wait_batch_results_complete(dhd_pub_t *dhd); +extern void * dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, + uint32 event, int *size); +extern void dhd_pno_translate_epno_fw_flags(uint32 *flags); +extern int dhd_pno_set_epno(dhd_pub_t *dhd); +extern int dhd_pno_flush_fw_epno(dhd_pub_t *dhd); +extern void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth); +#endif /* GSCAN_SUPPORT */ +#endif // endif + +#endif /* __DHD_PNO_H__ */ diff --git a/bcmdhd.100.10.315.x/dhd_proto.h b/bcmdhd.100.10.315.x/dhd_proto.h new file mode 100644 index 0000000..3724e7f --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_proto.h @@ -0,0 +1,218 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. 
+ * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_proto.h 769450 2018-06-26 10:16:46Z $ + */ + +#ifndef _dhd_proto_h_ +#define _dhd_proto_h_ + +#include +#include +#ifdef BCMPCIE +#include +#endif // endif + +#define DEFAULT_IOCTL_RESP_TIMEOUT 5000 +#ifndef IOCTL_RESP_TIMEOUT +/* In milli second default value for Production FW */ +#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT +#endif /* IOCTL_RESP_TIMEOUT */ + +#ifndef MFG_IOCTL_RESP_TIMEOUT +#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */ +#endif /* MFG_IOCTL_RESP_TIMEOUT */ + +#define DEFAULT_D3_ACK_RESP_TIMEOUT 2000 +#ifndef D3_ACK_RESP_TIMEOUT +#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT +#endif /* D3_ACK_RESP_TIMEOUT */ + +#define DEFAULT_DHD_BUS_BUSY_TIMEOUT (IOCTL_RESP_TIMEOUT + 1000) +#ifndef DHD_BUS_BUSY_TIMEOUT +#define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT +#endif /* DEFAULT_DHD_BUS_BUSY_TIMEOUT */ + +#define DS_EXIT_TIMEOUT 1000 /* In ms */ +#define DS_ENTER_TIMEOUT 1000 /* In ms */ + +#define IOCTL_DISABLE_TIMEOUT 0 + +/* + * Exported from the dhd protocol module (dhd_cdc, dhd_rndis) + */ + +/* Linkage, sets prot link and updates hdrlen in pub */ +extern int dhd_prot_attach(dhd_pub_t *dhdp); + +/* Initilizes the index block for dma'ing indices */ +extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz, + uint8 type, uint32 length); + +/* Unlink, frees allocated protocol memory (including dhd_prot) */ +extern void dhd_prot_detach(dhd_pub_t *dhdp); + +/* Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +extern int dhd_sync_with_dongle(dhd_pub_t *dhdp); + +/* Protocol initialization needed for IOCTL/IOVAR path */ +extern int dhd_prot_init(dhd_pub_t *dhd); + +/* Stop protocol: sync w/dongle state. */ +extern void dhd_prot_stop(dhd_pub_t *dhdp); + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. 
+ */ +extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp); +extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp); + +/* Remove any protocol-specific data header. */ +extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len); + +/* Use protocol to issue ioctl to dongle */ +extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len); + +/* Handles a protocol control response asynchronously */ +extern int dhd_prot_ctl_complete(dhd_pub_t *dhd); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add prot dump output to a buffer */ +extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Dump extended trap data */ +extern int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw); + +/* Update local copy of dongle statistics */ +extern void dhd_prot_dstats(dhd_pub_t *dhdp); + +extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen); + +extern int dhd_preinit_ioctls(dhd_pub_t *dhd); + +extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count); + +#ifdef BCMPCIE +extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound); +extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound); +extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound); +extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd); +extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd); +extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd); +extern int dhd_post_dummy_msg(dhd_pub_t *dhd); +extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len); +extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset); +extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx); +extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t 
*dhd, + uint len, uint srcdelay, uint destdelay, uint d11_lpbk, uint core_num); +extern dma_xfer_status_t dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd); + +extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma); +extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, + uint16 flowid, void *msgbuf_ring); +extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex); +extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b); +extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val); +extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd); +extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx); +extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx); +extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, + struct bcmstrbuf *strbuf, const char * fmt); +extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf); +extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info); +extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id); +extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val); +extern void dhd_prot_reset(dhd_pub_t *dhd); + +#ifdef IDLE_TX_FLOW_MGMT +extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count); +extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +#endif /* IDLE_TX_FLOW_MGMT */ +extern int dhd_prot_init_info_rings(dhd_pub_t *dhd); + +#endif /* BCMPCIE */ + +#ifdef DHD_LB +extern void dhd_lb_tx_compl_handler(unsigned long data); +extern void 
dhd_lb_rx_compl_handler(unsigned long data); +extern void dhd_lb_rx_process_handler(unsigned long data); +#endif /* DHD_LB */ +extern int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data); + +#ifdef BCMPCIE +extern int dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlv, uint16 tlv_len, + uint16 seq, uint16 xt_id); +extern bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set); +#else /* BCMPCIE */ +#define dhd_prot_send_host_timestamp(a, b, c, d, e) 0 +#define dhd_prot_data_path_tx_timestamp_logging(a, b, c) 0 +#define dhd_prot_data_path_rx_timestamp_logging(a, b, c) 0 +#endif /* BCMPCIE */ + +extern void dhd_prot_dma_indx_free(dhd_pub_t *dhd); + +#ifdef EWP_EDL +int dhd_prot_init_edl_rings(dhd_pub_t *dhd); +bool dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd); +int dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data); +#endif /* EWP_EDL */ + +/* APIs for managing a DMA-able buffer */ +int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len); +void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); + +/******************************** + * For version-string expansion * + */ +#if defined(BDC) +#define DHD_PROTOCOL "bdc" +#elif defined(CDC) +#define DHD_PROTOCOL "cdc" +#else +#define DHD_PROTOCOL "unknown" +#endif /* proto */ + +void dhd_get_hscb_info(struct dhd_prot *prot, void ** va, uint32 *len); +void dhd_get_hscb_buff(struct dhd_prot *prot, uint32 offset, uint32 length, void * buff); + +#endif /* _dhd_proto_h_ */ diff --git a/bcmdhd.100.10.315.x/dhd_rtt.c b/bcmdhd.100.10.315.x/dhd_rtt.c new file mode 100644 index 0000000..f51b850 --- /dev/null +++ 
b/bcmdhd.100.10.315.x/dhd_rtt.c @@ -0,0 +1,3014 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id$ + */ +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#ifdef WL_CFG80211 +#include +#endif /* WL_CFG80211 */ +#ifdef WL_NAN +#include +#endif /* WL_NAN */ + +static DEFINE_SPINLOCK(noti_list_lock); +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) + +#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED) +#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) + +#define FTM_IOC_BUFSZ 2048 /* ioc buffsize for our module (> BCM_XTLV_HDR_SIZE) */ +#define FTM_AVAIL_MAX_SLOTS 32 +#define FTM_MAX_CONFIGS 10 +#define FTM_MAX_PARAMS 10 +#define FTM_DEFAULT_SESSION 1 +#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */ +#define FTM_INVALID -1 +#define FTM_DEFAULT_CNT_20M 12 +#define FTM_DEFAULT_CNT_40M 10 +#define FTM_DEFAULT_CNT_80M 5 + +/* convenience macros */ +#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10) +#define FTM_MICRO2TU(_tu) ((uint64)(_tu) >> 10) +#define FTM_TU2MILLI(_tu) ((uint32)FTM_TU2MICRO(_tu) / 1000) +#define FTM_MICRO2MILLI(_x) ((uint32)(_x) / 1000) +#define FTM_MICRO2SEC(_x) ((uint32)(_x) / 1000000) +#define FTM_INTVL2NSEC(_intvl) ((uint32)ftm_intvl2nsec(_intvl)) +#define FTM_INTVL2USEC(_intvl) ((uint32)ftm_intvl2usec(_intvl)) +#define FTM_INTVL2MSEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000) +#define FTM_INTVL2SEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000000) +#define FTM_USECIN100MILLI(_usec) ((_usec) / 100000) + +/* broadcom specific set to have more accurate data */ +#define ENABLE_VHT_ACK +#define CH_MIN_5G_CHANNEL 34 +#define CH_MIN_2G_CHANNEL 1 + +struct rtt_noti_callback { + struct list_head list; + void *ctx; + dhd_rtt_compl_noti_fn noti_fn; +}; 

/* bitmask indicating which command groups; */
typedef enum {
	FTM_SUBCMD_FLAG_METHOD = 0x01, /* FTM method command */
	FTM_SUBCMD_FLAG_SESSION = 0x02, /* FTM session command */
	FTM_SUBCMD_FLAG_ALL = FTM_SUBCMD_FLAG_METHOD | FTM_SUBCMD_FLAG_SESSION
} ftm_subcmd_flag_t;

/* proxd ftm config-category definition */
typedef enum {
	FTM_CONFIG_CAT_GENERAL = 1, /* general configuration */
	FTM_CONFIG_CAT_OPTIONS = 2, /* 'config options' */
	FTM_CONFIG_CAT_AVAIL = 3, /* 'config avail' */
} ftm_config_category_t;

/* descriptor for one FTM subcommand: name, id and optional response handler */
typedef struct ftm_subcmd_info {
	int16 version; /* FTM version (optional) */
	char *name; /* cmd-name string as cmdline input */
	wl_proxd_cmd_t cmdid; /* cmd-id */
	bcm_xtlv_unpack_cbfn_t *handler; /* cmd response handler (optional) */
	ftm_subcmd_flag_t cmdflag; /* CMD flag (optional) */
} ftm_subcmd_info_t;

/* one on/off configuration flag set to apply to a proxd session/method */
typedef struct ftm_config_options_info {
	uint32 flags; /* wl_proxd_flags_t/wl_proxd_session_flags_t */
	bool enable;
} ftm_config_options_info_t;

/* one typed configuration parameter, keyed by its proxd TLV id */
typedef struct ftm_config_param_info {
	uint16 tlvid; /* mapping TLV id for the item */
	union {
		uint32 chanspec;
		struct ether_addr mac_addr;
		wl_proxd_intvl_t data_intvl;
		uint32 data32;
		uint16 data16;
		uint8 data8;
	};
} ftm_config_param_info_t;

/*
* definition for id-string mapping.
* This is used to map an id (can be cmd-id, tlv-id, ....) to a text-string
* for debug-display or cmd-log-display
*/
typedef struct ftm_strmap_entry {
	int32 id;
	char *text;
} ftm_strmap_entry_t;

/* firmware proxd status code --> host rtt_reason_t mapping entry */
typedef struct ftm_status_map_host_entry {
	wl_proxd_status_t proxd_status;
	rtt_reason_t rtt_reason;
} ftm_status_map_host_entry_t;

/* forward declarations of local helpers defined later in this file */
static uint16
rtt_result_ver(uint16 tlvid, const uint8 *p_data);

static int
dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data,
	uint16 tlvid, uint16 len);

static int
dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data,
	uint16 tlvid, uint16 len);

static wifi_rate_t
dhd_rtt_convert_rate_to_host(uint32 ratespec);

#ifdef WL_CFG80211
static int
dhd_rtt_start(dhd_pub_t *dhd);
#endif /* WL_CFG80211 */
static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0};

/* ftm status mapping to host status */
static const ftm_status_map_host_entry_t ftm_status_map_info[] = {
	{WL_PROXD_E_INCOMPLETE, RTT_STATUS_FAILURE},
	{WL_PROXD_E_OVERRIDDEN, RTT_STATUS_FAILURE},
	{WL_PROXD_E_ASAP_FAILED, RTT_STATUS_FAILURE},
	{WL_PROXD_E_NOTSTARTED, RTT_STATUS_FAIL_NOT_SCHEDULED_YET},
	{WL_PROXD_E_INVALIDMEAS, RTT_STATUS_FAIL_INVALID_TS},
	{WL_PROXD_E_INCAPABLE, RTT_STATUS_FAIL_NO_CAPABILITY},
	{WL_PROXD_E_MISMATCH, RTT_STATUS_FAILURE},
	{WL_PROXD_E_DUP_SESSION, RTT_STATUS_FAILURE},
	{WL_PROXD_E_REMOTE_FAIL, RTT_STATUS_FAILURE},
	{WL_PROXD_E_REMOTE_INCAPABLE, RTT_STATUS_FAILURE},
	{WL_PROXD_E_SCHED_FAIL, RTT_STATUS_FAIL_SCHEDULE},
	{WL_PROXD_E_PROTO, RTT_STATUS_FAIL_PROTOCOL},
	{WL_PROXD_E_EXPIRED, RTT_STATUS_FAILURE},
	{WL_PROXD_E_TIMEOUT, RTT_STATUS_FAIL_TM_TIMEOUT},
	{WL_PROXD_E_NOACK, RTT_STATUS_FAIL_NO_RSP},
	{WL_PROXD_E_DEFERRED, RTT_STATUS_FAILURE},
	{WL_PROXD_E_INVALID_SID, RTT_STATUS_FAILURE},
	{WL_PROXD_E_REMOTE_CANCEL, RTT_STATUS_FAILURE},
	{WL_PROXD_E_CANCELED, RTT_STATUS_ABORTED},
	{WL_PROXD_E_INVALID_SESSION, RTT_STATUS_FAILURE},
	{WL_PROXD_E_BAD_STATE, RTT_STATUS_FAILURE},
	{WL_PROXD_E_ERROR, RTT_STATUS_FAILURE},
	{WL_PROXD_E_OK, RTT_STATUS_SUCCESS}
};

/* ftm tlv-id mapping */
static const ftm_strmap_entry_t ftm_tlvid_loginfo[] = {
	/* { WL_PROXD_TLV_ID_xxx, "text for WL_PROXD_TLV_ID_xxx" }, */
	{ WL_PROXD_TLV_ID_NONE, "none" },
	{ WL_PROXD_TLV_ID_METHOD, "method" },
	{ WL_PROXD_TLV_ID_FLAGS, "flags" },
	{ WL_PROXD_TLV_ID_CHANSPEC, "chanspec" },
	{ WL_PROXD_TLV_ID_TX_POWER, "tx power" },
	{ WL_PROXD_TLV_ID_RATESPEC, "ratespec" },
	{ WL_PROXD_TLV_ID_BURST_DURATION, "burst duration" },
	{ WL_PROXD_TLV_ID_BURST_PERIOD, "burst period" },
	{ WL_PROXD_TLV_ID_BURST_FTM_SEP, "burst ftm sep" },
	{ WL_PROXD_TLV_ID_BURST_NUM_FTM, "burst num ftm" },
	{ WL_PROXD_TLV_ID_NUM_BURST, "num burst" },
	{ WL_PROXD_TLV_ID_FTM_RETRIES, "ftm retries" },
	{ WL_PROXD_TLV_ID_BSS_INDEX, "BSS index" },
	{ WL_PROXD_TLV_ID_BSSID, "bssid" },
	{ WL_PROXD_TLV_ID_INIT_DELAY, "burst init delay" },
	{ WL_PROXD_TLV_ID_BURST_TIMEOUT, "burst timeout" },
	{ WL_PROXD_TLV_ID_EVENT_MASK, "event mask" },
	{ WL_PROXD_TLV_ID_FLAGS_MASK, "flags mask" },
	{ WL_PROXD_TLV_ID_PEER_MAC, "peer addr" },
	{ WL_PROXD_TLV_ID_FTM_REQ, "ftm req" },
	{ WL_PROXD_TLV_ID_LCI_REQ, "lci req" },
	{ WL_PROXD_TLV_ID_LCI, "lci" },
	{ WL_PROXD_TLV_ID_CIVIC_REQ, "civic req" },
	{ WL_PROXD_TLV_ID_CIVIC, "civic" },
	{ WL_PROXD_TLV_ID_AVAIL, "availability" },
	{ WL_PROXD_TLV_ID_SESSION_FLAGS, "session flags" },
	{ WL_PROXD_TLV_ID_SESSION_FLAGS_MASK, "session flags mask" },
	{ WL_PROXD_TLV_ID_RX_MAX_BURST, "rx max bursts" },
	{ WL_PROXD_TLV_ID_RANGING_INFO, "ranging info" },
	{ WL_PROXD_TLV_ID_RANGING_FLAGS, "ranging flags" },
	{ WL_PROXD_TLV_ID_RANGING_FLAGS_MASK, "ranging flags mask" },
	/* output - 512 + x */
	{ WL_PROXD_TLV_ID_STATUS, "status" },
	{ WL_PROXD_TLV_ID_COUNTERS, "counters" },
	{ WL_PROXD_TLV_ID_INFO, "info" },
	{ WL_PROXD_TLV_ID_RTT_RESULT, "rtt result" },
	{ WL_PROXD_TLV_ID_AOA_RESULT, "aoa result" },
	{ WL_PROXD_TLV_ID_SESSION_INFO, "session info" },
	{ WL_PROXD_TLV_ID_SESSION_STATUS, "session status" },
	{ WL_PROXD_TLV_ID_SESSION_ID_LIST, "session ids" },
	/* debug tlvs can be added starting 1024 */
	{ WL_PROXD_TLV_ID_DEBUG_MASK, "debug mask" },
	{ WL_PROXD_TLV_ID_COLLECT, "collect" },
	{ WL_PROXD_TLV_ID_STRBUF, "result" },
	{ WL_PROXD_TLV_ID_COLLECT_DATA, "collect-data" },
	{ WL_PROXD_TLV_ID_RI_RR, "ri_rr" },
	{ WL_PROXD_TLV_ID_COLLECT_CHAN_DATA, "chan est"},
	{ WL_PROXD_TLV_ID_MF_STATS_DATA, "mf_stats_data"}
};

/* event-type --> text string mapping, used for event logging */
static const ftm_strmap_entry_t ftm_event_type_loginfo[] = {
	/* wl_proxd_event_type_t, text-string */
	{ WL_PROXD_EVENT_NONE, "none" },
	{ WL_PROXD_EVENT_SESSION_CREATE, "session create" },
	{ WL_PROXD_EVENT_SESSION_START, "session start" },
	{ WL_PROXD_EVENT_FTM_REQ, "FTM req" },
	{ WL_PROXD_EVENT_BURST_START, "burst start" },
	{ WL_PROXD_EVENT_BURST_END, "burst end" },
	{ WL_PROXD_EVENT_SESSION_END, "session end" },
	{ WL_PROXD_EVENT_SESSION_RESTART, "session restart" },
	{ WL_PROXD_EVENT_BURST_RESCHED, "burst rescheduled" },
	{ WL_PROXD_EVENT_SESSION_DESTROY, "session destroy" },
	{ WL_PROXD_EVENT_RANGE_REQ, "range request" },
	{ WL_PROXD_EVENT_FTM_FRAME, "FTM frame" },
	{ WL_PROXD_EVENT_DELAY, "delay" },
	{ WL_PROXD_EVENT_VS_INITIATOR_RPT, "initiator-report " }, /* rx initiator-rpt */
	{ WL_PROXD_EVENT_RANGING, "ranging " },
	{ WL_PROXD_EVENT_COLLECT, "collect" },
	{ WL_PROXD_EVENT_MF_STATS, "mf_stats" },
};

/*
* session-state --> text string mapping
*/
static const ftm_strmap_entry_t ftm_session_state_value_loginfo[] = {
	/* wl_proxd_session_state_t, text string */
	{ WL_PROXD_SESSION_STATE_CREATED, "created" },
	{ WL_PROXD_SESSION_STATE_CONFIGURED, "configured" },
	{ WL_PROXD_SESSION_STATE_STARTED, "started" },
	{ WL_PROXD_SESSION_STATE_DELAY, "delay" },
	{ WL_PROXD_SESSION_STATE_USER_WAIT, "user-wait" },
	{ WL_PROXD_SESSION_STATE_SCHED_WAIT, "sched-wait" },
	{ WL_PROXD_SESSION_STATE_BURST, "burst" },
	{ WL_PROXD_SESSION_STATE_STOPPING, "stopping" },
	{ WL_PROXD_SESSION_STATE_ENDED, "ended" },
	{ WL_PROXD_SESSION_STATE_DESTROYING, "destroying" },
	{ WL_PROXD_SESSION_STATE_NONE, "none" }
};

/*
* ranging-state --> text string mapping
*/
static const ftm_strmap_entry_t ftm_ranging_state_value_loginfo [] = {
	/* wl_proxd_ranging_state_t, text string */
	{ WL_PROXD_RANGING_STATE_NONE, "none" },
	{ WL_PROXD_RANGING_STATE_NOTSTARTED, "nonstarted" },
	{ WL_PROXD_RANGING_STATE_INPROGRESS, "inprogress" },
	{ WL_PROXD_RANGING_STATE_DONE, "done" },
};

/*
* status --> text string mapping
*/
static const ftm_strmap_entry_t ftm_status_value_loginfo[] = {
	/* wl_proxd_status_t, text-string */
	{ WL_PROXD_E_OVERRIDDEN, "overridden" },
	{ WL_PROXD_E_ASAP_FAILED, "ASAP failed" },
	{ WL_PROXD_E_NOTSTARTED, "not started" },
	{ WL_PROXD_E_INVALIDMEAS, "invalid measurement" },
	{ WL_PROXD_E_INCAPABLE, "incapable" },
	{ WL_PROXD_E_MISMATCH, "mismatch"},
	{ WL_PROXD_E_DUP_SESSION, "dup session" },
	{ WL_PROXD_E_REMOTE_FAIL, "remote fail" },
	{ WL_PROXD_E_REMOTE_INCAPABLE, "remote incapable" },
	{ WL_PROXD_E_SCHED_FAIL, "sched failure" },
	{ WL_PROXD_E_PROTO, "protocol error" },
	{ WL_PROXD_E_EXPIRED, "expired" },
	{ WL_PROXD_E_TIMEOUT, "timeout" },
	{ WL_PROXD_E_NOACK, "no ack" },
	{ WL_PROXD_E_DEFERRED, "deferred" },
	{ WL_PROXD_E_INVALID_SID, "invalid session id" },
	{ WL_PROXD_E_REMOTE_CANCEL, "remote cancel" },
	{ WL_PROXD_E_CANCELED, "canceled" },
	{ WL_PROXD_E_INVALID_SESSION, "invalid session" },
	{ WL_PROXD_E_BAD_STATE, "bad state" },
	{ WL_PROXD_E_ERROR, "error" },
	{ WL_PROXD_E_OK, "OK" }
};

/*
* time interval unit --> text string mapping
*/
static const ftm_strmap_entry_t ftm_tmu_value_loginfo[] = {
	/* wl_proxd_tmu_t, text-string */
	{ WL_PROXD_TMU_TU, "TU" },
	{ WL_PROXD_TMU_SEC, "sec" },
	{ WL_PROXD_TMU_MILLI_SEC, "ms" },
	{ WL_PROXD_TMU_MICRO_SEC, "us" },
	{ WL_PROXD_TMU_NANO_SEC, "ns" },
	{ WL_PROXD_TMU_PICO_SEC, "ps" }
};

/* ratespec field accessors -- presumably mirroring bcmwifi_rspec.h bit layout;
 * NOTE(review): confirm against the WL_RSPEC_* definitions shipped with this driver.
 */
#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK)
#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ)
#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ)
#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ)
#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ)

#define IS_MCS(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) != WL_RSPEC_ENCODE_RATE)
#define IS_STBC(rspec) (((((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) || \
	(((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)) && \
	(((rspec) & WL_RSPEC_STBC) == WL_RSPEC_STBC))
#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0)
#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0)
#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0)
#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0)
#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT)
#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE)
#define RSPEC2RATE(rspec) (RSPEC_ISLEGACY(rspec) ?
\
	((rspec) & RSPEC_RATE_MASK) : rate_rspec2rate(rspec))
/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */
#define RSPEC2KBPS(rspec) rate_rspec2rate(rspec)

/* modulation/coding parameters for one single-stream MCS index */
struct ieee_80211_mcs_rate_info {
	uint8 constellation_bits; /* bits per constellation point */
	uint8 coding_q; /* coding rate numerator */
	uint8 coding_d; /* coding rate denominator */
};

static const struct ieee_80211_mcs_rate_info wl_mcs_info[] = {
	{ 1, 1, 2 }, /* MCS 0: MOD: BPSK, CR 1/2 */
	{ 2, 1, 2 }, /* MCS 1: MOD: QPSK, CR 1/2 */
	{ 2, 3, 4 }, /* MCS 2: MOD: QPSK, CR 3/4 */
	{ 4, 1, 2 }, /* MCS 3: MOD: 16QAM, CR 1/2 */
	{ 4, 3, 4 }, /* MCS 4: MOD: 16QAM, CR 3/4 */
	{ 6, 2, 3 }, /* MCS 5: MOD: 64QAM, CR 2/3 */
	{ 6, 3, 4 }, /* MCS 6: MOD: 64QAM, CR 3/4 */
	{ 6, 5, 6 }, /* MCS 7: MOD: 64QAM, CR 5/6 */
	{ 8, 3, 4 }, /* MCS 8: MOD: 256QAM, CR 3/4 */
	{ 8, 5, 6 } /* MCS 9: MOD: 256QAM, CR 5/6 */
};

/**
 * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi combination.
 * 'mcs' : a *single* spatial stream MCS (11n or 11ac)
 */
uint
rate_mcs2rate(uint mcs, uint nss, uint bw, int sgi)
{
	const int ksps = 250; /* kilo symbols per sec, 4 us sym */
	const int Nsd_20MHz = 52;
	const int Nsd_40MHz = 108;
	const int Nsd_80MHz = 234;
	const int Nsd_160MHz = 468;
	uint rate;

	if (mcs == 32) {
		/* just return fixed values for mcs32 instead of trying to parametrize */
		rate = (sgi == 0) ? 6000 : 6778;
	} else if (mcs <= 9) {
		/* This calculation works for 11n HT and 11ac VHT if the HT mcs values
		 * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8.
		 * That is, HT MCS 23 is a base MCS = 7, Nss = 3
		 */

		/* find the number of complex numbers per symbol */
		if (RSPEC_IS20MHZ(bw)) {
			rate = Nsd_20MHz;
		} else if (RSPEC_IS40MHZ(bw)) {
			rate = Nsd_40MHz;
		} else if (bw == WL_RSPEC_BW_80MHZ) {
			rate = Nsd_80MHz;
		} else if (bw == WL_RSPEC_BW_160MHZ) {
			rate = Nsd_160MHz;
		} else {
			/* unknown bandwidth -> rate computes to 0 */
			rate = 0;
		}

		/* multiply by bits per number from the constellation in use */
		rate = rate * wl_mcs_info[mcs].constellation_bits;

		/* adjust for the number of spatial streams */
		rate = rate * nss;

		/* adjust for the coding rate given as a quotient and divisor */
		rate = (rate * wl_mcs_info[mcs].coding_q) / wl_mcs_info[mcs].coding_d;

		/* multiply by Kilo symbols per sec to get Kbps */
		rate = rate * ksps;

		/* adjust the symbols per sec for SGI
		 * symbol duration is 4 us without SGI, and 3.6 us with SGI,
		 * so ratio is 10 / 9
		 */
		if (sgi) {
			/* add 4 for rounding of division by 9 */
			rate = ((rate * 10) + 4) / 9;
		}
	} else {
		/* MCS index out of table range */
		rate = 0;
	}

	return rate;
} /* wlc_rate_mcs2rate */

/** take a well formed ratespec_t arg and return phy rate in [Kbps] units */
static uint32
rate_rspec2rate(uint32 rspec)
{
	int rate = 0;

	if (RSPEC_ISLEGACY(rspec)) {
		/* legacy rate field is in 500 Kbps units */
		rate = 500 * (rspec & WL_RSPEC_RATE_MASK);
	} else if (RSPEC_ISHT(rspec)) {
		uint mcs = (rspec & WL_RSPEC_RATE_MASK);

		if (mcs == 32) {
			rate = rate_mcs2rate(mcs, 1, WL_RSPEC_BW_40MHZ, RSPEC_ISSGI(rspec));
		} else {
			/* decompose HT MCS into base MCS + Nss */
			uint nss = 1 + (mcs / 8);
			mcs = mcs % 8;
			rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
		}
	} else if (RSPEC_ISVHT(rspec)) {
		uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK);
		uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
		if (mcs > 9 || nss > 8) {
			DHD_RTT(("%s: Invalid mcs %d or nss %d\n", __FUNCTION__, mcs, nss));
			goto exit;
		}

		rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
	} else {
		DHD_RTT(("%s: wrong rspec:%d\n", __FUNCTION__,
			rspec));
	}
exit:
	return rate;
}

/* shared response buffer for 'proxd' get-iovars;
 * NOTE(review): global and unguarded -- assumes callers are serialized, confirm.
 */
char resp_buf[WLC_IOCTL_SMLEN];

/* convert a proxd time interval (value + unit) to nanoseconds */
static uint64
ftm_intvl2nsec(const wl_proxd_intvl_t *intvl)
{
	uint64 ret;
	ret = intvl->intvl;
	switch (intvl->tmu) {
	case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret) * 1000; break;
	case WL_PROXD_TMU_SEC: ret *= 1000000000; break;
	case WL_PROXD_TMU_MILLI_SEC: ret *= 1000000; break;
	case WL_PROXD_TMU_MICRO_SEC: ret *= 1000; break;
	case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000; break;
	case WL_PROXD_TMU_NANO_SEC: /* fall through */
	default: break;
	}
	return ret;
}
/* convert a proxd time interval (value + unit) to microseconds */
uint64
ftm_intvl2usec(const wl_proxd_intvl_t *intvl)
{
	uint64 ret;
	ret = intvl->intvl;
	switch (intvl->tmu) {
	case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret); break;
	case WL_PROXD_TMU_SEC: ret *= 1000000; break;
	case WL_PROXD_TMU_NANO_SEC: ret = intvl->intvl / 1000; break;
	case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000000; break;
	case WL_PROXD_TMU_MILLI_SEC: ret *= 1000; break;
	case WL_PROXD_TMU_MICRO_SEC: /* fall through */
	default: break;
	}
	return ret;
}

/*
* lookup 'id' (as a key) from a fw status to host map table
* if found, return the corresponding reason code
*/

static rtt_reason_t
ftm_get_statusmap_info(wl_proxd_status_t id, const ftm_status_map_host_entry_t *p_table,
	uint32 num_entries)
{
	int i;
	const ftm_status_map_host_entry_t *p_entry;
	/* scan thru the table till end */
	p_entry = p_table;
	for (i = 0; i < (int) num_entries; i++)
	{
		if (p_entry->proxd_status == id) {
			return p_entry->rtt_reason;
		}
		p_entry++; /* next entry */
	}
	return RTT_STATUS_FAILURE; /* not found */
}
/*
* lookup 'id' (as a key) from a table
* if found, return the entry pointer, otherwise return NULL
*/
static const ftm_strmap_entry_t*
ftm_get_strmap_info(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries)
{
	int i;
	const ftm_strmap_entry_t *p_entry;

	/* scan thru the table till end */
	p_entry = p_table;
	for (i = 0; i < (int) num_entries; i++)
	{
		if (p_entry->id == id)
			return p_entry;
		p_entry++; /* next entry */
	}
	return NULL; /* not found */
}

/*
* map enum to a text-string for display, this function is called by the following:
* For debug/trace:
* ftm_[cmdid|tlvid]_to_str()
* For TLV-output log for 'get' commands
* ftm_[method|tmu|caps|status|state]_value_to_logstr()
* Input:
* pTable -- point to a 'enum to string' table.
*/
static const char *
ftm_map_id_to_str(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries)
{
	const ftm_strmap_entry_t *p_entry = ftm_get_strmap_info(id, p_table, num_entries);
	if (p_entry)
		return (p_entry->text);

	return "invalid";
}

#ifdef RTT_DEBUG

/* define entry, e.g. { WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */
#define DEF_STRMAP_ENTRY(id) { (id), #id }

/* ftm cmd-id mapping */
static const ftm_strmap_entry_t ftm_cmdid_map[] = {
	/* {wl_proxd_cmd_t(WL_PROXD_CMD_xxx), "WL_PROXD_CMD_xxx" }, */
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_NONE),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_VERSION),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_ENABLE),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_DISABLE),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_CONFIG),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_SESSION),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_BURST_REQUEST),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_SESSION),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_DELETE_SESSION),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RESULT),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_INFO),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_STATUS),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_SESSIONS),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_COUNTERS),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_CLEAR_COUNTERS),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_COLLECT),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_TUNE),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_DUMP),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_RANGING),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_RANGING),
	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RANGING_INFO),
};

/*
* map a ftm cmd-id to a text-string for display
*/
const char *
ftm_cmdid_to_str(uint16 cmdid)
{
	return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map));
}
#endif /* RTT_DEBUG */

/*
* convert BCME_xxx error codes into related error strings
* note, bcmerrorstr() defined in bcmutils is for BCMDRIVER only,
* this duplicate copy is for WL access and may need to clean up later
*/
static const char *ftm_bcmerrorstrtable[] = BCMERRSTRINGTABLE;
static const char *
ftm_status_value_to_logstr(wl_proxd_status_t status)
{
	static char ftm_msgbuf_status_undef[32];
	const ftm_strmap_entry_t *p_loginfo;
	int bcmerror;

	/* check if within BCME_xxx error range */
	bcmerror = (int) status;
	if (VALID_BCMERROR(bcmerror))
		return ftm_bcmerrorstrtable[-bcmerror];

	/* otherwise, look for 'proxd ftm status' range */
	p_loginfo = ftm_get_strmap_info((int32) status,
		&ftm_status_value_loginfo[0], ARRAYSIZE(ftm_status_value_loginfo));
	if (p_loginfo)
		return p_loginfo->text;

	/* report for 'out of range' FTM-status error code */
	memset(ftm_msgbuf_status_undef, 0, sizeof(ftm_msgbuf_status_undef));
	snprintf(ftm_msgbuf_status_undef, sizeof(ftm_msgbuf_status_undef),
		"Undefined status %d", status);
	return &ftm_msgbuf_status_undef[0];
}

/* map a time-unit enum to its display string */
static const char *
ftm_tmu_value_to_logstr(wl_proxd_tmu_t tmu)
{
	return ftm_map_id_to_str((int32)tmu,
		&ftm_tmu_value_loginfo[0], ARRAYSIZE(ftm_tmu_value_loginfo));
}

static const ftm_strmap_entry_t*
ftm_get_event_type_loginfo(wl_proxd_event_type_t event_type)
{
	/* look up 'event-type' from a predefined table */
	return ftm_get_strmap_info((int32) event_type,
		ftm_event_type_loginfo, ARRAYSIZE(ftm_event_type_loginfo));
}

/* map a session-state enum to its display string */
static const char *
ftm_session_state_value_to_logstr(wl_proxd_session_state_t state)
{
	return ftm_map_id_to_str((int32)state, &ftm_session_state_value_loginfo[0],
		ARRAYSIZE(ftm_session_state_value_loginfo));
}

#ifdef WL_CFG80211
/*
* send 'proxd' iovar for all ftm get-related commands
*/
static int
rtt_do_get_ioctl(dhd_pub_t *dhd, wl_proxd_iov_t *p_proxd_iov, uint16 proxd_iovsize,
	ftm_subcmd_info_t *p_subcmd_info)
{

	/* response lands in the module-global resp_buf */
	wl_proxd_iov_t *p_iovresp = (wl_proxd_iov_t *)resp_buf;
	int status;
	int tlvs_len;
	/* send getbuf proxd iovar */
	status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov,
		proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN);
	if (status != BCME_OK) {
		DHD_ERROR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
			__FUNCTION__, p_subcmd_info->cmdid, status));
		return status;
	}
	if (p_subcmd_info->cmdid == WL_PROXD_CMD_GET_VERSION) {
		/* version query carries no TLVs -- report it and stop */
		p_subcmd_info->version = ltoh16(p_iovresp->version);
		DHD_RTT(("ftm version: 0x%x\n", ltoh16(p_iovresp->version)));
		goto exit;
	}

	tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE;
	if (tlvs_len < 0) {
		DHD_ERROR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
			__FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE));
		tlvs_len = 0;
	}

	if (tlvs_len > 0 && p_subcmd_info->handler) {
		/* unpack TLVs and invokes the cbfn for processing */
		status = bcm_unpack_xtlv_buf(p_proxd_iov, (uint8 *)p_iovresp->tlvs,
			tlvs_len, BCM_XTLV_OPTION_ALIGN32, p_subcmd_info->handler);
	}
exit:
	return status;
}

/* allocate and pre-fill a 'proxd' iovar buffer (header + room for TLVs);
 * caller owns the returned buffer and must kfree() it; returns NULL on OOM
 */
static wl_proxd_iov_t *
rtt_alloc_getset_buf(wl_proxd_method_t method, wl_proxd_session_id_t session_id,
	wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize)
{
	uint16 proxd_iovsize;
	uint16 kflags;
	wl_proxd_tlv_t *p_tlv;
	wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL;

	*p_out_bufsize = 0; /* init */
	/* pick a non-sleeping allocation when called from atomic context */
	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	/* calculate the whole buffer size, including one reserve-tlv entry in the header */
	proxd_iovsize = sizeof(wl_proxd_iov_t) + tlvs_bufsize;

	p_proxd_iov = kzalloc(proxd_iovsize, kflags);
	if (p_proxd_iov == NULL) {
		DHD_ERROR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
		return NULL;
	}

	/* setup proxd-FTM-method iovar header */
	p_proxd_iov->version = htol16(WL_PROXD_API_VERSION);
	p_proxd_iov->len = htol16(proxd_iovsize); /* caller may adjust it based on #of TLVs */
	p_proxd_iov->cmd = htol16(cmdid);
	p_proxd_iov->method = htol16(method);
	p_proxd_iov->sid = htol16(session_id);

	/* initialize the reserved/dummy-TLV in iovar header */
	p_tlv = p_proxd_iov->tlvs;
	p_tlv->id = htol16(WL_PROXD_TLV_ID_NONE);
	p_tlv->len = htol16(0);

	*p_out_bufsize = proxd_iovsize; /* for caller's reference */

	return p_proxd_iov;
}

/* common handler for get-related proxd method commands that need no input TLV */
static int
dhd_rtt_common_get_handler(dhd_pub_t *dhd, ftm_subcmd_info_t *p_subcmd_info,
	wl_proxd_method_t method,
	wl_proxd_session_id_t session_id)
{
	int status = BCME_OK;
	uint16 proxd_iovsize = 0;
	wl_proxd_iov_t *p_proxd_iov;
#ifdef RTT_DEBUG
	DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
		__FUNCTION__, method, session_id, p_subcmd_info->cmdid,
		ftm_cmdid_to_str(p_subcmd_info->cmdid)));
#endif // endif
	/* alloc mem for ioctl headr + reserved 0 bufsize for tlvs (initialize to zero) */
	p_proxd_iov = rtt_alloc_getset_buf(method, session_id, p_subcmd_info->cmdid,
		0, &proxd_iovsize);

	if (p_proxd_iov == NULL)
		return BCME_NOMEM;

	status = rtt_do_get_ioctl(dhd, p_proxd_iov, proxd_iovsize, p_subcmd_info);

	if (status != BCME_OK) {
		DHD_RTT(("%s failed: status=%d\n", __FUNCTION__, status));
	}
	kfree(p_proxd_iov);
	return status;
}

/*
* common handler for set-related proxd method commands which require no TLV as input
* wl proxd ftm [session-id]
* e.g.
* wl proxd ftm enable -- to enable ftm
* wl proxd ftm disable -- to disable ftm
* wl proxd ftm start -- to start a specified session
* wl proxd ftm stop -- to cancel a specified session;
* state is maintained till session is delete.
* wl proxd ftm delete -- to delete a specified session
* wl proxd ftm [] clear-counters -- to clear counters
* wl proxd ftm burst-request -- on initiator: to send burst request;
* on target: send FTM frame
* wl proxd ftm collect
* wl proxd ftm tune (TBD)
*/
static int
dhd_rtt_common_set_handler(dhd_pub_t *dhd, const ftm_subcmd_info_t *p_subcmd_info,
	wl_proxd_method_t method, wl_proxd_session_id_t session_id)
{
	uint16 proxd_iovsize;
	wl_proxd_iov_t *p_proxd_iov;
	int ret;

#ifdef RTT_DEBUG
	DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
		__FUNCTION__, method, session_id, p_subcmd_info->cmdid,
		ftm_cmdid_to_str(p_subcmd_info->cmdid)));
#endif // endif

	/* allocate and initialize a temp buffer for 'set proxd' iovar */
	proxd_iovsize = 0;
	p_proxd_iov = rtt_alloc_getset_buf(method, session_id, p_subcmd_info->cmdid,
		0, &proxd_iovsize); /* no TLV */
	if (p_proxd_iov == NULL)
		return BCME_NOMEM;

	/* no TLV to pack, simply issue a set-proxd iovar */
	ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, proxd_iovsize, NULL, 0, TRUE);
#ifdef RTT_DEBUG
	if (ret != BCME_OK) {
		DHD_RTT(("error: IOVAR failed, status=%d\n", ret));
	}
#endif // endif
	/* clean up */
	kfree(p_proxd_iov);

	return ret;
}
#endif /* WL_CFG80211 */

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif

/* gets the length and returns the version
 * of the wl_proxd_collect_event_t version
 */
static uint
rtt_collect_data_event_ver(uint16 len)
{
	if (len > sizeof(wl_proxd_collect_event_data_v3_t)) {
		return WL_PROXD_COLLECT_EVENT_DATA_VERSION_MAX;
	} else if (len == sizeof(wl_proxd_collect_event_data_v3_t))
	{
		return WL_PROXD_COLLECT_EVENT_DATA_VERSION_3;
	} else if (len == sizeof(wl_proxd_collect_event_data_v2_t)) {
		return WL_PROXD_COLLECT_EVENT_DATA_VERSION_2;
	} else {
		return WL_PROXD_COLLECT_EVENT_DATA_VERSION_1;
	}
}

/* byte-swap (little-endian to host) and trace-dump one collect-event payload;
 * 'ctx' receives a host-order copy of the versioned event structure
 */
static void
rtt_collect_event_data_display(uint8 ver, void *ctx, const uint8 *p_data, uint16 len)
{
	int i;
	wl_proxd_collect_event_data_v1_t *p_collect_data_v1 = NULL;
	wl_proxd_collect_event_data_v2_t *p_collect_data_v2 = NULL;
	wl_proxd_collect_event_data_v3_t *p_collect_data_v3 = NULL;

	if (!ctx || !p_data) {
		return;
	}

	switch (ver) {
	case WL_PROXD_COLLECT_EVENT_DATA_VERSION_1:
		DHD_RTT(("\tVERSION_1\n"));
		memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v1_t));
		p_collect_data_v1 = (wl_proxd_collect_event_data_v1_t *)ctx;
		DHD_RTT(("\tH_RX\n"));
		for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
			p_collect_data_v1->H_RX[i] = ltoh32_ua(&p_collect_data_v1->H_RX[i]);
			DHD_RTT(("\t%u\n", p_collect_data_v1->H_RX[i]));
		}
		DHD_RTT(("\n"));
		DHD_RTT(("\tH_LB\n"));
		for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
			p_collect_data_v1->H_LB[i] = ltoh32_ua(&p_collect_data_v1->H_LB[i]);
			DHD_RTT(("\t%u\n", p_collect_data_v1->H_LB[i]));
		}
		DHD_RTT(("\n"));
		DHD_RTT(("\tri_rr\n"));
		for (i = 0; i < FTM_TPK_RI_RR_LEN; i++) {
			DHD_RTT(("\t%u\n", p_collect_data_v1->ri_rr[i]));
		}
		p_collect_data_v1->phy_err_mask = ltoh32_ua(&p_collect_data_v1->phy_err_mask);
		DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v1->phy_err_mask));
		break;
	case WL_PROXD_COLLECT_EVENT_DATA_VERSION_2:
		memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v2_t));
		p_collect_data_v2 = (wl_proxd_collect_event_data_v2_t *)ctx;
		DHD_RTT(("\tH_RX\n"));
		for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
			p_collect_data_v2->H_RX[i] = ltoh32_ua(&p_collect_data_v2->H_RX[i]);
			DHD_RTT(("\t%u\n", p_collect_data_v2->H_RX[i]));
		}
		DHD_RTT(("\n"));
		DHD_RTT(("\tH_LB\n"));
		for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
			p_collect_data_v2->H_LB[i] = ltoh32_ua(&p_collect_data_v2->H_LB[i]);
			DHD_RTT(("\t%u\n", p_collect_data_v2->H_LB[i]));
		}
		DHD_RTT(("\n"));
		DHD_RTT(("\tri_rr\n"));
		for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) {
			DHD_RTT(("\t%u\n", p_collect_data_v2->ri_rr[i]));
		}
		p_collect_data_v2->phy_err_mask = ltoh32_ua(&p_collect_data_v2->phy_err_mask);
		DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v2->phy_err_mask));
		break;
	case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3:
		memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v3_t));
		p_collect_data_v3 = (wl_proxd_collect_event_data_v3_t *)ctx;
		/* v3 payload is self-describing: re-check embedded version/length */
		switch (p_collect_data_v3->version) {
		case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3:
			if (p_collect_data_v3->length !=
				(len - OFFSETOF(wl_proxd_collect_event_data_v3_t, H_LB))) {
				DHD_RTT(("\tversion/length mismatch\n"));
				break;
			}
			DHD_RTT(("\tH_RX\n"));
			for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
				p_collect_data_v3->H_RX[i] =
					ltoh32_ua(&p_collect_data_v3->H_RX[i]);
				DHD_RTT(("\t%u\n", p_collect_data_v3->H_RX[i]));
			}
			DHD_RTT(("\n"));
			DHD_RTT(("\tH_LB\n"));
			for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
				p_collect_data_v3->H_LB[i] =
					ltoh32_ua(&p_collect_data_v3->H_LB[i]);
				DHD_RTT(("\t%u\n", p_collect_data_v3->H_LB[i]));
			}
			DHD_RTT(("\n"));
			DHD_RTT(("\tri_rr\n"));
			for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) {
				DHD_RTT(("\t%u\n", p_collect_data_v3->ri_rr[i]));
			}
			p_collect_data_v3->phy_err_mask =
				ltoh32_ua(&p_collect_data_v3->phy_err_mask);
			DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v3->phy_err_mask));
			break;
		/* future case */
		}
		break;
	}
}

/* pick the rtt-result structure version to decode, based on the TLV id
 * (and, for V2 TLVs, the version field inside the payload);
 * NOTE(review): 'ret' starts as BCME_OK (0), so an unsupported id yields 0.
 */
static uint16
rtt_result_ver(uint16 tlvid, const uint8 *p_data)
{
	uint16 ret = BCME_OK;
	const wl_proxd_rtt_result_v2_t *r_v2 = NULL;

	switch (tlvid) {
	case WL_PROXD_TLV_ID_RTT_RESULT:
		BCM_REFERENCE(p_data);
		ret = WL_PROXD_RTT_RESULT_VERSION_1;
		break;
	case WL_PROXD_TLV_ID_RTT_RESULT_V2:
		if (p_data) {
			r_v2 = (const wl_proxd_rtt_result_v2_t *)p_data;
			if (r_v2->version == WL_PROXD_RTT_RESULT_VERSION_2) {
				ret = WL_PROXD_RTT_RESULT_VERSION_2;
			}
		}
		break;
	default:
		DHD_ERROR(("%s: > Unsupported TLV ID %d\n",
			__FUNCTION__, tlvid));
		break;
	}
	return ret;
}

/* pretty hex print a contiguous buffer */
static void
rtt_prhex(const char *msg, const uint8 *buf, uint nbytes)
{
	char line[128], *p;
	int len = sizeof(line);
	int nchar;
	uint i;

	if (msg && (msg[0] != '\0'))
		DHD_RTT(("%s:\n", msg));

	p = line;
	for (i = 0; i < nbytes; i++) {
		if (i % 16 == 0) {
			nchar = snprintf(p, len, "  %04d: ", i); /* line prefix */
			p += nchar;
			len -= nchar;
		}
		if (len > 0) {
			nchar = snprintf(p, len, "%02x ", buf[i]);
			p += nchar;
			len -= nchar;
		}

		if (i % 16 == 15) {
			DHD_RTT(("%s\n", line)); /* flush line */
			p = line;
			len = sizeof(line);
		}
	}

	/* flush last partial line */
	if (p != line)
		DHD_RTT(("%s\n", line));
}

/* xtlv unpack callback: decode one proxd TLV into the caller-provided ctx */
static int
rtt_unpack_xtlv_cbfn(void *ctx, const uint8 *p_data, uint16 tlvid, uint16 len)
{
	int ret = BCME_OK;
	int i;
	wl_proxd_ftm_session_status_t *p_data_info = NULL;
	uint32 chan_data_entry = 0;
	uint16 expected_rtt_result_ver = 0;

	BCM_REFERENCE(p_data_info);

	switch (tlvid) {
	case WL_PROXD_TLV_ID_RTT_RESULT:
	case WL_PROXD_TLV_ID_RTT_RESULT_V2:
		DHD_RTT(("WL_PROXD_TLV_ID_RTT_RESULT\n"));
		expected_rtt_result_ver = rtt_result_ver(tlvid, p_data);
		switch (expected_rtt_result_ver) {
		case WL_PROXD_RTT_RESULT_VERSION_1:
			ret = dhd_rtt_convert_results_to_host_v1((rtt_report_t *)ctx,
				p_data, tlvid, len);
			break;
		case WL_PROXD_RTT_RESULT_VERSION_2:
			ret = dhd_rtt_convert_results_to_host_v2((rtt_report_t *)ctx,
				p_data, tlvid, len);
			break;
		default:
			DHD_ERROR((" > Unsupported RTT_RESULT version\n"));
			ret = BCME_UNSUPPORTED;
			break;
		}
		break;
	case WL_PROXD_TLV_ID_SESSION_STATUS:
		DHD_RTT(("WL_PROXD_TLV_ID_SESSION_STATUS\n"));
		memcpy(ctx, p_data,
sizeof(wl_proxd_ftm_session_status_t)); + p_data_info = (wl_proxd_ftm_session_status_t *)ctx; + p_data_info->sid = ltoh16_ua(&p_data_info->sid); + p_data_info->state = ltoh16_ua(&p_data_info->state); + p_data_info->status = ltoh32_ua(&p_data_info->status); + p_data_info->burst_num = ltoh16_ua(&p_data_info->burst_num); + DHD_RTT(("\tsid=%u, state=%d, status=%d, burst_num=%u\n", + p_data_info->sid, p_data_info->state, + p_data_info->status, p_data_info->burst_num)); + + break; + case WL_PROXD_TLV_ID_COLLECT_DATA: + DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_DATA\n")); + rtt_collect_event_data_display( + rtt_collect_data_event_ver(len), + ctx, p_data, len); + break; + case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA: + DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n")); + DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32)))); + for (i = 0; i < (len/sizeof(chan_data_entry)); i++) { + uint32 *p = (uint32*)p_data; + chan_data_entry = ltoh32_ua(p + i); + DHD_RTT(("\t%u\n", chan_data_entry)); + } + break; + case WL_PROXD_TLV_ID_MF_STATS_DATA: + DHD_RTT(("WL_PROXD_TLV_ID_MF_STATS_DATA\n")); + DHD_RTT(("\tmf stats len=%u\n", len)); + rtt_prhex("", p_data, len); + break; + default: + DHD_ERROR(("> Unsupported TLV ID %d\n", tlvid)); + ret = BCME_ERROR; + break; + } + + return ret; +} + +#ifdef WL_CFG80211 +static int +rtt_handle_config_options(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv, + uint16 *p_buf_space_left, ftm_config_options_info_t *ftm_configs, int ftm_cfg_cnt) +{ + int ret = BCME_OK; + int cfg_idx = 0; + uint32 flags = WL_PROXD_FLAG_NONE; + uint32 flags_mask = WL_PROXD_FLAG_NONE; + uint32 new_mask; /* cmdline input */ + ftm_config_options_info_t *p_option_info; + uint16 type = (session_id == WL_PROXD_SESSION_ID_GLOBAL) ? 
+ WL_PROXD_TLV_ID_FLAGS_MASK : WL_PROXD_TLV_ID_SESSION_FLAGS_MASK; + for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) { + p_option_info = (ftm_configs + cfg_idx); + if (p_option_info != NULL) { + new_mask = p_option_info->flags; + /* update flags mask */ + flags_mask |= new_mask; + if (p_option_info->enable) { + flags |= new_mask; /* set the bit on */ + } else { + flags &= ~new_mask; /* set the bit off */ + } + } + } + flags = htol32(flags); + flags_mask = htol32(flags_mask); + /* setup flags_mask TLV */ + ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left, + type, sizeof(uint32), (uint8 *)&flags_mask, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n", + __FUNCTION__, ret)); + goto exit; + } + + type = (session_id == WL_PROXD_SESSION_ID_GLOBAL)? + WL_PROXD_TLV_ID_FLAGS : WL_PROXD_TLV_ID_SESSION_FLAGS; + /* setup flags TLV */ + ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left, + type, sizeof(uint32), (uint8 *)&flags, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { +#ifdef RTT_DEBUG + DHD_RTT(("%s: bcm_pack_xltv_entry() for flags failed, status=%d\n", + __FUNCTION__, ret)); +#endif // endif + } +exit: + return ret; +} + +static int +rtt_handle_config_general(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv, + uint16 *p_buf_space_left, ftm_config_param_info_t *ftm_configs, int ftm_cfg_cnt) +{ + int ret = BCME_OK; + int cfg_idx = 0; + uint32 chanspec; + ftm_config_param_info_t *p_config_param_info; + void *p_src_data; + uint16 src_data_size; /* size of data pointed by p_src_data as 'source' */ + for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) { + p_config_param_info = (ftm_configs + cfg_idx); + if (p_config_param_info != NULL) { + switch (p_config_param_info->tlvid) { + case WL_PROXD_TLV_ID_BSS_INDEX: + case WL_PROXD_TLV_ID_FTM_RETRIES: + case WL_PROXD_TLV_ID_FTM_REQ_RETRIES: + p_src_data = &p_config_param_info->data8; + src_data_size = sizeof(uint8); + 
break; + case WL_PROXD_TLV_ID_BURST_NUM_FTM: /* uint16 */ + case WL_PROXD_TLV_ID_NUM_BURST: + case WL_PROXD_TLV_ID_RX_MAX_BURST: + p_src_data = &p_config_param_info->data16; + src_data_size = sizeof(uint16); + break; + case WL_PROXD_TLV_ID_TX_POWER: /* uint32 */ + case WL_PROXD_TLV_ID_RATESPEC: + case WL_PROXD_TLV_ID_EVENT_MASK: /* wl_proxd_event_mask_t/uint32 */ + case WL_PROXD_TLV_ID_DEBUG_MASK: + p_src_data = &p_config_param_info->data32; + src_data_size = sizeof(uint32); + break; + case WL_PROXD_TLV_ID_CHANSPEC: /* chanspec_t --> 32bit */ + chanspec = p_config_param_info->chanspec; + p_src_data = (void *) &chanspec; + src_data_size = sizeof(uint32); + break; + case WL_PROXD_TLV_ID_BSSID: /* mac address */ + case WL_PROXD_TLV_ID_PEER_MAC: + case WL_PROXD_TLV_ID_CUR_ETHER_ADDR: + p_src_data = &p_config_param_info->mac_addr; + src_data_size = sizeof(struct ether_addr); + break; + case WL_PROXD_TLV_ID_BURST_DURATION: /* wl_proxd_intvl_t */ + case WL_PROXD_TLV_ID_BURST_PERIOD: + case WL_PROXD_TLV_ID_BURST_FTM_SEP: + case WL_PROXD_TLV_ID_BURST_TIMEOUT: + case WL_PROXD_TLV_ID_INIT_DELAY: + p_src_data = &p_config_param_info->data_intvl; + src_data_size = sizeof(wl_proxd_intvl_t); + break; + default: + ret = BCME_BADARG; + break; + } + if (ret != BCME_OK) { + DHD_ERROR(("%s bad TLV ID : %d\n", + __FUNCTION__, p_config_param_info->tlvid)); + break; + } + + ret = bcm_pack_xtlv_entry((uint8 **) p_tlv, p_buf_space_left, + p_config_param_info->tlvid, src_data_size, (uint8 *)p_src_data, + BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s: bcm_pack_xltv_entry() failed," + " status=%d\n", __FUNCTION__, ret)); + break; + } + + } + } + return ret; +} + +static int +dhd_rtt_ftm_enable(dhd_pub_t *dhd, bool enable) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = (enable)? "enable" : "disable"; + subcmd_info.cmdid = (enable)? 
WL_PROXD_CMD_ENABLE: WL_PROXD_CMD_DISABLE; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL); +} + +static int +dhd_rtt_start_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, bool start) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = (start)? "start session" : "stop session"; + subcmd_info.cmdid = (start)? WL_PROXD_CMD_START_SESSION: WL_PROXD_CMD_STOP_SESSION; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, session_id); +} + +static int +dhd_rtt_delete_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = "delete session"; + subcmd_info.cmdid = WL_PROXD_CMD_DELETE_SESSION; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, session_id); +} + +#ifdef WL_NAN +static int +dhd_rtt_nan_start_session(dhd_pub_t *dhd, rtt_target_info_t *rtt_target) +{ + s32 err = BCME_OK; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + struct wireless_dev *wdev = ndev_to_wdev(dev); + struct wiphy *wiphy = wdev->wiphy; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + nan_ranging_inst_t *ranging_inst; + ranging_inst = wl_cfgnan_get_ranging_inst(cfg, + &rtt_target->addr, 0, TRUE); + if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) { + WL_DBG(("Trigger nan based range request\n")); + err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg), + cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD); + if (unlikely(err)) { + WL_ERR(("Failed to trigger ranging, ret = (%d)\n", err)); + /* Send an event reporting the failure */ + err = wl_cfgvendor_send_as_rtt_legacy_event(cfg->wdev->wiphy, + bcmcfg_to_prmry_ndev(cfg), NULL, RTT_STATUS_FAILURE); + memset(ranging_inst, 0, sizeof(*ranging_inst)); + } + cfg->nancfg.range_type = LEGACY_NAN_RTT; + } + return err; +} +#endif /* WL_NAN */ + +static int 
+dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, + ftm_config_category_t catagory, void *ftm_configs, int ftm_cfg_cnt) +{ + ftm_subcmd_info_t subcmd_info; + wl_proxd_tlv_t *p_tlv; + /* alloc mem for ioctl headr + reserved 0 bufsize for tlvs (initialize to zero) */ + wl_proxd_iov_t *p_proxd_iov; + uint16 proxd_iovsize = 0; + uint16 bufsize; + uint16 buf_space_left; + uint16 all_tlvsize; + int ret = BCME_OK; + + subcmd_info.name = "config"; + subcmd_info.cmdid = WL_PROXD_CMD_CONFIG; + + p_proxd_iov = rtt_alloc_getset_buf(WL_PROXD_METHOD_FTM, session_id, subcmd_info.cmdid, + FTM_IOC_BUFSZ, &proxd_iovsize); + + if (p_proxd_iov == NULL) { + DHD_ERROR(("%s : failed to allocate the iovar (size :%d)\n", + __FUNCTION__, FTM_IOC_BUFSZ)); + return BCME_NOMEM; + } + /* setup TLVs */ + bufsize = proxd_iovsize - WL_PROXD_IOV_HDR_SIZE; /* adjust available size for TLVs */ + p_tlv = &p_proxd_iov->tlvs[0]; + /* TLV buffer starts with a full size, will decrement for each packed TLV */ + buf_space_left = bufsize; + if (catagory == FTM_CONFIG_CAT_OPTIONS) { + ret = rtt_handle_config_options(session_id, &p_tlv, &buf_space_left, + (ftm_config_options_info_t *)ftm_configs, ftm_cfg_cnt); + } else if (catagory == FTM_CONFIG_CAT_GENERAL) { + ret = rtt_handle_config_general(session_id, &p_tlv, &buf_space_left, + (ftm_config_param_info_t *)ftm_configs, ftm_cfg_cnt); + } + if (ret == BCME_OK) { + /* update the iov header, set len to include all TLVs + header */ + all_tlvsize = (bufsize - buf_space_left); + p_proxd_iov->len = htol16(all_tlvsize + WL_PROXD_IOV_HDR_SIZE); + ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, + all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("%s : failed to set config\n", __FUNCTION__)); + } + } + /* clean up */ + kfree(p_proxd_iov); + return ret; +} + +static int +dhd_rtt_get_version(dhd_pub_t *dhd, int *out_version) +{ + int ret; + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = "ver"; + 
subcmd_info.cmdid = WL_PROXD_CMD_GET_VERSION; + subcmd_info.handler = NULL; + ret = dhd_rtt_common_get_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL); + *out_version = (ret == BCME_OK) ? subcmd_info.version : 0; + return ret; +} +#endif /* WL_CFG80211 */ + +chanspec_t +dhd_rtt_convert_to_chspec(wifi_channel_info_t channel) +{ + int bw; + chanspec_t chanspec = 0; + uint8 center_chan; + uint8 primary_chan; + /* set witdh to 20MHZ for 2.4G HZ */ + if (channel.center_freq >= 2400 && channel.center_freq <= 2500) { + channel.width = WIFI_CHAN_WIDTH_20; + } + switch (channel.width) { + case WIFI_CHAN_WIDTH_20: + bw = WL_CHANSPEC_BW_20; + primary_chan = wf_mhz2channel(channel.center_freq, 0); + chanspec = wf_channel2chspec(primary_chan, bw); + break; + case WIFI_CHAN_WIDTH_40: + bw = WL_CHANSPEC_BW_40; + primary_chan = wf_mhz2channel(channel.center_freq, 0); + chanspec = wf_channel2chspec(primary_chan, bw); + break; + case WIFI_CHAN_WIDTH_80: + bw = WL_CHANSPEC_BW_80; + primary_chan = wf_mhz2channel(channel.center_freq, 0); + center_chan = wf_mhz2channel(channel.center_freq0, 0); + chanspec = wf_chspec_80(center_chan, primary_chan); + break; + default: + DHD_ERROR(("doesn't support this bandwith : %d", channel.width)); + bw = -1; + break; + } + return chanspec; +} + +int +dhd_rtt_idx_to_burst_duration(uint idx) +{ + if (idx >= ARRAY_SIZE(burst_duration_idx)) { + return -1; + } + return burst_duration_idx[idx]; +} + +int +dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params) +{ + int err = BCME_OK; + int idx; + rtt_status_info_t *rtt_status; + NULL_CHECK(params, "params is NULL", err); + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) { + DHD_ERROR(("doesn't support RTT \n")); + return BCME_ERROR; + } + if (rtt_status->status != RTT_STOPPED) { + DHD_ERROR(("rtt is already started\n")); + return BCME_BUSY; + } + 
DHD_RTT(("%s enter\n", __FUNCTION__)); + + memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt; + memcpy(rtt_status->rtt_config.target_info, + params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt)); + rtt_status->status = RTT_STARTED; + /* start to measure RTT from first device */ + /* find next target to trigger RTT */ + for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx)); + schedule_work(&rtt_status->work); + } + return err; +} + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) +{ + int err = BCME_OK; +#ifdef WL_CFG8011 + int i = 0, j = 0; + rtt_status_info_t *rtt_status; + rtt_results_header_t *entry, *next; + rtt_result_t *rtt_result, *next2; + struct rtt_noti_callback *iter; + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (rtt_status->status == RTT_STOPPED) { + DHD_ERROR(("rtt is not started\n")); + return BCME_OK; + } + DHD_RTT(("%s enter\n", __FUNCTION__)); + mutex_lock(&rtt_status->rtt_mutex); + for (i = 0; i < mac_cnt; i++) { + for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) { + if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr, + ETHER_ADDR_LEN)) { + rtt_status->rtt_config.target_info[j].disable = TRUE; + } + } + } + if (rtt_status->all_cancel) { + /* cancel all of request */ + rtt_status->status = RTT_STOPPED; + DHD_RTT(("current RTT process is cancelled\n")); + /* remove the rtt results in cache */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate 
rtt_results_header list */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(entry); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + } + /* send the rtt complete event to wake up the user process */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + /* reinitialize the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + rtt_status->rtt_config.rtt_target_cnt = 0; + memset(rtt_status->rtt_config.target_info, 0, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->cur_idx = 0; + dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + dhd_rtt_ftm_enable(dhd, FALSE); + } + mutex_unlock(&rtt_status->rtt_mutex); +#endif /* WL_CFG80211 */ + return err; +} + +#ifdef WL_CFG80211 +static int +dhd_rtt_start(dhd_pub_t *dhd) +{ + int err = BCME_OK; + char eabuf[ETHER_ADDR_STR_LEN]; + char chanbuf[CHANSPEC_STR_LEN]; + int pm = PM_OFF; + int ftm_cfg_cnt = 0; + int ftm_param_cnt = 0; + uint32 rspec = 0; + ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; + rtt_target_info_t *rtt_target; + rtt_status_info_t *rtt_status; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + u8 ioctl_buf[WLC_IOCTL_SMLEN]; + NULL_CHECK(dhd, 
"dhd is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + + DHD_RTT(("Enter %s\n", __FUNCTION__)); + if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) { + err = BCME_RANGE; + DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx)); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_ERROR(("STA is set as Target/Responder \n")); + return BCME_ERROR; + } + goto exit; + } + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT(("RTT is stopped\n")); + goto exit; + } + rtt_status->pm = PM_OFF; + err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); + if (err) { + DHD_ERROR(("Failed to get the PM value\n")); + } else { + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to set the PM\n")); + rtt_status->pm_restore = FALSE; + } else { + rtt_status->pm_restore = TRUE; + } + } + + mutex_lock(&rtt_status->rtt_mutex); + /* Get a target information */ + rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + mutex_unlock(&rtt_status->rtt_mutex); + DHD_RTT(("%s enter\n", __FUNCTION__)); + +#ifdef WL_NAN + if (rtt_target->peer == RTT_PEER_NAN && + (!ETHER_ISNULLADDR(rtt_target->addr.octet))) { + err = dhd_rtt_nan_start_session(dhd, rtt_target); + /* Irrespectivily go to exit */ + goto exit; + } +#endif /* WL_NAN */ + + if (!RTT_IS_ENABLED(rtt_status)) { + /* enable ftm */ + err = dhd_rtt_ftm_enable(dhd, TRUE); + if (err) { + DHD_ERROR(("failed to enable FTM (%d)\n", err)); + goto exit; + } + } + + /* delete session of index default sesession */ + err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + if (err < 0 && err != BCME_NOTFOUND) { + DHD_ERROR(("failed to delete session of FTM (%d)\n", err)); + goto exit; + } + rtt_status->status = RTT_ENABLED; + memset(ftm_configs, 0, sizeof(ftm_configs)); + memset(ftm_params, 0, sizeof(ftm_params)); + + /* configure the session 1 as initiator */ + 
ftm_configs[ftm_cfg_cnt].enable = TRUE; + ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_INITIATOR; + dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS, + ftm_configs, ftm_cfg_cnt); + + memset(ioctl_buf, 0, WLC_IOCTL_SMLEN); + err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0, + ioctl_buf, WLC_IOCTL_SMLEN, NULL); + if (err) { + WL_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err)); + goto exit; + } + memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN); + + /* local mac address */ + if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) { + ftm_params[ftm_param_cnt].mac_addr = rtt_target->local_addr; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR; + bcm_ether_ntoa(&rtt_target->local_addr, eabuf); + DHD_RTT((">\t local %s\n", eabuf)); + } + /* target's mac address */ + if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) { + ftm_params[ftm_param_cnt].mac_addr = rtt_target->addr; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_PEER_MAC; + bcm_ether_ntoa(&rtt_target->addr, eabuf); + DHD_RTT((">\t target %s\n", eabuf)); + } + /* target's chanspec */ + if (rtt_target->chanspec) { + ftm_params[ftm_param_cnt].chanspec = htol32((uint32)rtt_target->chanspec); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CHANSPEC; + wf_chspec_ntoa(rtt_target->chanspec, chanbuf); + DHD_RTT((">\t chanspec : %s\n", chanbuf)); + } + /* num-burst */ + if (rtt_target->num_burst) { + ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_burst); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_NUM_BURST; + DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst)); + } + /* number of frame per burst */ + if (rtt_target->num_frames_per_burst == 0) { + rtt_target->num_frames_per_burst = + CHSPEC_IS20(rtt_target->chanspec) ? FTM_DEFAULT_CNT_20M : + CHSPEC_IS40(rtt_target->chanspec) ? 
FTM_DEFAULT_CNT_40M : + FTM_DEFAULT_CNT_80M; + } + ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_frames_per_burst); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_NUM_FTM; + DHD_RTT((">\t number of frame per burst : %d\n", rtt_target->num_frames_per_burst)); + /* FTM retry count */ + if (rtt_target->num_retries_per_ftm) { + ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_FTM_RETRIES; + DHD_RTT((">\t retry count of FTM : %d\n", rtt_target->num_retries_per_ftm)); + } + /* FTM Request retry count */ + if (rtt_target->num_retries_per_ftmr) { + ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftmr; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_FTM_REQ_RETRIES; + DHD_RTT((">\t retry count of FTM Req : %d\n", rtt_target->num_retries_per_ftmr)); + } + /* burst-period */ + if (rtt_target->burst_period) { + ftm_params[ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_period); /* ms */ + ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD; + DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period)); + } + /* burst-duration */ + if (rtt_target->burst_duration) { + ftm_params[ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_duration); /* ms */ + ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_DURATION; + DHD_RTT((">\t burst duration : %d ms\n", + rtt_target->burst_duration)); + } + if (rtt_target->bw && rtt_target->preamble) { + bool use_default = FALSE; + int nss; + int mcs; + switch (rtt_target->preamble) { + case RTT_PREAMBLE_LEGACY: + rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */ + rspec |= WL_RATE_6M; + break; + case RTT_PREAMBLE_HT: + rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */ + mcs = 0; /* default MCS 0 */ + rspec |= mcs; + break; + case RTT_PREAMBLE_VHT: + rspec |= 
WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ + mcs = 0; /* default MCS 0 */ + nss = 1; /* default Nss = 1 */ + rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; + break; + default: + DHD_RTT(("doesn't support this preamble : %d\n", rtt_target->preamble)); + use_default = TRUE; + break; + } + switch (rtt_target->bw) { + case RTT_BW_20: + rspec |= WL_RSPEC_BW_20MHZ; + break; + case RTT_BW_40: + rspec |= WL_RSPEC_BW_40MHZ; + break; + case RTT_BW_80: + rspec |= WL_RSPEC_BW_80MHZ; + break; + default: + DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw)); + use_default = TRUE; + break; + } + if (!use_default) { + ftm_params[ftm_param_cnt].data32 = htol32(rspec); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_RATESPEC; + DHD_RTT((">\t ratespec : %d\n", rspec)); + } + + } + dhd_set_rand_mac_oui(dhd); + dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL, + ftm_params, ftm_param_cnt); + + err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE); + if (err) { + DHD_ERROR(("failed to start session of FTM : error %d\n", err)); + } +exit: + if (err) { + DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__)); + rtt_status->status = RTT_STOPPED; + /* disable FTM */ + dhd_rtt_ftm_enable(dhd, FALSE); + if (rtt_status->pm_restore) { + pm = PM_FAST; + DHD_ERROR(("pm_restore =%d func =%s \n", + rtt_status->pm_restore, __FUNCTION__)); + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to set PM \n")); + } else { + rtt_status->pm_restore = FALSE; + } + } + } + return err; +} +#endif /* WL_CFG80211 */ + +int +dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + int err = BCME_OK; + struct rtt_noti_callback *cb = NULL, *iter; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(noti_fn, "noti_fn is NULL", err); + + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + spin_lock_bh(¬i_list_lock); +#if 
defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + if (iter->noti_fn == noti_fn) { + goto exit; + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC); + if (!cb) { + err = -ENOMEM; + goto exit; + } + cb->noti_fn = noti_fn; + cb->ctx = ctx; + list_add(&cb->list, &rtt_status->noti_fn_list); +exit: + spin_unlock_bh(¬i_list_lock); + return err; +} + +int +dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn) +{ + int err = BCME_OK; + struct rtt_noti_callback *cb = NULL, *iter; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(noti_fn, "noti_fn is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + spin_lock_bh(¬i_list_lock); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + if (iter->noti_fn == noti_fn) { + cb = iter; + list_del(&cb->list); + break; + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + + spin_unlock_bh(¬i_list_lock); + if (cb) { + kfree(cb); + } + return err; +} + +static wifi_rate_t +dhd_rtt_convert_rate_to_host(uint32 rspec) +{ + wifi_rate_t host_rate; + uint32 bandwidth; + memset(&host_rate, 0, sizeof(wifi_rate_t)); + if (RSPEC_ISLEGACY(rspec)) { + host_rate.preamble = 0; + } else if (RSPEC_ISHT(rspec)) { + host_rate.preamble = 2; + host_rate.rateMcsIdx = rspec & WL_RSPEC_RATE_MASK; + } else if (RSPEC_ISVHT(rspec)) { + host_rate.preamble = 3; + host_rate.rateMcsIdx = rspec & WL_RSPEC_VHT_MCS_MASK; + host_rate.nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> 
WL_RSPEC_VHT_NSS_SHIFT; + } + + bandwidth = RSPEC_BW(rspec); + switch (bandwidth) { + case WL_RSPEC_BW_20MHZ: + host_rate.bw = RTT_RATE_20M; + break; + case WL_RSPEC_BW_40MHZ: + host_rate.bw = RTT_RATE_40M; + break; + case WL_RSPEC_BW_80MHZ: + host_rate.bw = RTT_RATE_80M; + break; + case WL_RSPEC_BW_160MHZ: + host_rate.bw = RTT_RATE_160M; + break; + default: + host_rate.bw = RTT_RATE_20M; + break; + } + + host_rate.bitrate = rate_rspec2rate(rspec) / 100; /* 100kbps */ + DHD_RTT(("bit rate : %d\n", host_rate.bitrate)); + return host_rate; +} + +#define FTM_FRAME_TYPES {"SETUP", "TRIGGER", "TIMESTAMP"} +static int +dhd_rtt_convert_results_to_host_v1(rtt_report_t *rtt_report, const uint8 *p_data, + uint16 tlvid, uint16 len) +{ + int i; + int err = BCME_OK; + char eabuf[ETHER_ADDR_STR_LEN]; + wl_proxd_result_flags_t flags; + wl_proxd_session_state_t session_state; + wl_proxd_status_t proxd_status; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct timespec ts; +#endif /* LINUX_VER >= 2.6.39 */ + uint32 ratespec; + uint32 avg_dist; + const wl_proxd_rtt_result_v1_t *p_data_info = NULL; + const wl_proxd_rtt_sample_v1_t *p_sample_avg = NULL; + const wl_proxd_rtt_sample_v1_t *p_sample = NULL; + wl_proxd_intvl_t rtt; + wl_proxd_intvl_t p_time; + uint16 num_rtt = 0, snr = 0, bitflips = 0; + wl_proxd_phy_error_t tof_phy_error = 0; + wl_proxd_phy_error_t tof_phy_tgt_error = 0; + wl_proxd_snr_t tof_target_snr = 0; + wl_proxd_bitflips_t tof_target_bitflips = 0; + int16 rssi = 0; + int32 dist = 0; + uint8 num_ftm = 0; + char *ftm_frame_types[] = FTM_FRAME_TYPES; + + BCM_REFERENCE(ftm_frame_types); + BCM_REFERENCE(dist); + BCM_REFERENCE(rssi); + BCM_REFERENCE(tof_target_bitflips); + BCM_REFERENCE(tof_target_snr); + BCM_REFERENCE(tof_phy_tgt_error); + BCM_REFERENCE(tof_phy_error); + BCM_REFERENCE(bitflips); + BCM_REFERENCE(snr); + + NULL_CHECK(rtt_report, "rtt_report is NULL", err); + NULL_CHECK(p_data, "p_data is NULL", err); + DHD_RTT(("%s enter\n", __FUNCTION__)); + 
p_data_info = (const wl_proxd_rtt_result_v1_t *) p_data; + /* unpack and format 'flags' for display */ + flags = ltoh16_ua(&p_data_info->flags); + + /* session state and status */ + session_state = ltoh16_ua(&p_data_info->state); + proxd_status = ltoh32_ua(&p_data_info->status); + bcm_ether_ntoa((&(p_data_info->peer)), eabuf); + ftm_session_state_value_to_logstr(session_state); + ftm_status_value_to_logstr(proxd_status); + DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", + eabuf, + session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, + ftm_status_value_to_logstr(proxd_status))); + + /* show avg_dist (1/256m units), burst_num */ + avg_dist = ltoh32_ua(&p_data_info->avg_dist); + if (avg_dist == 0xffffffff) { /* report 'failure' case */ + DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n", + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt)); /* in a session */ + avg_dist = FTM_INVALID; + } + else { + DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n", + avg_dist >> 8, /* 1/256m units */ + ((avg_dist & 0xff) * 625) >> 4, + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt, + p_data_info->num_ftm)); /* in a session */ + } + /* show 'avg_rtt' sample */ + p_sample_avg = &p_data_info->avg_rtt; + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)); + DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d ratespec=0x%08x\n", + (int16) ltoh16_ua(&p_sample_avg->rssi), + ltoh32_ua(&p_sample_avg->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)), + ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10, + ltoh32_ua(&p_sample_avg->ratespec))); + + /* set peer address */ + rtt_report->addr = p_data_info->peer; + /* burst num */ + rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num); + /* success num */ + rtt_report->success_num = p_data_info->num_valid_rtt; + /* actual number of FTM supported by 
peer */ + rtt_report->num_per_burst_peer = p_data_info->num_ftm; + rtt_report->negotiated_burst_num = p_data_info->num_ftm; + /* status */ + rtt_report->status = ftm_get_statusmap_info(proxd_status, + &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info)); + + /* rssi (0.5db) */ + rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_data_info->avg_rtt.rssi)) * 2; + + /* rx rate */ + ratespec = ltoh32_ua(&p_data_info->avg_rtt.ratespec); + rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec); + /* tx rate */ + if (flags & WL_PROXD_RESULT_FLAG_VHTACK) { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010); + } else { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc); + } + /* rtt_sd */ + rtt.tmu = ltoh16_ua(&p_data_info->avg_rtt.rtt.tmu); + rtt.intvl = ltoh32_ua(&p_data_info->avg_rtt.rtt.intvl); + rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */ + rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */ + DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt)); + DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi)); + + /* average distance */ + if (avg_dist != FTM_INVALID) { + rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */ + rtt_report->distance += (avg_dist & 0xff) * 1000 / 256; + } else { + rtt_report->distance = FTM_INVALID; + } + /* time stamp */ + /* get the time elapsed from boot time */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + get_monotonic_boottime(&ts); + rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); +#endif /* LINUX_VER >= 2.6.39 */ + + if (proxd_status == WL_PROXD_E_REMOTE_FAIL) { + /* retry time after failure */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */ + DHD_RTT((">\tretry_after: %d%s\n", + ltoh32_ua(&p_data_info->u.retry_after.intvl), + 
ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu)))); + } else { + /* burst duration */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */ + DHD_RTT((">\tburst_duration: %d%s\n", + ltoh32_ua(&p_data_info->u.burst_duration.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu)))); + DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration)); + } + + /* display detail if available */ + num_rtt = ltoh16_ua(&p_data_info->num_rtt); + if (num_rtt > 0) { + DHD_RTT((">\tnum rtt: %d samples\n", num_rtt)); + p_sample = &p_data_info->rtt[0]; + for (i = 0; i < num_rtt; i++) { + snr = 0; + bitflips = 0; + tof_phy_error = 0; + tof_phy_tgt_error = 0; + tof_target_snr = 0; + tof_target_bitflips = 0; + rssi = 0; + dist = 0; + num_ftm = p_data_info->num_ftm; + /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */ + if ((i % num_ftm) == 1) { + rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi); + snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr); + bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips); + tof_phy_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_phy_error); + tof_phy_tgt_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_tgt_phy_error); + tof_target_snr = + (wl_proxd_snr_t) + ltoh16_ua(&p_sample->tof_tgt_snr); + tof_target_bitflips = + (wl_proxd_bitflips_t) + ltoh16_ua(&p_sample->tof_tgt_bitflips); + dist = ltoh32_ua(&p_sample->distance); + } else { + rssi = -1; + snr = 0; + bitflips = 0; + dist = 0; + tof_target_bitflips = 0; + tof_target_snr = 0; + tof_phy_tgt_error = 0; + } + DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d" + " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x" + " target_bitflips=%d dist=%d rtt=%d%s status %s" + " Type %s coreid=%d\n", + i, p_sample->id, rssi, snr, + bitflips, tof_phy_error, 
tof_phy_tgt_error, + tof_target_snr, + tof_target_bitflips, dist, + ltoh32_ua(&p_sample->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), + ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)), + ftm_frame_types[i % num_ftm], p_sample->coreid)); + p_sample++; + } + } + return err; +} + +static int +dhd_rtt_convert_results_to_host_v2(rtt_report_t *rtt_report, const uint8 *p_data, + uint16 tlvid, uint16 len) +{ + int i; + int err = BCME_OK; + char eabuf[ETHER_ADDR_STR_LEN]; + wl_proxd_result_flags_t flags; + wl_proxd_session_state_t session_state; + wl_proxd_status_t proxd_status; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct timespec ts; +#endif /* LINUX_VER >= 2.6.39 */ + uint32 ratespec; + uint32 avg_dist; + const wl_proxd_rtt_result_v2_t *p_data_info = NULL; + const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL; + const wl_proxd_rtt_sample_v2_t *p_sample = NULL; + uint16 num_rtt = 0; + wl_proxd_intvl_t rtt; + wl_proxd_intvl_t p_time; + uint16 snr = 0, bitflips = 0; + wl_proxd_phy_error_t tof_phy_error = 0; + wl_proxd_phy_error_t tof_phy_tgt_error = 0; + wl_proxd_snr_t tof_target_snr = 0; + wl_proxd_bitflips_t tof_target_bitflips = 0; + int16 rssi = 0; + int32 dist = 0; + uint32 chanspec = 0; + uint8 num_ftm = 0; + char *ftm_frame_types[] = FTM_FRAME_TYPES; + + BCM_REFERENCE(ftm_frame_types); + BCM_REFERENCE(dist); + BCM_REFERENCE(rssi); + BCM_REFERENCE(tof_target_bitflips); + BCM_REFERENCE(tof_target_snr); + BCM_REFERENCE(tof_phy_tgt_error); + BCM_REFERENCE(tof_phy_error); + BCM_REFERENCE(bitflips); + BCM_REFERENCE(snr); + BCM_REFERENCE(chanspec); + + NULL_CHECK(rtt_report, "rtt_report is NULL", err); + NULL_CHECK(p_data, "p_data is NULL", err); + DHD_RTT(("%s enter\n", __FUNCTION__)); + p_data_info = (const wl_proxd_rtt_result_v2_t *) p_data; + /* unpack and format 'flags' for display */ + flags = ltoh16_ua(&p_data_info->flags); + /* session state and status */ + session_state = ltoh16_ua(&p_data_info->state); + 
proxd_status = ltoh32_ua(&p_data_info->status); + bcm_ether_ntoa((&(p_data_info->peer)), eabuf); + ftm_session_state_value_to_logstr(session_state); + ftm_status_value_to_logstr(proxd_status); + DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", + eabuf, + session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, + ftm_status_value_to_logstr(proxd_status))); + /* show avg_dist (1/256m units), burst_num */ + avg_dist = ltoh32_ua(&p_data_info->avg_dist); + if (avg_dist == 0xffffffff) { /* report 'failure' case */ + DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n", + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt)); /* in a session */ + avg_dist = FTM_INVALID; + } else { + DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n", + avg_dist >> 8, /* 1/256m units */ + ((avg_dist & 0xff) * 625) >> 4, + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt, + p_data_info->num_ftm)); /* in a session */ + } + + /* show 'avg_rtt' sample */ + /* in v2, avg_rtt is the first element of the variable rtt[] */ + p_sample_avg = &p_data_info->rtt[0]; + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)); + DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d" + "ratespec=0x%08x chanspec=0x%08x\n", + (int16) ltoh16_ua(&p_sample_avg->rssi), + ltoh32_ua(&p_sample_avg->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)), + ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10, + ltoh32_ua(&p_sample_avg->ratespec), + ltoh32_ua(&p_sample_avg->chanspec))); + + /* set peer address */ + rtt_report->addr = p_data_info->peer; + + /* burst num */ + rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num); + + /* success num */ + rtt_report->success_num = p_data_info->num_valid_rtt; + + /* actual number of FTM supported by peer */ + rtt_report->num_per_burst_peer = p_data_info->num_ftm; + rtt_report->negotiated_burst_num = 
p_data_info->num_ftm; + + /* status */ + rtt_report->status = ftm_get_statusmap_info(proxd_status, + &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info)); + + /* rssi (0.5db) */ + rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2; + + /* rx rate */ + ratespec = ltoh32_ua(&p_sample_avg->ratespec); + rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec); + + /* tx rate */ + if (flags & WL_PROXD_RESULT_FLAG_VHTACK) { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010); + } else { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc); + } + + /* rtt_sd */ + rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu); + rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl); + rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */ + rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */ + DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt)); + DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi)); + + /* average distance */ + if (avg_dist != FTM_INVALID) { + rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */ + rtt_report->distance += (avg_dist & 0xff) * 1000 / 256; + } else { + rtt_report->distance = FTM_INVALID; + } + /* time stamp */ + /* get the time elapsed from boot time */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + get_monotonic_boottime(&ts); + rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); +#endif /* LINUX_VER >= 2.6.39 */ + + if (proxd_status == WL_PROXD_E_REMOTE_FAIL) { + /* retry time after failure */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */ + DHD_RTT((">\tretry_after: %d%s\n", + ltoh32_ua(&p_data_info->u.retry_after.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu)))); + } else { + /* burst duration */ + p_time.intvl = 
ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */ + DHD_RTT((">\tburst_duration: %d%s\n", + ltoh32_ua(&p_data_info->u.burst_duration.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu)))); + DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration)); + } + /* display detail if available */ + num_rtt = ltoh16_ua(&p_data_info->num_rtt); + if (num_rtt > 0) { + DHD_RTT((">\tnum rtt: %d samples\n", num_rtt)); + p_sample = &p_data_info->rtt[1]; + for (i = 0; i < num_rtt; i++) { + snr = 0; + bitflips = 0; + tof_phy_error = 0; + tof_phy_tgt_error = 0; + tof_target_snr = 0; + tof_target_bitflips = 0; + rssi = 0; + dist = 0; + num_ftm = p_data_info->num_ftm; + /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */ + if ((i % num_ftm) == 1) { + rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi); + snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr); + bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips); + tof_phy_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_phy_error); + tof_phy_tgt_error = + (wl_proxd_phy_error_t) + ltoh32_ua(&p_sample->tof_tgt_phy_error); + tof_target_snr = + (wl_proxd_snr_t) + ltoh16_ua(&p_sample->tof_tgt_snr); + tof_target_bitflips = + (wl_proxd_bitflips_t) + ltoh16_ua(&p_sample->tof_tgt_bitflips); + dist = ltoh32_ua(&p_sample->distance); + chanspec = ltoh32_ua(&p_sample->chanspec); + } else { + rssi = -1; + snr = 0; + bitflips = 0; + dist = 0; + tof_target_bitflips = 0; + tof_target_snr = 0; + tof_phy_tgt_error = 0; + } + DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d" + " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x" + " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s" + " coreid=%d chanspec=0x%08x\n", + i, p_sample->id, rssi, snr, + bitflips, tof_phy_error, tof_phy_tgt_error, + tof_target_snr, + tof_target_bitflips, dist, + 
ltoh32_ua(&p_sample->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), + ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)), + ftm_frame_types[i % num_ftm], p_sample->coreid, + chanspec)); + p_sample++; + } + } + return err; +} + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int ret = BCME_OK; + int tlvs_len; + uint16 version; + wl_proxd_event_t *p_event; + wl_proxd_event_type_t event_type; + wl_proxd_ftm_session_status_t session_status; + const ftm_strmap_entry_t *p_loginfo; + rtt_result_t *rtt_result; + gfp_t kflags; +#ifdef WL_CFG80211 + int idx; + rtt_status_info_t *rtt_status; + rtt_target_info_t *rtt_target_info; + struct rtt_noti_callback *iter; + rtt_results_header_t *entry, *next, *rtt_results_header = NULL; + rtt_result_t *next2; + bool is_new = TRUE; +#endif /* WL_CFG80211 */ + + DHD_RTT(("Enter %s \n", __FUNCTION__)); + NULL_CHECK(dhd, "dhd is NULL", ret); + +#ifdef WL_CFG80211 + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", ret); + + if (RTT_IS_STOPPED(rtt_status)) { + /* Ignore the Proxd event */ + DHD_RTT((" event handler rtt is stopped \n")); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_RTT(("Device is target/Responder. Recv the event. 
\n")); + } else { + return ret; + } + } +#endif /* WL_CFG80211 */ + if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) { + DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__, + ntoh32_ua((void *)&event->datalen))); + return -EINVAL; + } + event_type = ntoh32_ua((void *)&event->event_type); + if (event_type != WLC_E_PROXD) { + DHD_ERROR((" failed event \n")); + return -EINVAL; + } + + if (!event_data) { + DHD_ERROR(("%s: event_data:NULL\n", __FUNCTION__)); + return -EINVAL; + } + p_event = (wl_proxd_event_t *) event_data; + version = ltoh16(p_event->version); + if (version < WL_PROXD_API_VERSION) { + DHD_ERROR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n", + version, WL_PROXD_API_VERSION)); + return ret; + } +#ifdef WL_CFG80211 + if (!in_atomic()) { + mutex_lock(&rtt_status->rtt_mutex); + } +#endif /* WL_CFG80211 */ + event_type = (wl_proxd_event_type_t) ltoh16(p_event->type); + + kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL; + + DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n", + p_event->type, ntoh16(p_event->type), ltoh16(p_event->type))); + p_loginfo = ftm_get_event_type_loginfo(event_type); + if (p_loginfo == NULL) { + DHD_ERROR(("receive an invalid FTM event %d\n", event_type)); + ret = -EINVAL; + goto exit; /* ignore this event */ + } + /* get TLVs len, skip over event header */ + if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) { + DHD_ERROR(("invalid FTM event length:%d\n", ltoh16(p_event->len))); + ret = -EINVAL; + goto exit; + } + tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs); + DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n", + p_loginfo->text, + version, + ltoh16(p_event->len), + ltoh16(p_event->method), + ltoh16(p_event->sid), + tlvs_len)); +#ifdef WL_CFG80211 + rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma 
GCC diagnostic ignored "-Wcast-qual" +#endif // endif + /* find a rtt_report_header for this mac address */ + list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { + if (!memcmp(&entry->peer_mac, &event->addr, ETHER_ADDR_LEN)) { + /* found a rtt_report_header for peer_mac in the list */ + is_new = FALSE; + rtt_results_header = entry; + break; + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif +#endif /* WL_CFG80211 */ + switch (event_type) { + case WL_PROXD_EVENT_SESSION_CREATE: + DHD_RTT(("WL_PROXD_EVENT_SESSION_CREATE\n")); + break; + case WL_PROXD_EVENT_SESSION_START: + DHD_RTT(("WL_PROXD_EVENT_SESSION_START\n")); + break; + case WL_PROXD_EVENT_BURST_START: + DHD_RTT(("WL_PROXD_EVENT_BURST_START\n")); + break; + case WL_PROXD_EVENT_BURST_END: + DHD_RTT(("WL_PROXD_EVENT_BURST_END\n")); +#ifdef WL_CFG80211 + if (is_new) { + /* allocate new header for rtt_results */ + rtt_results_header = kzalloc(sizeof(rtt_results_header_t), kflags); + if (!rtt_results_header) { + ret = -ENOMEM; + goto exit; + } + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + rtt_results_header->peer_mac = event->addr; + list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + } +#endif /* WL_CFG80211 */ + if (tlvs_len > 0) { + /* allocate rtt_results for new results */ + rtt_result = kzalloc(sizeof(rtt_result_t), kflags); + if (!rtt_result) { + ret = -ENOMEM; + goto exit; + } + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) &(rtt_result->report), + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); + if (ret != BCME_OK) { + DHD_ERROR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); + goto exit; + } +#ifdef WL_CFG80211 + /* fill out the results from the configuration param */ + rtt_result->report.ftm_num = 
rtt_target_info->num_frames_per_burst; + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; + + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len; +#endif /* WL_CFG80211 */ + } + break; + case WL_PROXD_EVENT_SESSION_END: + DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n")); +#ifdef WL_CFG80211 + if (!RTT_IS_ENABLED(rtt_status)) { + DHD_RTT(("Ignore the session end evt\n")); + goto exit; + } +#endif /* WL_CFG80211 */ + if (tlvs_len > 0) { + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) &session_status, + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); + if (ret != BCME_OK) { + DHD_ERROR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); + goto exit; + } + } +#ifdef WL_CFG80211 + /* In case of no result for the peer device, make fake result for error case */ + if (is_new) { + /* allocate new header for rtt_results */ + rtt_results_header = kzalloc(sizeof(rtt_results_header_t), GFP_KERNEL); + if (!rtt_results_header) { + ret = -ENOMEM; + goto exit; + } + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + rtt_results_header->peer_mac = event->addr; + list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + + /* allocate rtt_results for new results */ + rtt_result = kzalloc(sizeof(rtt_result_t), kflags); + if (!rtt_result) { + ret = -ENOMEM; + kfree(rtt_results_header); + goto exit; + } + /* fill out the results from the configuration param */ + rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst; + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; + 
rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP; + rtt_result->report.addr = rtt_target_info->addr; + rtt_result->report.distance = FTM_INVALID; + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len; + } + /* find next target to trigger RTT */ + for (idx = (rtt_status->cur_idx + 1); + idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + /* restart to measure RTT from next device */ + DHD_ERROR(("restart to measure rtt\n")); + schedule_work(&rtt_status->work); + } else { + DHD_RTT(("RTT_STOPPED\n")); + rtt_status->status = RTT_STOPPED; + schedule_work(&rtt_status->work); + /* notify the completed information to others */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + /* remove the rtt results in cache */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate rtt_results_header list */ + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(entry); + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + /* reinitialize the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + rtt_status->rtt_config.rtt_target_cnt = 0; + 
memset(rtt_status->rtt_config.target_info, 0, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->cur_idx = 0; + } +#endif /* WL_CFG80211 */ + break; + case WL_PROXD_EVENT_SESSION_RESTART: + DHD_RTT(("WL_PROXD_EVENT_SESSION_RESTART\n")); + break; + case WL_PROXD_EVENT_BURST_RESCHED: + DHD_RTT(("WL_PROXD_EVENT_BURST_RESCHED\n")); + break; + case WL_PROXD_EVENT_SESSION_DESTROY: + DHD_RTT(("WL_PROXD_EVENT_SESSION_DESTROY\n")); + break; + case WL_PROXD_EVENT_FTM_FRAME: + DHD_RTT(("WL_PROXD_EVENT_FTM_FRAME\n")); + break; + case WL_PROXD_EVENT_DELAY: + DHD_RTT(("WL_PROXD_EVENT_DELAY\n")); + break; + case WL_PROXD_EVENT_VS_INITIATOR_RPT: + DHD_RTT(("WL_PROXD_EVENT_VS_INITIATOR_RPT\n ")); + break; + case WL_PROXD_EVENT_RANGING: + DHD_RTT(("WL_PROXD_EVENT_RANGING\n")); + break; + case WL_PROXD_EVENT_COLLECT: + DHD_RTT(("WL_PROXD_EVENT_COLLECT\n")); + if (tlvs_len > 0) { + void *buffer = NULL; + if (!(buffer = kzalloc(tlvs_len, kflags))) { + ret = -ENOMEM; + goto exit; + } + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf(buffer, + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn); + kfree(buffer); + if (ret != BCME_OK) { + DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n", + __FUNCTION__, event_type)); + goto exit; + } + } + break; + case WL_PROXD_EVENT_MF_STATS: + DHD_RTT(("WL_PROXD_EVENT_MF_STATS\n")); + if (tlvs_len > 0) { + void *buffer = NULL; + if (!(buffer = kzalloc(tlvs_len, kflags))) { + ret = -ENOMEM; + goto exit; + } + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf(buffer, + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn); + kfree(buffer); + if (ret != BCME_OK) { + DHD_ERROR(("%s : Failed to unpack xtlv for event %d\n", + __FUNCTION__, event_type)); + goto exit; + } + } + break; + + default: + DHD_ERROR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type)); + break; + } +exit: 
+#ifdef WL_CFG80211 + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } +#endif /* WL_CFG80211 */ + + return ret; +} + +#ifdef WL_CFG80211 +static void +dhd_rtt_work(struct work_struct *work) +{ + rtt_status_info_t *rtt_status; + dhd_pub_t *dhd; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + rtt_status = container_of(work, rtt_status_info_t, work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + if (rtt_status == NULL) { + DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__)); + return; + } + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + (void) dhd_rtt_start(dhd); +} +#endif /* WL_CFG80211 */ + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa) +{ + rtt_status_info_t *rtt_status; + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + NULL_CHECK(capa, "capa is NULL", err); + bzero(capa, sizeof(rtt_capabilities_t)); + + /* set rtt capabilities */ + if (rtt_status->rtt_capa.proto & RTT_CAP_ONE_WAY) + capa->rtt_one_sided_supported = 1; + if (rtt_status->rtt_capa.proto & RTT_CAP_FTM_WAY) + capa->rtt_ftm_supported = 1; + + if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCI) + capa->lci_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCR) + capa->lcr_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_PREAMBLE) + capa->preamble_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_BW) + capa->bw_support = 1; + + /* bit mask */ + capa->preamble_support = rtt_status->rtt_capa.preamble; + capa->bw_support = rtt_status->rtt_capa.bw; + + return err; +} + +#ifdef WL_CFG80211 +int +dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info) +{ + u32 chanspec = 0; + int err = BCME_OK; + chanspec_t 
c = 0; + u32 channel; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + + if ((err = wldev_iovar_getint(dev, "chanspec", + (s32 *)&chanspec)) == BCME_OK) { + c = (chanspec_t)dtoh32(chanspec); + c = wl_chspec_driver_to_host(c); + channel = wf_chspec_ctlchan(c); + DHD_RTT((" control channel is %d \n", channel)); + if (CHSPEC_IS20(c)) { + channel_info->width = WIFI_CHAN_WIDTH_20; + DHD_RTT((" band is 20 \n")); + } else if (CHSPEC_IS40(c)) { + channel_info->width = WIFI_CHAN_WIDTH_40; + DHD_RTT(("band is 40 \n")); + } else { + channel_info->width = WIFI_CHAN_WIDTH_80; + DHD_RTT(("band is 80 \n")); + } + if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) && + (channel <= CH_MAX_2G_CHANNEL)) { + channel_info->center_freq = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + } else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) { + channel_info->center_freq = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); + } + if ((channel_info->width == WIFI_CHAN_WIDTH_80) || + (channel_info->width == WIFI_CHAN_WIDTH_40)) { + channel = CHSPEC_CHANNEL(c); + channel_info->center_freq0 = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); + } + } else { + DHD_ERROR(("Failed to get the chanspec \n")); + } + return err; +} + +int +dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info) +{ + int err = BCME_OK; + char chanbuf[CHANSPEC_STR_LEN]; + int pm = PM_OFF; + int ftm_cfg_cnt = 0; + chanspec_t chanspec; + wifi_channel_info_t channel; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; + rtt_status_info_t *rtt_status; + + memset(&channel, 0, sizeof(channel)); + BCM_REFERENCE(chanbuf); + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT(("STA responder/Target. 
\n")); + } + DHD_RTT(("Enter %s \n", __FUNCTION__)); + if (!dhd_is_associated(dhd, 0, NULL)) { + if (channel_info) { + channel.width = channel_info->width; + channel.center_freq = channel_info->center_freq; + channel.center_freq0 = channel_info->center_freq; + } + else { + channel.width = WIFI_CHAN_WIDTH_80; + channel.center_freq = DEFAULT_FTM_FREQ; + channel.center_freq0 = DEFAULT_FTM_CNTR_FREQ0; + } + chanspec = dhd_rtt_convert_to_chspec(channel); + DHD_RTT(("chanspec/channel set as %s for rtt.\n", + wf_chspec_ntoa(chanspec, chanbuf))); + err = wldev_iovar_setint(dev, "chanspec", chanspec); + if (err) { + DHD_ERROR(("Failed to set the chanspec \n")); + } + } + rtt_status->pm = PM_OFF; + err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); + DHD_RTT(("Current PM value read %d\n", rtt_status->pm)); + if (err) { + DHD_ERROR(("Failed to get the PM value \n")); + } else { + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to set the PM \n")); + rtt_status->pm_restore = FALSE; + } else { + rtt_status->pm_restore = TRUE; + } + } + if (!RTT_IS_ENABLED(rtt_status)) { + err = dhd_rtt_ftm_enable(dhd, TRUE); + if (err) { + DHD_ERROR(("Failed to enable FTM (%d)\n", err)); + goto exit; + } + DHD_RTT(("FTM enabled \n")); + } + rtt_status->status = RTT_ENABLED; + DHD_RTT(("Responder enabled \n")); + memset(ftm_configs, 0, sizeof(ftm_configs)); + memset(ftm_params, 0, sizeof(ftm_params)); + ftm_configs[ftm_cfg_cnt].enable = TRUE; + ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_TARGET; + rtt_status->flags = WL_PROXD_SESSION_FLAG_TARGET; + DHD_RTT(("Set the device as responder \n")); + err = dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS, + ftm_configs, ftm_cfg_cnt); +exit: + if (err) { + rtt_status->status = RTT_STOPPED; + DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__)); + dhd_rtt_ftm_enable(dhd, FALSE); + DHD_RTT(("restoring the PM value \n")); + if (rtt_status->pm_restore) { + 
pm = PM_FAST; + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to restore PM \n")); + } else { + rtt_status->pm_restore = FALSE; + } + } + } + return err; +} + +int +dhd_rtt_cancel_responder(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + int pm = 0; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + DHD_RTT(("Enter %s \n", __FUNCTION__)); + err = dhd_rtt_ftm_enable(dhd, FALSE); + if (err) { + DHD_ERROR(("failed to disable FTM (%d)\n", err)); + } + rtt_status->status = RTT_STOPPED; + if (rtt_status->pm_restore) { + pm = PM_FAST; + DHD_RTT(("pm_restore =%d \n", rtt_status->pm_restore)); + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to restore PM \n")); + } else { + rtt_status->pm_restore = FALSE; + } + } + return err; +} +#endif /* WL_CFG80211 */ + +int +dhd_rtt_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; +#ifdef WL_CFG80211 + int ret; + int32 drv_up = 1; + int32 version; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + dhd->rtt_supported = FALSE; + if (dhd->rtt_state) { + return err; + } + dhd->rtt_state = kzalloc(sizeof(rtt_status_info_t), GFP_KERNEL); + if (dhd->rtt_state == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("%s : failed to create rtt_state\n", __FUNCTION__)); + return err; + } + bzero(dhd->rtt_state, sizeof(rtt_status_info_t)); + rtt_status = GET_RTTSTATE(dhd); + rtt_status->rtt_config.target_info = + kzalloc(TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), GFP_KERNEL); + if (rtt_status->rtt_config.target_info == NULL) { + DHD_ERROR(("%s failed to allocate the target info for %d\n", + __FUNCTION__, RTT_MAX_TARGET_CNT)); + err = BCME_NOMEM; + goto exit; + } + rtt_status->dhd = dhd; + /* need to do WLC_UP */ + dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&drv_up, sizeof(int32), TRUE, 0); + + ret 
= dhd_rtt_get_version(dhd, &version); + if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) { + DHD_ERROR(("%s : FTM is supported\n", __FUNCTION__)); + dhd->rtt_supported = TRUE; + /* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */ + rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY; + + /* indicate to set tx rate */ + rtt_status->rtt_capa.feature |= RTT_FEATURE_LCI; + rtt_status->rtt_capa.feature |= RTT_FEATURE_LCR; + rtt_status->rtt_capa.feature |= RTT_FEATURE_PREAMBLE; + rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_VHT; + rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_HT; + + /* indicate to set bandwith */ + rtt_status->rtt_capa.feature |= RTT_FEATURE_BW; + rtt_status->rtt_capa.bw |= RTT_BW_20; + rtt_status->rtt_capa.bw |= RTT_BW_40; + rtt_status->rtt_capa.bw |= RTT_BW_80; + } else { + if ((ret != BCME_OK) || (version == 0)) { + DHD_ERROR(("%s : FTM is not supported\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n", + __FUNCTION__, WL_PROXD_API_VERSION, version)); + } + } + /* cancel all of RTT request once we got the cancel request */ + rtt_status->all_cancel = TRUE; + mutex_init(&rtt_status->rtt_mutex); + INIT_LIST_HEAD(&rtt_status->noti_fn_list); + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + INIT_WORK(&rtt_status->work, dhd_rtt_work); +exit: + if (err < 0) { + kfree(rtt_status->rtt_config.target_info); + kfree(dhd->rtt_state); + } +#endif /* WL_CFG80211 */ + return err; + +} + +int +dhd_rtt_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; +#ifdef WL_CFG80211 + rtt_status_info_t *rtt_status; + rtt_results_header_t *rtt_header, *next; + rtt_result_t *rtt_result, *next2; + struct rtt_noti_callback *iter, *iter2; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + rtt_status->status = RTT_STOPPED; + DHD_RTT(("rtt is stopped %s \n", __FUNCTION__)); + /* clear evt callback list */ +#if defined(STRICT_GCC_WARNINGS) && 
defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + + if (!list_empty(&rtt_status->noti_fn_list)) { + list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + /* remove the rtt results */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + list_for_each_entry_safe(rtt_header, next, &rtt_status->rtt_results_cache, list) { + list_del(&rtt_header->list); + list_for_each_entry_safe(rtt_result, next2, + &rtt_header->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(rtt_header); + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + kfree(rtt_status->rtt_config.target_info); + kfree(dhd->rtt_state); + dhd->rtt_state = NULL; +#endif /* WL_CFG80211 */ + return err; +} diff --git a/bcmdhd.100.10.315.x/dhd_rtt.h b/bcmdhd.100.10.315.x/dhd_rtt.h new file mode 100644 index 0000000..df644ca --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_rtt.h @@ -0,0 +1,397 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id$ + */ +#ifndef __DHD_RTT_H__ +#define __DHD_RTT_H__ + +#include "dngl_stats.h" + +#define RTT_MAX_TARGET_CNT 50 +#define RTT_MAX_FRAME_CNT 25 +#define RTT_MAX_RETRY_CNT 10 +#define DEFAULT_FTM_CNT 6 +#define DEFAULT_RETRY_CNT 6 +#define DEFAULT_FTM_FREQ 5180 +#define DEFAULT_FTM_CNTR_FREQ0 5210 + +#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count) + +#define TARGET_TYPE(target) (target->type) + +#ifndef BIT +#define BIT(x) (1 << (x)) +#endif // endif + +/* DSSS, CCK and 802.11n rates in [500kbps] units */ +#define WL_MAXRATE 108 /* in 500kbps units */ +#define WL_RATE_1M 2 /* in 500kbps units */ +#define WL_RATE_2M 4 /* in 500kbps units */ +#define WL_RATE_5M5 11 /* in 500kbps units */ +#define WL_RATE_11M 22 /* in 500kbps units */ +#define WL_RATE_6M 12 /* in 500kbps units */ +#define WL_RATE_9M 18 /* in 500kbps units */ +#define WL_RATE_12M 24 /* in 500kbps units */ +#define WL_RATE_18M 36 /* in 500kbps units */ +#define WL_RATE_24M 48 /* in 500kbps units */ +#define WL_RATE_36M 72 /* in 500kbps units */ +#define WL_RATE_48M 96 /* in 500kbps units */ +#define WL_RATE_54M 108 /* in 500kbps units */ +#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state) + +enum rtt_role { + RTT_INITIATOR = 0, + RTT_TARGET = 1 +}; +enum rtt_status { + RTT_STOPPED = 0, + RTT_STARTED = 1, + RTT_ENABLED = 2 +}; +typedef int64_t wifi_timestamp; /* In microseconds (us) */ +typedef int64_t wifi_timespan; +typedef int32 wifi_rssi_rtt; + +typedef enum { + RTT_INVALID, + RTT_ONE_WAY, + RTT_TWO_WAY, + RTT_AUTO +} rtt_type_t; + +/* RTT peer type */ +typedef enum { + RTT_PEER_AP = 0x1, + RTT_PEER_STA = 0x2, + RTT_PEER_P2P_GO = 0x3, + 
RTT_PEER_P2P_CLIENT = 0x4, + RTT_PEER_NAN = 0x5, + RTT_PEER_INVALID = 0x6 +} rtt_peer_type_t; + +/* Ranging status */ +typedef enum rtt_reason { + RTT_STATUS_SUCCESS = 0, + RTT_STATUS_FAILURE = 1, // general failure status + RTT_STATUS_FAIL_NO_RSP = 2, // target STA does not respond to request + RTT_STATUS_FAIL_REJECTED = 3, // request rejected. Applies to 2-sided RTT only + RTT_STATUS_FAIL_NOT_SCHEDULED_YET = 4, + RTT_STATUS_FAIL_TM_TIMEOUT = 5, // timing measurement times out + RTT_STATUS_FAIL_AP_ON_DIFF_CHANNEL = 6, // Target on different channel, cannot range + RTT_STATUS_FAIL_NO_CAPABILITY = 7, // ranging not supported + RTT_STATUS_ABORTED = 8, // request aborted for unknown reason + RTT_STATUS_FAIL_INVALID_TS = 9, // Invalid T1-T4 timestamp + RTT_STATUS_FAIL_PROTOCOL = 10, // 11mc protocol failed + RTT_STATUS_FAIL_SCHEDULE = 11, // request could not be scheduled + RTT_STATUS_FAIL_BUSY_TRY_LATER = 12, // responder cannot collaborate at time of request + RTT_STATUS_INVALID_REQ = 13, // bad request args + RTT_STATUS_NO_WIFI = 14, // WiFi not enabled + // Responder overrides param info, cannot range with new params + RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15 +} rtt_reason_t; + +enum { + RTT_CAP_ONE_WAY = BIT(0), + /* IEEE802.11mc */ + RTT_CAP_FTM_WAY = BIT(1) +}; + +enum { + RTT_FEATURE_LCI = BIT(0), + RTT_FEATURE_LCR = BIT(1), + RTT_FEATURE_PREAMBLE = BIT(2), + RTT_FEATURE_BW = BIT(3) +}; + +enum { + RTT_PREAMBLE_LEGACY = BIT(0), + RTT_PREAMBLE_HT = BIT(1), + RTT_PREAMBLE_VHT = BIT(2) +}; + +enum { + RTT_BW_5 = BIT(0), + RTT_BW_10 = BIT(1), + RTT_BW_20 = BIT(2), + RTT_BW_40 = BIT(3), + RTT_BW_80 = BIT(4), + RTT_BW_160 = BIT(5) +}; + +enum rtt_rate_bw { + RTT_RATE_20M, + RTT_RATE_40M, + RTT_RATE_80M, + RTT_RATE_160M +}; + +#define FTM_MAX_NUM_BURST_EXP 14 +#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY) +#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY) +#define HAS_RTT_CAP(cap) (HAS_ONEWAY_CAP(cap) || HAS_11MC_CAP(cap)) + +typedef struct wifi_channel_info { + 
wifi_channel_width_t width; + wifi_channel center_freq; /* primary 20 MHz channel */ + wifi_channel center_freq0; /* center freq (MHz) first segment */ + wifi_channel center_freq1; /* center freq (MHz) second segment valid for 80 + 80 */ +} wifi_channel_info_t; + +typedef struct wifi_rate { + uint32 preamble :3; /* 0: OFDM, 1: CCK, 2 : HT, 3: VHT, 4..7 reserved */ + uint32 nss :2; /* 1 : 1x1, 2: 2x2, 3: 3x3, 4: 4x4 */ + uint32 bw :3; /* 0: 20Mhz, 1: 40Mhz, 2: 80Mhz, 3: 160Mhz */ + /* OFDM/CCK rate code would be as per IEEE std in the unit of 0.5 mb + * HT/VHT it would be mcs index + */ + uint32 rateMcsIdx :8; + uint32 reserved :16; /* reserved */ + uint32 bitrate; /* unit of 100 Kbps */ +} wifi_rate_t; + +typedef struct rtt_target_info { + struct ether_addr addr; + struct ether_addr local_addr; + rtt_type_t type; /* rtt_type */ + rtt_peer_type_t peer; /* peer type */ + wifi_channel_info_t channel; /* channel information */ + chanspec_t chanspec; /* chanspec for channel */ + bool disable; /* disable for RTT measurement */ + /* + * Time interval between bursts (units: 100 ms). + * Applies to 1-sided and 2-sided RTT multi-burst requests. + * Range: 0-31, 0: no preference by initiator (2-sided RTT) + */ + uint32 burst_period; + /* + * Total number of RTT bursts to be executed. It will be + * specified in the same way as the parameter "Number of + * Burst Exponent" found in the FTM frame format. It + * applies to both: 1-sided RTT and 2-sided RTT. Valid + * values are 0 to 15 as defined in 802.11mc std. + * 0 means single shot + * The implication of this parameter on the maximum + * number of RTT results is the following: + * for 1-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst) + * for 2-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst - 1) + */ + uint16 num_burst; + /* + * num of frames per burst. 
+ * Minimum value = 1, Maximum value = 31 + * For 2-sided this equals the number of FTM frames + * to be attempted in a single burst. This also + * equals the number of FTM frames that the + * initiator will request that the responder send + * in a single frame + */ + uint32 num_frames_per_burst; + /* num of frames in each RTT burst + * for single side, measurement result num = frame number + * for 2 side RTT, measurement result num = frame number - 1 + */ + uint32 num_retries_per_ftm; /* retry time for RTT measurment frame */ + /* following fields are only valid for 2 side RTT */ + uint32 num_retries_per_ftmr; + uint8 LCI_request; + uint8 LCR_request; + /* + * Applies to 1-sided and 2-sided RTT. Valid values will + * be 2-11 and 15 as specified by the 802.11mc std for + * the FTM parameter burst duration. In a multi-burst + * request, if responder overrides with larger value, + * the initiator will return failure. In a single-burst + * request if responder overrides with larger value, + * the initiator will sent TMR_STOP to terminate RTT + * at the end of the burst_duration it requested. 
+ */ + uint32 burst_duration; + uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */ + uint8 bw; /* 5, 10, 20, 40, 80, 160 */ +} rtt_target_info_t; + +typedef struct rtt_config_params { + int8 rtt_target_cnt; + rtt_target_info_t *target_info; +} rtt_config_params_t; + +typedef struct rtt_status_info { + dhd_pub_t *dhd; + int8 status; /* current status for the current entry */ + int8 txchain; /* current device tx chain */ + int pm; /* to save current value of pm */ + int8 pm_restore; /* flag to reset the old value of pm */ + int8 cur_idx; /* current entry to do RTT */ + bool all_cancel; /* cancel all request once we got the cancel requet */ + uint32 flags; /* indicate whether device is configured as initiator or target */ + struct capability { + int32 proto :8; + int32 feature :8; + int32 preamble :8; + int32 bw :8; + } rtt_capa; /* rtt capability */ + struct mutex rtt_mutex; + rtt_config_params_t rtt_config; + struct work_struct work; + struct list_head noti_fn_list; + struct list_head rtt_results_cache; /* store results for RTT */ +} rtt_status_info_t; + +typedef struct rtt_report { + struct ether_addr addr; + unsigned int burst_num; /* # of burst inside a multi-burst request */ + unsigned int ftm_num; /* total RTT measurement frames attempted */ + unsigned int success_num; /* total successful RTT measurement frames */ + uint8 num_per_burst_peer; /* max number of FTM number per burst the peer support */ + rtt_reason_t status; /* raging status */ + /* in s, 11mc only, only for RTT_REASON_FAIL_BUSY_TRY_LATER, 1- 31s */ + uint8 retry_after_duration; + rtt_type_t type; /* rtt type */ + wifi_rssi_rtt rssi; /* average rssi in 0.5 dB steps e.g. 143 implies -71.5 dB */ + wifi_rssi_rtt rssi_spread; /* rssi spread in 0.5 db steps e.g. 5 implies 2.5 spread */ + /* + * 1-sided RTT: TX rate of RTT frame. + * 2-sided RTT: TX rate of initiator's Ack in response to FTM frame. + */ + wifi_rate_t tx_rate; + /* + * 1-sided RTT: TX rate of Ack from other side. 
+ * 2-sided RTT: TX rate of FTM frame coming from responder. + */ + wifi_rate_t rx_rate; + wifi_timespan rtt; /* round trip time in 0.1 nanoseconds */ + wifi_timespan rtt_sd; /* rtt standard deviation in 0.1 nanoseconds */ + wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */ + int distance; /* distance in cm (optional) */ + int distance_sd; /* standard deviation in cm (optional) */ + int distance_spread; /* difference between max and min distance recorded (optional) */ + wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */ + int burst_duration; /* in ms, how long the FW time is to fininish one burst measurement */ + int negotiated_burst_num; /* Number of bursts allowed by the responder */ + bcm_tlv_t *LCI; /* LCI Report */ + bcm_tlv_t *LCR; /* Location Civic Report */ +} rtt_report_t; +#define RTT_REPORT_SIZE (sizeof(rtt_report_t)) + +/* rtt_results_header to maintain rtt result list per mac address */ +typedef struct rtt_results_header { + struct ether_addr peer_mac; + uint32 result_cnt; + uint32 result_tot_len; /* sum of report_len of rtt_result */ + struct list_head list; + struct list_head result_list; +} rtt_results_header_t; + +/* rtt_result to link all of rtt_report */ +typedef struct rtt_result { + struct list_head list; + struct rtt_report report; + int32 report_len; /* total length of rtt_report */ +} rtt_result_t; + +/* RTT Capabilities */ +typedef struct rtt_capabilities { + uint8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */ + uint8 rtt_ftm_supported; /* if ftm rtt data collection is supported */ + uint8 lci_support; /* location configuration information */ + uint8 lcr_support; /* Civic Location */ + uint8 preamble_support; /* bit mask indicate what preamble is supported */ + uint8 bw_support; /* bit mask indicate what BW is supported */ +} rtt_capabilities_t; + +/* RTT responder information */ +typedef struct wifi_rtt_responder { + wifi_channel_info channel; /* 
channel of responder */ + uint8 preamble; /* preamble supported by responder */ +} wifi_rtt_responder_t; + +typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data); +/* Linux wrapper to call common dhd_rtt_set_cfg */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf); + +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt); + +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, + dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa); + +int +dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info); + +int +dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info); + +int +dhd_dev_rtt_cancel_responder(struct net_device *dev); +/* export to upper layer */ +chanspec_t +dhd_rtt_convert_to_chspec(wifi_channel_info_t channel); + +int +dhd_rtt_idx_to_burst_duration(uint idx); + +int +dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params); + +int +dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt); + +int +dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa); + +int +dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info); + +int +dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info); + +int +dhd_rtt_cancel_responder(dhd_pub_t *dhd); + +int +dhd_rtt_init(dhd_pub_t *dhd); + +int +dhd_rtt_deinit(dhd_pub_t *dhd); +#endif /* __DHD_RTT_H__ */ diff --git a/bcmdhd.100.10.315.x/dhd_sdio.c b/bcmdhd.100.10.315.x/dhd_sdio.c new file mode 100644 index 
0000000..7771367 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_sdio.c @@ -0,0 +1,10353 @@ +/* + * DHD Bus Module for SDIO + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_sdio.c 771911 2018-07-12 12:35:33Z $ + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef BCMSPI +#include +#endif /* BCMSPI */ +#include +#include +#include +#include + +#include +#include <802.1d.h> +#include <802.11.h> + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef PROP_TXSTATUS +#include +#endif // endif +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef BT_OVER_SDIO +#include +#endif /* BT_OVER_SDIO */ + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) +#include +#endif /* DEBUGGER || DHD_DSCOPE */ + +bool dhd_mp_halting(dhd_pub_t *dhdp); +extern void bcmsdh_waitfor_iodrain(void *sdh); +extern void bcmsdh_reject_ioreqs(void *sdh, bool reject); +extern bool bcmsdh_fatal_error(void *sdh); +static int dhdsdio_suspend(void *context); +static int dhdsdio_resume(void *context); + +#ifndef DHDSDIO_MEM_DUMP_FNAME +#define DHDSDIO_MEM_DUMP_FNAME "mem_dump" +#endif // endif + +#define QLEN (1024) /* bulk rx and tx queue lengths */ +#define FCHI (QLEN - 10) +#define FCLOW (FCHI / 2) +#define PRIOMASK 7 + +#define F0_BLOCK_SIZE 32 +#define TXRETRIES 2 /* # of retries for tx frames */ +#define READ_FRM_CNT_RETRIES 3 +#ifndef DHD_RXBOUND +#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */ +#endif // endif + +#ifndef DHD_TXBOUND +#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */ +#endif // endif + +#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_MEMBLOCK (32 * 1024) /* Block size used for downloading of dongle image */ + +#define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */ +#define MAX_MEM_BUF 4096 + +#ifndef DHD_FIRSTREAD +#define DHD_FIRSTREAD 32 +#endif // 
endif +#if !ISPOWEROF2(DHD_FIRSTREAD) +#error DHD_FIRSTREAD is not a power of 2! +#endif // endif + +/* Total length of frame header for dongle protocol */ +#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN) +#define SDPCM_HDRLEN_TXGLOM (SDPCM_HDRLEN + SDPCM_HWEXT_LEN) +#define MAX_TX_PKTCHAIN_CNT SDPCM_MAXGLOM_SIZE + +#ifdef SDTEST +#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN) +#else +#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN) +#endif // endif + +/* Space for header read, limit for data packets */ +#ifndef MAX_HDR_READ +#define MAX_HDR_READ 32 +#endif // endif +#if !ISPOWEROF2(MAX_HDR_READ) +#error MAX_HDR_READ is not a power of 2! +#endif // endif + +#define MAX_RX_DATASZ 2048 + +/* Maximum milliseconds to wait for F2 to come up */ +#define DHD_WAIT_F2RDY 3000 + +/* Maximum usec to wait for HTAVAIL to come up */ +#define DHD_WAIT_HTAVAIL 10000 + +/* Bump up limit on waiting for HT to account for first startup; + * if the image is doing a CRC calculation before programming the PMU + * for HT availability, it could take a couple hundred ms more, so + * max out at a 1 second (1000000us). + */ +#if (PMU_MAX_TRANSITION_DLY <= 1000000) +#undef PMU_MAX_TRANSITION_DLY +#define PMU_MAX_TRANSITION_DLY 1000000 +#endif // endif + +/* hooks for limiting threshold custom tx num in rx processing */ +#define DEFAULT_TXINRX_THRES 0 +#ifndef CUSTOM_TXINRX_THRES +#define CUSTOM_TXINRX_THRES DEFAULT_TXINRX_THRES +#endif // endif + +/* Value for ChipClockCSR during initial setup */ +#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ) +#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP) + +/* Flags for SDH calls */ +#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) + +/* Packet free applicable unconditionally for sdio and sdspi. Conditional if + * bufpool was present for gspi bus. 
+ */ +#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \ + PKTFREE(bus->dhd->osh, pkt, FALSE); +DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); + +#ifdef PKT_STATICS +pkt_statics_t tx_statics = {0}; +#endif + +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW +extern unsigned int system_hw_rev; +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */ + +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX 2024 +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; + +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) +#define KSO_ENAB(bus) ((bus)->kso) +#define SR_ENAB(bus) ((bus)->_srenab) +#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto)) + +#define MIN_RSRC_SR 0x3 +#define CORE_CAPEXT_ADDR_OFFSET (0x64c) +#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1) +#define RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define RCTL_LOGIC_DISABLE_MASK (1 << 27) + +#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup) +#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */ +#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */ +#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */ +#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0) +#define CC_CHIPCTRL3_SR_ENG_ENABLE (1 << 2) +#define OVERFLOW_BLKSZ512_WM 96 +#define OVERFLOW_BLKSZ512_MES 80 + +#define CC_PMUCC3 (0x3) + +#ifdef DHD_UCODE_DOWNLOAD +/* Ucode host download related macros */ +#define UCODE_DOWNLOAD_REQUEST 0xCAFECAFE +#define UCODE_DOWNLOAD_COMPLETE 0xABCDABCD +#endif /* DHD_UCODE_DOWNLOAD */ + +#if defined(BT_OVER_SDIO) +#define BTMEM_OFFSET 0x19000000 +/* BIT0 => WLAN Power UP and BIT1=> WLAN Wake */ 
+#define BT2WLAN_PWRUP_WAKE 0x03 +#define BT2WLAN_PWRUP_ADDR 0x640894 /* This address is specific to 43012B0 */ + +#define BTFW_MAX_STR_LEN 600 +#define BTFW_DOWNLOAD_BLK_SIZE (BTFW_MAX_STR_LEN/2 + 8) + +#define BTFW_ADDR_MODE_UNKNOWN 0 +#define BTFW_ADDR_MODE_EXTENDED 1 +#define BTFW_ADDR_MODE_SEGMENT 2 +#define BTFW_ADDR_MODE_LINEAR32 3 + +#define BTFW_HEX_LINE_TYPE_DATA 0 +#define BTFW_HEX_LINE_TYPE_END_OF_DATA 1 +#define BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS 2 +#define BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS 4 +#define BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS 5 + +#endif /* defined (BT_OVER_SDIO) */ + +/* Private data for SDIO bus interaction */ +typedef struct dhd_bus { + dhd_pub_t *dhd; + + bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */ + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + + sdpcmd_regs_t *regs; /* Registers for SDIO core */ + uint sdpcmrev; /* SDIO core revision */ + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 bus_num; /* bus number */ + uint32 slot_num; /* slot ID */ + uint32 hostintmask; /* Copy of Host Interrupt Mask */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ + + uint blocksize; /* Block size of SDIO transfers */ + uint roundup; /* Max roundup limit */ + + struct pktq txq; /* Queue length used for flow-control */ + uint8 flowcontrol; /* per 
prio flow control bitmask */ + uint8 tx_seq; /* Transmit sequence number (next) */ + uint8 tx_max; /* Maximum transmit sequence allowed */ + + uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN]; + uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ + uint16 nextlen; /* Next Read Len from last header */ + uint8 rx_seq; /* Receive sequence number (expected) */ + bool rxskip; /* Skip receive (awaiting NAK ACK) */ + + void *glomd; /* Packet containing glomming descriptor */ + void *glom; /* Packet chain for glommed superframe */ + uint glomerr; /* Glom packet read errors */ + + uint8 *rxbuf; /* Buffer for receiving control packets */ + uint rxblen; /* Allocated length of rxbuf */ + uint8 *rxctl; /* Aligned pointer into rxbuf */ + uint8 *databuf; /* Buffer for receiving big glom packet */ + uint8 *dataptr; /* Aligned pointer into databuf */ + uint rxlen; /* Length of valid data in buffer */ + + uint8 sdpcm_ver; /* Bus protocol reported by dongle */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + uint spurious; /* Count of spurious interrupts */ + uint pollrate; /* Ticks between device polls */ + uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ + + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ + + uint regfails; /* Count of R_REG/W_REG failures */ + + uint clkstate; /* State of sd and backplane clock(s) */ + bool activity; /* Activity flag for clock down */ + int32 idletime; /* Control for activity timeout */ + int32 idlecount; /* Activity timeout counter */ + int32 idleclock; /* How to set bus driver when idle */ + int32 sd_divisor; /* Speed control to bus driver */ + int32 sd_mode; /* Mode control to bus driver */ + int32 sd_rxchain; /* If 
bcmsdh api accepts PKT chains */ + bool use_rxchain; /* If dhd should use PKT chains */ + bool sleeping; /* Is SDIO bus sleeping? */ +#if defined(SUPPORT_P2P_GO_PS) + wait_queue_head_t bus_sleep; +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + bool ctrl_wait; + wait_queue_head_t ctrl_tx_wait; + uint rxflow_mode; /* Rx flow control mode */ + bool rxflow; /* Is rx flow control on */ + uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */ + bool alp_only; /* Don't use HT clock (ALP only) */ + /* Field to decide if rx of control frames happen in rxbuf or lb-pool */ + bool usebufpool; + int32 txinrx_thres; /* num of in-queued pkts */ + int32 dotxinrx; /* tx first in dhdsdio_readframes */ +#ifdef SDTEST + /* external loopback */ + bool ext_loop; + uint8 loopid; + + /* pktgen configuration */ + uint pktgen_freq; /* Ticks between bursts */ + uint pktgen_count; /* Packets to send each burst */ + uint pktgen_print; /* Bursts between count displays */ + uint pktgen_total; /* Stop after this many */ + uint pktgen_minlen; /* Minimum packet data len */ + uint pktgen_maxlen; /* Maximum packet data len */ + uint pktgen_mode; /* Configured mode: tx, rx, or echo */ + uint pktgen_stop; /* Number of tx failures causing stop */ + + /* active pktgen fields */ + uint pktgen_tick; /* Tick counter for bursts */ + uint pktgen_ptick; /* Burst counter for printing */ + uint pktgen_sent; /* Number of test packets generated */ + uint pktgen_rcvd; /* Number of test packets received */ + uint pktgen_prev_time; /* Time at which previous stats where printed */ + uint pktgen_prev_sent; /* Number of test packets generated when + * previous stats were printed + */ + uint pktgen_prev_rcvd; /* Number of test packets received when + * previous stats were printed + */ + uint pktgen_fail; /* Number of failed send attempts */ + uint16 pktgen_len; /* Length of next packet to send */ +#define PKTGEN_RCV_IDLE (0) +#define PKTGEN_RCV_ONGOING (1) + uint16 pktgen_rcv_state; /* receive state */ + uint 
pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */ +#endif /* SDTEST */ + + /* Some additional counters */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) */ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ + wake_counts_t wake_counts; /* Wake up counter */ +#ifdef BCMSPI + bool dwordmode; +#endif /* BCMSPI */ +#ifdef DHDENABLE_TAILPAD + uint tx_tailpad_chain; /* Number of tail padding by chaining pad_pkt */ + uint tx_tailpad_pktget; /* Number of tail padding by new PKTGET */ +#endif /* DHDENABLE_TAILPAD */ + uint8 *ctrl_frame_buf; + uint32 ctrl_frame_len; + bool ctrl_frame_stat; +#ifndef BCMSPI + uint32 rxint_mode; /* rx interrupt mode */ +#endif /* BCMSPI */ + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + bool kso; + bool _slpauto; + bool _oobwakeup; + bool _srenab; + bool readframes; + bool reqbussleep; + uint32 resetinstr; + uint32 dongle_ram_base; + + void *glom_pkt_arr[SDPCM_MAXGLOM_SIZE]; /* Array of pkts for glomming */ + uint32 txglom_cnt; /* Number of pkts in the glom array 
*/ + uint32 txglom_total_len; /* Total length of pkts in glom array */ + bool txglom_enable; /* Flag to indicate whether tx glom is enabled/disabled */ + uint32 txglomsize; /* Glom size limitation */ +#ifdef DHDENABLE_TAILPAD + void *pad_pkt; +#endif /* DHDENABLE_TAILPAD */ + uint32 dongle_trap_addr; /* device trap addr location in device memory */ +#if defined(BT_OVER_SDIO) + char *btfw_path; /* module_param: path to BT firmware image */ + uint32 bt_use_count; /* Counter that tracks whether BT is using the bus */ +#endif /* defined (BT_OVER_SDIO) */ + uint txglomframes; /* Number of tx glom frames (superframes) */ + uint txglompkts; /* Number of packets from tx glom frames */ + uint8 *membuf; /* Buffer for dhdsdio_membytes */ +} dhd_bus_t; + +/* + * Whenever DHD_IDLE_IMMEDIATE condition is handled, we have to now check if + * BT is active too. Instead of adding #ifdef code in all the places, we thought + * of adding one macro check as part of the if condition that checks for DHD_IDLE_IMMEDIATE + * In case of non BT over SDIO builds, this macro will always return TRUE. In case + * of the builds where BT_OVER_SDIO is enabled, it will expand to a condition check + * that checks if bt_use_count is zero. So this macro will return equate to 1 if + * bt_use_count is 0, indicating that there are no active users and if bt_use_count + * is non zero it would return 0 there by preventing the caller from executing the + * sleep calls. 
+ */ +#ifdef BT_OVER_SDIO +#define NO_OTHER_ACTIVE_BUS_USER(bus) (bus->bt_use_count == 0) +#else +#define NO_OTHER_ACTIVE_BUS_USER(bus) (1) +#endif /* BT_OVER_SDIO */ + +/* clkstate */ +#define CLK_NONE 0 +#define CLK_SDONLY 1 +#define CLK_PENDING 2 /* Not used yet */ +#define CLK_AVAIL 3 + +#define DHD_NOPMU(dhd) (FALSE) + +#if defined(BCMSDIOH_STD) +#define BLK_64_MAXTXGLOM 20 +#endif /* BCMSDIOH_STD */ + +#ifdef DHD_DEBUG +static int qcount[NUMPRIO]; +static int tx_packets[NUMPRIO]; +#endif /* DHD_DEBUG */ + +/* Deferred transmit */ +const uint dhd_deferred_tx = 1; + +extern uint dhd_watchdog_ms; +extern uint sd_f1_blocksize; + +#ifdef BCMSPI_ANDROID +extern uint *dhd_spi_lockcount; +#endif /* BCMSPI_ANDROID */ + +extern void dhd_os_wd_timer(void *bus, uint wdtick); +int dhd_enableOOB(dhd_pub_t *dhd, bool sleep); + +#ifdef DHD_PM_CONTROL_FROM_FILE +extern bool g_pm_control; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +/* Tx/Rx bounds */ +uint dhd_txbound; +uint dhd_rxbound; +uint dhd_txminmax = DHD_TXMINMAX; + +/* override the RAM size if possible */ +#define DONGLE_MIN_RAMSIZE (128 *1024) +int dhd_dongle_ramsize; + +uint dhd_doflow = TRUE; +uint dhd_dpcpoll = FALSE; + +module_param(dhd_doflow, uint, 0644); +module_param(dhd_dpcpoll, uint, 0644); + +static bool dhd_alignctl; + +static bool sd1idle; + +static bool retrydata; +#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata) + +#ifdef BCMSPI +/* At a watermark around 8 the spid hits underflow error. 
*/ +static uint watermark = 32; +static uint mesbusyctrl = 0; +#else +static uint watermark = 8; +static uint mesbusyctrl = 0; +#endif /* BCMSPI */ +static const uint firstread = DHD_FIRSTREAD; + +/* Retry count for register access failures */ +static const uint retry_limit = 2; + +/* Force even SD lengths (some host controllers mess up on odd bytes) */ +static bool forcealign; + +#if defined(DEBUGGER) +static uint32 dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr); +static void dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val); + +/** the debugger layer will call back into this (bus) layer to read/write dongle memory */ +static struct dhd_dbg_bus_ops_s bus_ops = { + .read_u16 = NULL, + .read_u32 = dhd_sdio_reg_read, + .write_u32 = dhd_sdio_reg_write, +}; +#endif /* DEBUGGER */ + +#define ALIGNMENT 4 + +#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN) +extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable); +#endif // endif + +#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) +#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD +#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */ +#define PKTALIGN(osh, p, len, align) \ + do { \ + uintptr datalign; \ + datalign = (uintptr)PKTDATA((osh), (p)); \ + datalign = ROUNDUP(datalign, (align)) - datalign; \ + ASSERT(datalign < (align)); \ + ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \ + if (datalign) \ + PKTPULL((osh), (p), (uint)datalign); \ + PKTSETLEN((osh), (p), (len)); \ + } while (0) + +/* Limit on rounding up frames */ +static const uint max_roundup = 512; + +/* Try doing readahead */ +static bool dhd_readahead; + +#if defined(BCMSDIOH_TXGLOM_EXT) +bool +dhdsdio_is_dataok(dhd_bus_t *bus) { + return (((uint8)(bus->tx_max - bus->tx_seq) - bus->dhd->conf->tx_max_offset > 1) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)); +} + +uint8 +dhdsdio_get_databufcnt(dhd_bus_t *bus) { + return ((uint8)(bus->tx_max - bus->tx_seq) - 1 - 
bus->dhd->conf->tx_max_offset); +} +#endif + +/* To check if there's window offered */ +#if defined(BCMSDIOH_TXGLOM_EXT) +#define DATAOK(bus) dhdsdio_is_dataok(bus) +#else +#define DATAOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) +#endif + +/* To check if there's window offered for ctrl frame */ +#define TXCTLOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* Number of pkts available in dongle for data RX */ +#if defined(BCMSDIOH_TXGLOM_EXT) +#define DATABUFCNT(bus) dhdsdio_get_databufcnt(bus) +#else +#define DATABUFCNT(bus) \ + ((uint8)(bus->tx_max - bus->tx_seq) - 1) +#endif + +/* Macros to get register read/write status */ +/* NOTE: these assume a local dhdsdio_bus_t *bus! */ +#define R_SDREG(regvar, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + regvar = R_REG(bus->dhd->osh, regaddr); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) { \ + DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + regvar = 0; \ + } \ + } \ +} while (0) + +#define W_SDREG(regval, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + W_REG(bus->dhd->osh, regaddr, regval); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) \ + DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + } \ +} while (0) + +#define BUS_WAKE(bus) \ + do { \ + bus->idlecount = 0; \ + if ((bus)->sleeping) \ + dhdsdio_bussleep((bus), FALSE); \ + } while (0); + +/* + * pktavail interrupts from dongle to host can be managed in 3 different ways + * whenever there is a packet available in dongle to transmit to host. + * + * Mode 0: Dongle writes the software host mailbox and host is interrupted. 
+ * Mode 1: (sdiod core rev >= 4) + * Device sets a new bit in the intstatus whenever there is a packet + * available in fifo. Host can't clear this specific status bit until all the + * packets are read from the FIFO. No need to ack dongle intstatus. + * Mode 2: (sdiod core rev >= 4) + * Device sets a bit in the intstatus, and host acks this by writing + * one to this bit. Dongle won't generate anymore packet interrupts + * until host reads all the packets from the dongle and reads a zero to + * figure that there are no more packets. No need to disable host ints. + * Need to ack the intstatus. + */ + +#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */ +#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */ +#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */ + +#ifdef BCMSPI + +#define FRAME_AVAIL_MASK(bus) I_HMB_FRAME_IND + +#define DHD_BUS SPI_BUS + +/* check packet-available-interrupt in piggybacked dstatus */ +#define PKT_AVAILABLE(bus, intstatus) (bcmsdh_get_dstatus(bus->sdh) & STATUS_F2_PKT_AVAILABLE) + +#define HOSTINTMASK (I_HMB_FC_CHANGE | I_HMB_HOST_INT) + +#define GSPI_PR55150_BAILOUT \ +do { \ + uint32 dstatussw = bcmsdh_get_dstatus((void *)bus->sdh); \ + uint32 dstatushw = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL); \ + uint32 intstatuserr = 0; \ + uint retries = 0; \ + \ + R_SDREG(intstatuserr, &bus->regs->intstatus, retries); \ + printf("dstatussw = 0x%x, dstatushw = 0x%x, intstatus = 0x%x\n", \ + dstatussw, dstatushw, intstatuserr); \ + \ + bus->nextlen = 0; \ + *finished = TRUE; \ +} while (0) + +#else /* BCMSDIO */ + +#define FRAME_AVAIL_MASK(bus) \ + ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? 
I_HMB_FRAME_IND : I_XMTDATA_AVAIL) + +#define DHD_BUS SDIO_BUS + +#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus))) + +#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) + +#define GSPI_PR55150_BAILOUT + +#endif /* BCMSPI */ + +#ifdef SDTEST +static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq); +static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count); +#endif // endif + +static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG +static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror); +#endif /* DHD_DEBUG */ + +#if defined(DHD_FW_COREDUMP) +static int dhdsdio_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ +static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap); +static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); + +static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_disconnect(void *ptr); +static bool dhdsdio_chipmatch(uint16 chipid); +static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh, + void * regsva, uint16 devid); +static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh); +static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh); +static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, + bool reset_flag); + +static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size); +static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); +static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry); +static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt); +static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int 
chan, int txseq, + int prev_chain_total_len, bool last_chained_pkt, + int *pad_pkt_len, void **new_pkt +#if defined(BCMSDIOH_TXGLOM_EXT) + , int first_frame +#endif +); +static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt); + +static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_firmware(dhd_bus_t *bus); + +#ifdef DHD_UCODE_DOWNLOAD +static int dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path); +#endif /* DHD_UCODE_DOWNLOAD */ +static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path); +static int dhdsdio_download_nvram(dhd_bus_t *bus); +static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep); +static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok); +static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus); +static bool dhdsdio_dpc(dhd_bus_t *bus); +static int dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len); +static int dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode); +static int dhdsdio_sdclk(dhd_bus_t *bus, bool on); +static void dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp); +static void dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp); + +#if defined(BT_OVER_SDIO) +static int extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value); +static int read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, + uint16 * hi_addr, uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes); +static int dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_btfw(struct dhd_bus *bus); +#endif /* defined (BT_OVER_SDIO) */ + +#ifdef DHD_ULP +#include +static int dhd_bus_ulp_reinit_fw(dhd_bus_t *bus); +#endif /* DHD_ULP */ + +static void +dhdsdio_tune_fifoparam(struct dhd_bus *bus) +{ + int err; + uint8 devctl, wm, mes; + + if (bus->sih->buscorerev >= 15) { + /* See .ppt in PR for these recommended values */ + if (bus->blocksize == 512) { + wm = OVERFLOW_BLKSZ512_WM; + mes = 
OVERFLOW_BLKSZ512_MES;
+		} else {
+			/* Non-512-byte block sizes: scale both thresholds to a quarter block */
+			mes = bus->blocksize/4;
+			wm = bus->blocksize/4;
+		}
+
+		watermark = wm;
+		mesbusyctrl = mes;
+	} else {
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than minimal requested ver\n",
+			bus->sih->buscorerev));
+		return;
+	}
+
+	/* Update watermark */
+	if (wm > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+
+		devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+	}
+
+	/* Update MES */
+	if (mes > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			(mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+	}
+
+	/* Read back all three registers purely for the diagnostic trace */
+	DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
+}
+
+/* Clamp the dongle RAM size used by the host to the module-level override
+ * dhd_dongle_ramsize (only if it lies between DONGLE_MIN_RAMSIZE and the
+ * probed bus->orig_ramsize).
+ * NOTE(review): the mem_size parameter is not used here; the global
+ * dhd_dongle_ramsize is consulted instead -- confirm with callers.
+ */
+static void
+dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size = DONGLE_MIN_RAMSIZE;
+	/* Restrict the ramsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_ramsize, min_size));
+	if ((dhd_dongle_ramsize > min_size) &&
+		(dhd_dongle_ramsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_ramsize;
+}
+
+/* Program the SDIO function-1 backplane address window registers
+ * (SBADDR low/mid/high) so subsequent window accesses target 'address'.
+ * Stops at the first failed write and returns that error (0 on success).
+ */
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+		(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+			(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+			(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
+
+#ifdef BCMSPI
+/* gSPI only: set/clear the WAKE_UP bit in the SPI device config register to
+ * wake the WLAN function (or let it sleep again).
+ */
+static void
+dhdsdio_wkwlan(dhd_bus_t *bus, bool on)
+{
+	int err;
+	uint32 regdata;
+	
bcmsdh_info_t *sdh = bus->sdh;
+
+	if (bus->sih->buscoretype == SDIOD_CORE_ID) {
+		/* wake up wlan function :WAKE_UP goes as ht_avail_request and alp_avail_request */
+		regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
+		DHD_INFO(("F0 REG0 rd = 0x%x\n", regdata));
+
+		if (on == TRUE)
+			regdata |= WAKE_UP;
+		else
+			regdata &= ~WAKE_UP;
+
+		bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
+	}
+}
+#endif /* BCMSPI */
+
+#ifdef USE_OOB_GPIO1
+/* Configure GPIO1 as an out-of-band device wake source: enable the GPIO
+ * output and set the GPIO1-wakeup bit via the chipcommon indirect
+ * chipcontrol address/data register pair. Always returns 0.
+ */
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+	uint32 val, addr, data;
+
+	bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+	addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	/* Set device for gpio1 wakeup */
+	bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+	val = bcmsdh_reg_read(bus->sdh, data, 4);
+	val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+	bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+	bus->_oobwakeup = TRUE;
+
+	return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+#ifndef BCMSPI
+/*
+ * Query if FW is in SR (save/restore) mode. For 43430/43018 this is read
+ * directly from sr_control1; other chips consult a chip-ID whitelist or the
+ * PMU core_cap_ext register.
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+	bool cap = FALSE;
+	uint32 core_capext, addr, data;
+
+	if (bus->sih->chip == BCM43430_CHIP_ID ||
+		bus->sih->chip == BCM43018_CHIP_ID) {
+		/* check if fw initialized sr engine */
+		addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, sr_control1);
+		if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0)
+			cap = TRUE;
+
+		return cap;
+	}
+	/* NOTE(review): dead "if (0)" branch appears to be a remnant of a
+	 * conditionally-compiled chip list in the vendor source.
+	 */
+	if (
+		0) {
+		core_capext = FALSE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		BCM4345_CHIP(bus->sih->chip) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM43569_CHIP_ID) ||
+		(bus->sih->chip == BCM4371_CHIP_ID) ||
+		(BCM4349_CHIP(bus->sih->chip)) ||
+		(bus->sih->chip == BCM4350_CHIP_ID) ||
+		(bus->sih->chip == BCM4362_CHIP_ID) ||
+		(bus->sih->chip == BCM43012_CHIP_ID) ||
+		(bus->sih->chip == 
BCM43014_CHIP_ID) || + (bus->sih->chip == BCM43751_CHIP_ID)) { + core_capext = TRUE; + } else { + core_capext = bcmsdh_reg_read(bus->sdh, + si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, core_cap_ext)), + 4); + core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK); + } + if (!(core_capext)) + return FALSE; + + if ((bus->sih->chip == BCM4335_CHIP_ID) || + (bus->sih->chip == BCM4339_CHIP_ID) || + BCM4345_CHIP(bus->sih->chip) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID) || + (bus->sih->chip == BCM43569_CHIP_ID) || + (bus->sih->chip == BCM4371_CHIP_ID) || + (bus->sih->chip == BCM4350_CHIP_ID)) { + uint32 enabval = 0; + addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data); + bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3); + enabval = bcmsdh_reg_read(bus->sdh, data, 4); + + if ((bus->sih->chip == BCM4350_CHIP_ID) || + BCM4345_CHIP(bus->sih->chip) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID) || + (bus->sih->chip == BCM43569_CHIP_ID) || + (bus->sih->chip == BCM4371_CHIP_ID)) + enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE; + + if (enabval) + cap = TRUE; + } else { + data = bcmsdh_reg_read(bus->sdh, + si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, retention_ctl)), + 4); + if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0) + cap = TRUE; + } + + return cap; +} + +static int +dhdsdio_sr_init(dhd_bus_t *bus) +{ + uint8 val; + int err = 0; + + if (bus->sih->chip == BCM43012_CHIP_ID) { + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, + 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT, &err); + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + } else { + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, 
NULL); + val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, + 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err); + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + } + +#ifdef USE_CMD14 + /* Add CMD14 Support */ + dhdsdio_devcap_set(bus, + (SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT)); +#endif /* USE_CMD14 */ + + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID || + CHIPID(bus->sih->chip) == BCM4339_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43012_CHIP_ID || + CHIPID(bus->sih->chip) == BCM4362_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43014_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43751_CHIP_ID) + dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC); + + if (bus->sih->chip == BCM43012_CHIP_ID) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ, &err); + } else { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err); + } + bus->_slpauto = dhd_slpauto ? TRUE : FALSE; + + bus->_srenab = TRUE; + + return 0; +} +#endif /* BCMSPI */ + +/* + * FIX: Be sure KSO bit is enabled + * Currently, it's defaulting to 0 which should be 1. + */ +static int +dhdsdio_clk_kso_init(dhd_bus_t *bus) +{ + uint8 val; + int err = 0; + + /* set flag */ + bus->kso = TRUE; + + /* + * Enable KeepSdioOn (KSO) bit for normal operation + * Default is 0 (4334A0) so set it. Fixed in B0. 
+ */ + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL); + if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err); + if (err) + DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err)); + } + + return 0; +} + +#define KSO_DBG(x) +#define KSO_WAIT_US 50 +#define KSO_WAIT_MS 1 +#define KSO_SLEEP_RETRY_COUNT 20 +#define KSO_WAKE_RETRY_COUNT 100 +#define ERROR_BCME_NODEVICE_MAX 1 + +#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US) +#ifndef CUSTOM_MAX_KSO_ATTEMPTS +#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS +#endif // endif + +static int +dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on) +{ + uint8 wr_val = 0, rd_val, cmp_val, bmask; + int err = 0; + int try_cnt = 0; + + KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"))); + + wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + + /* In case of 43012 chip, the chip could go down immediately after KSO bit is cleared. + * So the further reads of KSO register could fail. Thereby just bailing out immediately + * after clearing KSO bit, to avoid polling of KSO bit. 
+ */ + if ((!on) && (bus->sih->chip == BCM43012_CHIP_ID)) { + return err; + } + + if (on) { + cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK; + bmask = cmp_val; + + OSL_SLEEP(3); + + } else { + /* Put device to sleep, turn off KSO */ + cmp_val = 0; + bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK; + } + + do { + rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err); + if (((rd_val & bmask) == cmp_val) && !err) + break; + + KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err)); + + if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) { + OSL_SLEEP(KSO_WAIT_MS); + } else + OSL_DELAY(KSO_WAIT_US); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + } while (try_cnt++ < CUSTOM_MAX_KSO_ATTEMPTS); + + if (try_cnt > 2) + KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); + + if (try_cnt > CUSTOM_MAX_KSO_ATTEMPTS) { + DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? 
"KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+	}
+
+	return err;
+}
+
+/* "kso" iovar handler: explicitly turn KeepSdioOn off (allow device sleep)
+ * or back on, bringing up the needed bus/backplane clocks first.
+ * NOTE(review): always returns 0 and discards cfg-access errors
+ * (BCM_REFERENCE below), so callers cannot observe failure.
+ */
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0;
+
+	if (on == FALSE) {
+
+		/* Wake the bus and request the backplane clock before clearing
+		 * KSO so the device actually sees the request.
+		 */
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+		dhdsdio_clk_kso_enab(bus, FALSE);
+	} else {
+		DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		dhdsdio_clk_kso_enab(bus, TRUE);
+
+		DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+			dhdsdio_sleepcsr_get(bus)));
+	}
+
+	bus->kso = on;
+	BCM_REFERENCE(err);
+
+	return 0;
+}
+
+/* Read the function-1 SLEEPCSR register. Read errors are only traced;
+ * the value returned by the failed read is passed through as-is.
+ */
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+	int err = 0;
+	uint8 val = 0;
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+	if (err)
+		DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+	return val;
+}
+
+/* Read the Broadcom card-capability register from function 0 (CCCR space). */
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+	return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+/* Write the Broadcom card-capability register (function 0).
+ * NOTE(review): errors are logged only; always returns 0.
+ */
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+	int err = 0;
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+	if (err)
+		DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+	return 0;
+}
+
+/* Enter (on=TRUE) or exit (on=FALSE) device sleep for SLPAUTO operation --
+ * via CMD14 when USE_CMD14 is defined, otherwise via the KSO bit and the
+ * optional OOB wake GPIO.
+ */
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0, retry;
+	uint8 val;
+
+	retry = 0;
+	if (on == TRUE) {
+		/* Enter Sleep */
+
+		/* Be sure we request clk before going to sleep
+		 * so we can wake-up with clk request already set
+		 * else device can go back to sleep immediately
+		 */
+		if (!SLPAUTO_ENAB(bus))
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		else {
+			val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if ((val & SBSDIO_CSR_MASK) == 0) {
+				DHD_ERROR(("%s: No clock 
before enter sleep:0x%x\n", + __FUNCTION__, val)); + + /* Reset clock request */ + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_ALP_AVAIL_REQ, &err); + DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__, + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); + } + } + + DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__, + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); +#ifdef USE_CMD14 + err = bcmsdh_sleep(bus->sdh, TRUE); +#else + if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) { + if (sd1idle) { + /* Change to SD1 mode */ + dhdsdio_set_sdmode(bus, 1); + } + } + + err = dhdsdio_clk_kso_enab(bus, FALSE); + if (OOB_WAKEUP_ENAB(bus)) + { + err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */ + } +#endif /* USE_CMD14 */ + + if ((SLPAUTO_ENAB(bus)) && (bus->idleclock != DHD_IDLE_ACTIVE)) { + DHD_TRACE(("%s: Turnoff SD clk\n", __FUNCTION__)); + /* Now remove the SD clock */ + err = dhdsdio_sdclk(bus, FALSE); + } + } else { + /* Exit Sleep */ + /* Make sure we have SD bus access */ + if (bus->clkstate == CLK_NONE) { + DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__)); + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } +#ifdef USE_CMD14 + err = bcmsdh_sleep(bus->sdh, FALSE); + if (SLPAUTO_ENAB(bus) && (err != 0)) { + OSL_DELAY(10000); + DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__)); + + /* Toggle sleep to resync with host and device */ + err = bcmsdh_sleep(bus->sdh, TRUE); + OSL_DELAY(10000); + err = bcmsdh_sleep(bus->sdh, FALSE); + + if (err) { + OSL_DELAY(10000); + DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__)); + + /* Toggle sleep to resync with host and device */ + err = bcmsdh_sleep(bus->sdh, TRUE); + OSL_DELAY(10000); + err = bcmsdh_sleep(bus->sdh, FALSE); + if (err) { + DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__)); + DHD_ERROR(("%s: FATAL: Device non-response!\n", + __FUNCTION__)); + err = 0; + } + } + } 
+#else + if (OOB_WAKEUP_ENAB(bus)) + { + err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */ + } + do { + err = dhdsdio_clk_kso_enab(bus, TRUE); + if (err) + OSL_SLEEP(10); + } while ((err != 0) && (++retry < 3)); + + if (err != 0) { + DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry)); +#ifndef BT_OVER_SDIO + err = 0; /* continue anyway */ +#endif /* BT_OVER_SDIO */ + } + + if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) { + dhdsdio_set_sdmode(bus, bus->sd_mode); + } +#endif /* !USE_CMD14 */ + + if (err == 0) { + uint8 csr; + + /* Wait for device ready during transition to wake-up */ + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = dhdsdio_sleepcsr_get(bus)) & + SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) != + (SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000)); + + DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr)); + + if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) { + DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) != + (SBSDIO_HT_AVAIL)), (DHD_WAIT_HTAVAIL)); + + DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr)); + if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) { + DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + } + } + + /* Update if successful */ + if (err == 0) + bus->kso = on ? 
FALSE : TRUE; + else { + DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n", + __FUNCTION__, bus->kso, on, err)); + if (!on && retry > 2) + bus->kso = FALSE; + } + + return err; +} + +/* Turn backplane clock on or off */ +static int +dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) +{ +#define HT_AVAIL_ERROR_MAX 10 + static int ht_avail_error = 0; + int err; + uint8 clkctl, clkreq, devctl; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + clkctl = 0; + sdh = bus->sdh; + + if (!KSO_ENAB(bus)) + return BCME_OK; + + if (SLPAUTO_ENAB(bus)) { + bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY); + return BCME_OK; + } + + if (on) { + /* Request HT Avail */ + clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; + +#ifdef BCMSPI + dhdsdio_wkwlan(bus, TRUE); +#endif /* BCMSPI */ + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + if (err) { + ht_avail_error++; + if (ht_avail_error < HT_AVAIL_ERROR_MAX) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + else if (ht_avail_error == HT_AVAIL_ERROR_MAX) { + bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR; + dhd_os_send_hang_message(bus->dhd); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + return BCME_ERROR; + } else { + ht_avail_error = 0; + } + + /* Check current status */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + +#if !defined(OOB_INTR_ONLY) + /* Go to pending and await interrupt if appropriate */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) { + /* Allow only clock-available interrupt */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: Devctl access error setting CA: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + devctl |= 
SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + DHD_INFO(("CLKCTL: set PENDING\n")); + bus->clkstate = CLK_PENDING; + return BCME_OK; + } else +#endif /* !defined (OOB_INTR_ONLY) */ + { + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + } +#ifndef BCMSDIOLITE + /* Otherwise, wait here (polling) for HT Avail */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)), + !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY); + } + if (err) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n", + __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl)); + return BCME_ERROR; + } +#endif /* BCMSDIOLITE */ + /* Mark clock available */ + bus->clkstate = CLK_AVAIL; + DHD_INFO(("CLKCTL: turned ON\n")); + +#if defined(DHD_DEBUG) + if (bus->alp_only == TRUE) { +#if !defined(BCMLXSDMMC) + if (!SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__)); + } +#endif /* !defined(BCMLXSDMMC) */ + } else { + if (SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__)); + } + } +#endif /* defined (DHD_DEBUG) */ + + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* DHD_USE_IDLECOUNT */ + } else { + clkreq = 0; + + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + 
bus->clkstate = CLK_SDONLY;
+		if (!SR_ENAB(bus)) {
+			/* Without save/restore support, explicitly drop the
+			 * HT/ALP request in CHIPCLKCSR (clkreq == 0 here).
+			 */
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+			DHD_INFO(("CLKCTL: turned OFF\n"));
+			if (err) {
+				DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+#ifdef BCMSPI
+		dhdsdio_wkwlan(bus, FALSE);
+#endif /* BCMSPI */
+	}
+	return BCME_OK;
+}
+
+/* Change SD1/SD4 bus mode */
+/* Switch the host controller bus width via the "sd_mode" iovar.
+ * Returns BCME_OK or BCME_ERROR.
+ */
+static int
+dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode)
+{
+	int err;
+
+	err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+		&sd_mode, sizeof(sd_mode), TRUE);
+	if (err) {
+		DHD_ERROR(("%s: error changing sd_mode: %d\n",
+			__FUNCTION__, err));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state */
+/* on=TRUE: restore the SD clock (or its divisor) per bus->idleclock policy
+ * and mark the bus CLK_SDONLY; on=FALSE: stop or slow the SD clock and mark
+ * it CLK_NONE. Compiled out (no-op) for BCMSPI builds.
+ */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+#ifndef BCMSPI
+	int err;
+	int32 iovalue;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (on) {
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			/* Turn on clock and restore mode */
+			iovalue = 1;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+				&iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+					__FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Restore clock speed */
+			iovalue = bus->sd_divisor;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+				&iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+					__FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_SDONLY;
+	} else {
+		/* Stop or slow the SD clock itself */
+		if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+			DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+				__FUNCTION__, bus->sd_divisor, bus->sd_mode));
+			return BCME_ERROR;
+		}
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			iovalue = 0;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+				&iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error disabling 
sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Set divisor to idle value */ + iovalue = bus->idleclock; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_NONE; + } +#endif /* BCMSPI */ + + return BCME_OK; +} + +/* Transition SD and backplane clock readiness */ +static int +dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) +{ + int ret = BCME_OK; +#ifdef DHD_DEBUG + uint oldstate = bus->clkstate; +#endif /* DHD_DEBUG */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Early exit if we're already there */ + if (bus->clkstate == target) { + if (target == CLK_AVAIL) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* DHD_USE_IDLECOUNT */ + } + return ret; + } + + switch (target) { + case CLK_AVAIL: + /* Make sure SD clock is available */ + if (bus->clkstate == CLK_NONE) + dhdsdio_sdclk(bus, TRUE); + /* Now request HT Avail on the backplane */ + ret = dhdsdio_htclk(bus, TRUE, pendok); + if (ret == BCME_OK) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* DHD_USE_IDLECOUNT */ + } + break; + + case CLK_SDONLY: + +#ifdef BT_OVER_SDIO + /* + * If the request is to switch off Back plane clock, + * confirm that BT is inactive before doing so. + * If this call had come from Non Watchdog context any way + * the Watchdog would switch off the clock again when + * nothing is to be done & Bt has finished using the bus. 
+ */ + if (bus->bt_use_count != 0) { + DHD_INFO(("%s(): Req CLK_SDONLY, BT is active %d not switching off \r\n", + __FUNCTION__, bus->bt_use_count)); + ret = BCME_OK; + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + break; + } + + DHD_INFO(("%s(): Request CLK_NONE BT is NOT active switching off \r\n", + __FUNCTION__)); +#endif /* BT_OVER_SDIO */ + + /* Remove HT request, or bring up SD clock */ + if (bus->clkstate == CLK_NONE) + ret = dhdsdio_sdclk(bus, TRUE); + else if (bus->clkstate == CLK_AVAIL) + ret = dhdsdio_htclk(bus, FALSE, FALSE); + else + DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n", + bus->clkstate, target)); + if (ret == BCME_OK) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + } + break; + + case CLK_NONE: + +#ifdef BT_OVER_SDIO + /* + * If the request is to switch off Back plane clock, + * confirm that BT is inactive before doing so. + * If this call had come from Non Watchdog context any way + * the Watchdog would switch off the clock again when + * nothing is to be done & Bt has finished using the bus. 
	 */
	if (bus->bt_use_count != 0) {
		DHD_INFO(("%s(): Request CLK_NONE BT is active %d not switching off \r\n",
			__FUNCTION__, bus->bt_use_count));
		ret = BCME_OK;
		break;
	}

	DHD_INFO(("%s(): Request CLK_NONE BT is NOT active switching off \r\n",
		__FUNCTION__));
#endif /* BT_OVER_SDIO */

	/* Make sure to remove HT request */
	if (bus->clkstate == CLK_AVAIL)
		ret = dhdsdio_htclk(bus, FALSE, FALSE);
	/* Now remove the SD clock */
	ret = dhdsdio_sdclk(bus, FALSE);
#ifdef DHD_DEBUG
	/* Keep the watchdog running while console polling is active */
	if (bus->dhd->dhd_console_ms == 0)
#endif /* DHD_DEBUG */
	if (bus->poll == 0)
		dhd_os_wd_timer(bus->dhd, 0);
	break;
	}
#ifdef DHD_DEBUG
	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
#endif /* DHD_DEBUG */

	return ret;
}

/*
 * Transition the SDIO bus between SLEEP and WAKE power states.
 *
 * bus   - bus handle
 * sleep - TRUE to put the bus to sleep, FALSE to wake it
 *
 * Returns BCME_OK on success or no-op (already in the requested state),
 * BCME_BUSY when pending traffic (or an active BT user, with BT_OVER_SDIO)
 * prevents sleeping, BCME_ERROR if a hang was already reported, or the
 * error from the device-sleep iovar on the SLPAUTO path.
 *
 * NOTE(review): caller appears to be expected to hold the sdlock -- the
 * BT_OVER_SDIO wake-failure path drops and retakes it; confirm at call sites.
 */
static int
dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
{
	int err = 0;
	bcmsdh_info_t *sdh = bus->sdh;
	sdpcmd_regs_t *regs = bus->regs;
	uint retries = 0;
#if defined(BCMSDIOH_STD)
	uint32 sd3_tuning_disable = FALSE;
#endif /* BCMSDIOH_STD */

	DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
		(sleep ? "SLEEP" : "WAKE"),
		(bus->sleeping ? "SLEEP" : "WAKE")));

	/* Once a hang has been reported, refuse further state changes */
	if (bus->dhd->hang_was_sent)
		return BCME_ERROR;

	/* Done if we're already in the requested state */
	if (sleep == bus->sleeping)
		return BCME_OK;

	/* Going to sleep: set the alarm and turn off the lights... */
	if (sleep) {
		/* Don't sleep if something is pending */
#ifdef DHD_USE_IDLECOUNT
		if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq) ||
			bus->readframes || bus->ctrl_frame_stat)
#else
		if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq))
#endif /* DHD_USE_IDLECOUNT */
			return BCME_BUSY;

#ifdef BT_OVER_SDIO
		/*
		 * The following is the assumption based on which the hook is placed.
		 * From WLAN driver, either from the active contexts OR from the Watchdog contexts
		 * we will be attempting to Go to Sleep. At that moment if we see that BT is still
		 * actively using the bus, we will return BCME_BUSY from here, but the bus->sleeping
		 * state would not have changed. So the caller can then schedule the Watchdog again
		 * which will come and attempt to sleep at a later point.
		 *
		 * In case if BT is the only one and is the last user, we don't switch off the clock
		 * immediately, we allow the WLAN to decide when to sleep i.e from the watchdog.
		 * Now if the watchdog becomes active and attempts to switch off the clock and if
		 * another WLAN context is active they are any way serialized with sdlock.
		 */
		if (bus->bt_use_count != 0) {
			DHD_INFO(("%s(): Cannot sleep BT is active \r\n", __FUNCTION__));
			return BCME_BUSY;
		}
#endif /* BT_OVER_SDIO */

		if (!SLPAUTO_ENAB(bus)) {
			/* Disable SDIO interrupts (no longer interested) */
			bcmsdh_intr_disable(bus->sdh);

			/* Make sure the controller has the bus up */
			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);

			/* Tell device to start using OOB wakeup */
			W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
			if (retries > retry_limit)
				DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));

			/* Turn off our contribution to the HT clock request */
			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);

			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
				SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);

			/* Isolate the bus */
			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
				SBSDIO_DEVCTL_PADS_ISO, NULL);
		} else {
			/* Leave interrupts enabled since device can exit sleep and
			 * interrupt host
			 */
			err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
		}

		/* Change state */
		bus->sleeping = TRUE;
#if defined(BCMSDIOH_STD)
		/* Re-tuning must not run while the bus is asleep */
		sd3_tuning_disable = TRUE;
		err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
			&sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
#endif /* BCMSDIOH_STD */
#if defined(SUPPORT_P2P_GO_PS)
		wake_up(&bus->bus_sleep);
#endif /* SUPPORT_P2P_GO_PS */
	} else {
		/* Waking up: bus power up is ok, set local state */

		if (!SLPAUTO_ENAB(bus)) {
			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);

			/* Force pad isolation off if possible (in case power never toggled) */
			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);

			/* Make sure the controller has the bus up */
			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);

			/* Send misc interrupt to indicate OOB not needed */
			W_SDREG(0, &regs->tosbmailboxdata, retries);
			if (retries <= retry_limit)
				W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);

			if (retries > retry_limit)
				DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));

			/* Make sure we have SD bus access */
			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);

			/* Enable interrupts again */
			if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
				bus->intdis = FALSE;
				bcmsdh_intr_enable(bus->sdh);
			}
		} else {
			err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
#ifdef BT_OVER_SDIO
			/* A KSO-enable failure here is unrecoverable: report a
			 * hang to the framework (dropping sdlock around the
			 * upcall to avoid deadlock).
			 */
			if (err < 0) {
				struct net_device *net = NULL;
				dhd_pub_t *dhd = bus->dhd;
				net = dhd_idx2net(dhd, 0);
				if (net != NULL) {
					DHD_ERROR(("<< WIFI HANG by KSO Enabled failure\n"));
					dhd_os_sdunlock(dhd);
					net_os_send_hang_message(net);
					dhd_os_sdlock(dhd);
				} else {
					DHD_ERROR(("<< WIFI HANG Fail because net is NULL\n"));
				}
			}
#endif /* BT_OVER_SDIO */
		}

		if (err == 0) {
			/* Change state */
			bus->sleeping = FALSE;
#if defined(BCMSDIOH_STD)
			sd3_tuning_disable = FALSE;
			err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
				&sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
#endif /* BCMSDIOH_STD */
		}
	}

	return err;
}

#ifdef BT_OVER_SDIO
/*
 * Call this function to Get the Clock running.
 * Assumes that the caller holds the sdlock.
+ * bus - Pointer to the dhd_bus handle + * can_wait - TRUE if the caller can wait until the clock becomes ready + * FALSE if the caller cannot wait + */ +int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait) +{ + int ret = BCME_ERROR; + + BCM_REFERENCE(owner); + + bus->bt_use_count++; + + /* + * We can call BUS_WAKE, clkctl multiple times, both of the items + * have states and if its already ON, no new configuration is done + */ + + /* Wake up the Dongle FW from SR */ + BUS_WAKE(bus); + + /* + * Make sure back plane ht clk is on + * CLK_AVAIL - Turn On both SD & HT clock + */ + ret = dhdsdio_clkctl(bus, CLK_AVAIL, can_wait); + + DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__, + bus->bt_use_count)); + return ret; +} + +/* + * Call this function to relinquish the Clock. + * Assumes that the caller holds the sdlock. + * bus - Pointer to the dhd_bus handle + * can_wait - TRUE if the caller can wait until the clock becomes ready + * FALSE if the caller cannot wait + */ +int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait) +{ + int ret = BCME_ERROR; + + BCM_REFERENCE(owner); + BCM_REFERENCE(can_wait); + + if (bus->bt_use_count == 0) { + DHD_ERROR(("%s(): Clocks are already turned off \r\n", + __FUNCTION__)); + return ret; + } + + bus->bt_use_count--; + + /* + * When the SDIO Bus is shared between BT & WLAN, we turn Off the clock + * once the last user has relinqushed the same. But there are two schemes + * in that too. We consider WLAN as the bus master (even if its not + * active). Even when the WLAN is OFF the DHD Watchdog is active. + * So this Bus Watchdog is the context whill put the Bus to sleep. 
+ * Refer dhd_bus_watchdog function + */ + + ret = BCME_OK; + DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__, + bus->bt_use_count)); + return ret; +} + +void dhdsdio_reset_bt_use_count(struct dhd_bus *bus) +{ + /* reset bt use count */ + bus->bt_use_count = 0; +} +#endif /* BT_OVER_SDIO */ + +int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size) +{ + int func_blk_size = function_num; + int bcmerr = 0; + int result; + + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size, + sizeof(int), &result, sizeof(int), IOV_GET); + + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num)); + return BCME_ERROR; + } + + if (result != block_size) { + DHD_ERROR(("%s: F%d Block size set from %d to %d\n", + __FUNCTION__, function_num, result, block_size)); + func_blk_size = function_num << 16 | block_size; + bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL, + 0, &func_blk_size, sizeof(int32), IOV_SET); + if (bcmerr != BCME_OK) { + DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__)); + return BCME_ERROR; + } + } + + return BCME_OK; +} + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) +void +dhd_enable_oob_intr(struct dhd_bus *bus, bool enable) +{ +#if defined(BCMSPI_ANDROID) + bcmsdh_intr_enable(bus->sdh); +#elif defined(HW_OOB) || defined(FORCE_WOWLAN) + bcmsdh_enable_hw_oob_intr(bus->sdh, enable); +#else + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (enable == TRUE) { + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + } else { + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + } + + /* Turn off our contribution to the HT clock request */ + 
	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
#endif /* !defined(HW_OOB) */
}
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */

/*
 * Transmit (or queue for deferred transmit) one data packet.
 *
 * bus - bus handle
 * pkt - packet to send; ownership passes to the driver (it is freed or
 *       queued here).
 *
 * If deferred TX is enabled, flow control is asserted, the bus clock is not
 * available, or there is already queued/pending work, the packet is enqueued
 * by priority into bus->txq and the DPC is scheduled; otherwise it is sent
 * immediately under the sdlock. Returns BCME_OK, BCME_NORESOURCE when the
 * priority queue rejects the packet, or the dhdsdio_txpkt() result.
 */
int
dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
{
	int ret = BCME_ERROR;
	osl_t *osh;
	uint datalen, prec;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	osh = bus->dhd->osh;
	datalen = PKTLEN(osh, pkt);

#ifdef SDTEST
	/* Push the test header if doing loopback */
	if (bus->ext_loop) {
		uint8* data;
		PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
		data = PKTDATA(osh, pkt);
		*data++ = SDPCM_TEST_ECHOREQ;
		*data++ = (uint8)bus->loopid++;
		*data++ = (datalen >> 0);
		*data++ = (datalen >> 8);
		datalen += SDPCM_TEST_HDRLEN;
	}
#else /* SDTEST */
	BCM_REFERENCE(datalen);
#endif /* SDTEST */

#ifdef DHD_ULP
	dhd_ulp_set_path(bus->dhd, DHD_ULP_TX_DATA);
#endif /* DHD_ULP */

	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));

	/* move from dhdsdio_sendfromq(), try to orphan skb early */
	if (bus->dhd->conf->orphan_move)
		PKTORPHAN(pkt, bus->dhd->conf->tsq);

	/* Check for existing queue, current flow-control, pending event, or pending clock */
	if (dhd_deferred_tx || bus->fcstate || pktq_n_pkts_tot(&bus->txq) || bus->dpc_sched ||
		(!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
		(bus->clkstate != CLK_AVAIL)) {
		bool deq_ret;
		int pkq_len = 0;

		DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
			pktq_n_pkts_tot(&bus->txq)));
		bus->fcqueued++;

		/* Priority based enq */
		dhd_os_sdlock_txq(bus->dhd);
		deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
		dhd_os_sdunlock_txq(bus->dhd);

		if (!deq_ret) {
			/* Queue rejected the packet: complete and free it
			 * (unless it is owned by the WLFC module).
			 */
#ifdef PROP_TXSTATUS
			if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
#endif /* PROP_TXSTATUS */
			{
#ifdef DHDTCPACK_SUPPRESS
				if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
					DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
						__FUNCTION__, __LINE__));
					dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
				}
#endif /* DHDTCPACK_SUPPRESS */
				dhd_txcomplete(bus->dhd, pkt, FALSE);
				PKTFREE(osh, pkt, TRUE);
			}
			ret = BCME_NORESOURCE;
		} else
			ret = BCME_OK;

		if (dhd_doflow) {
			dhd_os_sdlock_txq(bus->dhd);
			pkq_len = pktq_n_pkts_tot(&bus->txq);
			dhd_os_sdunlock_txq(bus->dhd);
		}
		/* Assert host-side flow control when the queue crosses FCHI */
		if (dhd_doflow && pkq_len >= FCHI) {
			bool wlfc_enabled = FALSE;
#ifdef PROP_TXSTATUS
			wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
				WLFC_UNSUPPORTED);
#endif // endif
			if (!wlfc_enabled && dhd_doflow) {
				dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
			}
		}

#ifdef DHD_DEBUG
		dhd_os_sdlock_txq(bus->dhd);
		if (pktqprec_n_pkts(&bus->txq, prec) > qcount[prec])
			qcount[prec] = pktqprec_n_pkts(&bus->txq, prec);
		dhd_os_sdunlock_txq(bus->dhd);
#endif // endif

		/* Schedule DPC if needed to send queued packet(s) */
		if (dhd_deferred_tx && !bus->dpc_sched) {
			if (bus->dhd->conf->deferred_tx_len) {
				if(dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
					bus->dpc_sched = TRUE;
					dhd_sched_dpc(bus->dhd);
				}
				if(pktq_n_pkts_tot(&bus->txq) >= bus->dhd->conf->deferred_tx_len &&
					dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
					bus->dpc_sched = TRUE;
					dhd_sched_dpc(bus->dhd);
				}
			} else {
				bus->dpc_sched = TRUE;
				dhd_sched_dpc(bus->dhd);
			}
		}
	} else {
		int chan = SDPCM_DATA_CHANNEL;

#ifdef SDTEST
		chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
#endif // endif
		/* Lock: we're about to use shared data/code (and SDIO) */
		dhd_os_sdlock(bus->dhd);

		/* Otherwise, send it now */
		BUS_WAKE(bus);
		/* Make sure back plane ht clk is on, no pending allowed */
		dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);

		ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);

		if (ret != BCME_OK)
			bus->dhd->tx_errors++;
		else
			bus->dhd->dstats.tx_bytes += datalen;

		/* Idle-immediate policy: drop clocks right away when nothing
		 * else is using the bus.
		 */
		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
			NO_OTHER_ACTIVE_BUS_USER(bus)) {
			bus->activity = FALSE;
			dhdsdio_bussleep(bus, TRUE);
			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
		}

		dhd_os_sdunlock(bus->dhd);
	}

	return ret;
}

/* align packet data pointer and packet length to n-byte boundary, process packet headers,
 * a new packet may be allocated if there is not enough head and/or tail from for padding.
 * the caller is responsible for updating the glom size in the head packet (when glom is
 * used)
 *
 * pad_pkt_len: returns the length of extra padding needed from the padding packet, this parameter
 * is taken in tx glom mode only
 *
 * new_pkt: out, pointer of the new packet allocated due to insufficient head room for alignment
 * padding, NULL if not needed, the caller is responsible for freeing the new packet
 *
 * return: positive value - length of the packet, including head and tail padding
 *		 negative value - errors
 */
static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
	int prev_chain_total_len, bool last_chained_pkt,
	int *pad_pkt_len, void **new_pkt
#if defined(BCMSDIOH_TXGLOM_EXT)
	, int first_frame
#endif
)
{
	osl_t *osh;
	uint8 *frame;
	int pkt_len;
	int modulo;
	int head_padding;
	int tail_padding = 0;
	uint32 swheader;
	uint32 swhdr_offset;
	bool alloc_new_pkt = FALSE;
	uint8 sdpcm_hdrlen = bus->txglom_enable ?
		SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
#ifdef PKT_STATICS
	uint16 len;
#endif

	*new_pkt = NULL;
	osh = bus->dhd->osh;

#ifdef DHDTCPACK_SUPPRESS
	if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
		DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
			__FUNCTION__, __LINE__));
		dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
	}
#endif /* DHDTCPACK_SUPPRESS */

	/* Add space for the SDPCM hardware/software headers */
	PKTPUSH(osh, pkt, sdpcm_hdrlen);
	ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));

	frame = (uint8*)PKTDATA(osh, pkt);
	pkt_len = (uint16)PKTLEN(osh, pkt);

#ifdef PKT_STATICS
	/* Per-channel TX accounting (debug statistics only) */
	len = (uint16)PKTLEN(osh, pkt);
	switch(chan) {
		case SDPCM_CONTROL_CHANNEL:
			tx_statics.ctrl_count++;
			tx_statics.ctrl_size += len;
			break;
		case SDPCM_DATA_CHANNEL:
			tx_statics.data_count++;
			tx_statics.data_size += len;
			break;
		case SDPCM_GLOM_CHANNEL:
			tx_statics.glom_count++;
			tx_statics.glom_size += len;
			break;
		case SDPCM_EVENT_CHANNEL:
			tx_statics.event_count++;
			tx_statics.event_size += len;
			break;
		case SDPCM_TEST_CHANNEL:
			tx_statics.test_count++;
			tx_statics.test_size += len;
			break;

		default:
			break;
	}
#endif /* PKT_STATICS */
#ifdef DHD_DEBUG
	if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
		tx_packets[PKTPRIO(pkt)]++;
#endif /* DHD_DEBUG */

	/* align the data pointer, allocate a new packet if there is not enough space (new
	 * packet data pointer will be aligned thus no padding will be needed)
	 */
	head_padding = (uintptr)frame % DHD_SDALIGN;
	if (PKTHEADROOM(osh, pkt) < head_padding) {
		head_padding = 0;
		alloc_new_pkt = TRUE;
	} else {
		uint cur_chain_total_len;
		int chain_tail_padding = 0;

		/* All packets need to be aligned by DHD_SDALIGN */
		modulo = (pkt_len + head_padding) % DHD_SDALIGN;
		tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;

		/* Total pkt chain length needs to be aligned by block size,
		 * unless it is a single pkt chain with total length less than one block size,
		 * which we prefer sending by byte mode.
		 *
		 * Do the chain alignment here if
		 * 1. This is the last pkt of the chain of multiple pkts or a single pkt.
		 * 2-1. This chain is of multiple pkts, or
		 * 2-2. This is a single pkt whose size is longer than one block size.
		 */
		cur_chain_total_len = prev_chain_total_len +
			(head_padding + pkt_len + tail_padding);
		if (last_chained_pkt && bus->blocksize != 0 &&
			(cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
			modulo = cur_chain_total_len % bus->blocksize;
			chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
		}

#ifdef DHDENABLE_TAILPAD
		if (PKTTAILROOM(osh, pkt) < tail_padding) {
			/* We don't have tail room to align by DHD_SDALIGN */
			alloc_new_pkt = TRUE;
			bus->tx_tailpad_pktget++;
		} else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
			/* We have tail room for tail_padding of this pkt itself, but not for
			 * total pkt chain alignment by block size.
			 * Use the padding packet to avoid memory copy if applicable,
			 * otherwise, just allocate a new pkt.
			 */
			if (bus->pad_pkt) {
				*pad_pkt_len = chain_tail_padding;
				bus->tx_tailpad_chain++;
			} else {
				alloc_new_pkt = TRUE;
				bus->tx_tailpad_pktget++;
			}
		} else
			/* This last pkt's tailroom is sufficient to hold both tail_padding
			 * of the pkt itself and chain_tail_padding of total pkt chain
			 */
#endif /* DHDENABLE_TAILPAD */
			tail_padding += chain_tail_padding;
	}

	DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
		__FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));

	if (alloc_new_pkt) {
		void *tmp_pkt;
		int newpkt_size;
		int cur_total_len;

		ASSERT(*pad_pkt_len == 0);

		DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));

		/* head pointer is aligned now, no padding needed */
		head_padding = 0;

		/* update the tail padding as it depends on the head padding, since a new packet is
		 * allocated, the head padding is no longer needed and packet length is changed
		 */

		cur_total_len = prev_chain_total_len + pkt_len;
		if (last_chained_pkt && bus->blocksize != 0 &&
			(cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
			modulo = cur_total_len % bus->blocksize;
			tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
		} else {
			modulo = pkt_len % DHD_SDALIGN;
			tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
		}

		newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
		bus->dhd->tx_realloc++;
		tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
		if (tmp_pkt == NULL) {
			DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
			return BCME_NOMEM;
		}
		PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
		bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
		*new_pkt = tmp_pkt;
		pkt = tmp_pkt;
	}

	if (head_padding)
		PKTPUSH(osh, pkt, head_padding);

	frame = (uint8*)PKTDATA(osh, pkt);
	bzero(frame, head_padding + sdpcm_hdrlen);
	pkt_len = (uint16)PKTLEN(osh, pkt);

	/* the header has the following format
	 * 4-byte HW frame tag: length, ~length (for glom this is the total length)
	 *
	 * 8-byte HW extension flags (glom mode only) as the following:
	 *			 2-byte packet length, excluding HW tag and padding
	 *			 2-byte frame channel and frame flags (e.g. next frame following)
	 *			 2-byte header length
	 *			 2-byte tail padding size
	 *
	 * 8-byte SW frame tags as the following
	 *			 4-byte flags: host tx seq, channel, data offset
	 *			 4-byte flags: TBD
	 */

	swhdr_offset = SDPCM_FRAMETAG_LEN;

	/* hardware frame tag:
	 *
	 * in tx-glom mode, dongle only checks the hardware frame tag in the first
	 * packet and sees it as the total length of the glom (including tail padding),
	 * for each packet in the glom, the packet length needs to be updated, (see
	 * below PKTSETLEN)
	 *
	 * in non tx-glom mode, PKTLEN still need to include tail padding as to be
	 * referred to in sdioh_request_buffer(). The tail length will be excluded in
	 * dhdsdio_txpkt_postprocess().
	 */
#if defined(BCMSDIOH_TXGLOM_EXT)
	if (bus->dhd->conf->txglom_bucket_size)
		tail_padding = 0;
#endif
	*(uint16*)frame = (uint16)htol16(pkt_len);
	*(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
	pkt_len += tail_padding;

	/* hardware extension flags */
	if (bus->txglom_enable) {
		uint32 hwheader1;
		uint32 hwheader2;
#ifdef BCMSDIOH_TXGLOM_EXT
		uint32 act_len = pkt_len - tail_padding;
		uint32 real_pad = 0;
		if(bus->dhd->conf->txglom_ext && !last_chained_pkt) {
			tail_padding = 0;
			if(first_frame == 0) {
				// first pkt, add pad to bucket size - recv offset
				pkt_len = bus->dhd->conf->txglom_bucket_size - TXGLOM_RECV_OFFSET;
			} else {
				// add pad to bucket size
				pkt_len = bus->dhd->conf->txglom_bucket_size;
			}
			swhdr_offset += SDPCM_HWEXT_LEN;
			hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (last_chained_pkt << 24);
			hwheader2 = (pkt_len - act_len) << 16;
			htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
			htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
			real_pad = pkt_len - act_len;

			if (PKTTAILROOM(osh, pkt) < real_pad) {
				DHD_INFO(("%s : insufficient tailroom %d for %d real_pad\n",
					__func__, (int)PKTTAILROOM(osh, pkt), real_pad));
				if (PKTPADTAILROOM(osh, pkt, real_pad)) {
					DHD_ERROR(("CHK1: padding error size %d\n", real_pad));
				} else
					frame = (uint8 *)PKTDATA(osh, pkt);
			}
		} else
#endif
		{
			swhdr_offset += SDPCM_HWEXT_LEN;
			hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
				(last_chained_pkt << 24);
			hwheader2 = (tail_padding) << 16;
			htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
			htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
		}
	}
	PKTSETLEN((osh), (pkt), (pkt_len));

	/* software frame tags */
	swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
		| (txseq % SDPCM_SEQUENCE_WRAP) |
		(((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
	htol32_ua_store(swheader, frame + swhdr_offset);
	htol32_ua_store(0, frame +
		swhdr_offset + sizeof(swheader));

	return pkt_len;
}

/*
 * Undo the SDPCM framing applied by dhdsdio_txpkt_preprocess() after a send
 * attempt: strip tail padding from PKTLEN and pull the SDPCM header plus
 * head padding off the front, restoring the packet for upper-layer
 * completion. Always returns BCME_OK.
 */
static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
{
	osl_t *osh;
	uint8 *frame;
	int data_offset;
	int tail_padding;
	int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);

	(void)osh;
	osh = bus->dhd->osh;

	/* restore pkt buffer pointer, but keeps the header pushed by dhd_prot_hdrpush */
	frame = (uint8*)PKTDATA(osh, pkt);

	DHD_INFO(("%s PKTLEN before postprocess %d",
		__FUNCTION__, PKTLEN(osh, pkt)));

	/* PKTLEN still includes tail_padding, so exclude it.
	 * We shall have head_padding + original pkt_len for PKTLEN afterwards.
	 */
	if (bus->txglom_enable) {
		/* txglom pkts have tail_padding length in HW ext header */
		tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
		PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
		DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
			tail_padding, PKTLEN(osh, pkt)));
	} else {
		/* non-txglom pkts have head_padding + original pkt length in HW frame tag.
		 * We cannot refer to this field for txglom pkts as the first pkt of the chain will
		 * have the field for the total length of the chain.
		 */
		PKTSETLEN(osh, pkt, *(uint16*)frame);
		DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
			*(uint16*)frame, PKTLEN(osh, pkt)));
	}

	data_offset = ltoh32_ua(frame + swhdr_offset);
	data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
	/* Get rid of sdpcm header + head_padding */
	PKTPULL(osh, pkt, data_offset);

	DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
		__FUNCTION__, data_offset, PKTLEN(osh, pkt)));

	return BCME_OK;
}

/*
 * Frame and transmit a chain of up to num_pkt packets (a "glom" when
 * bus->txglom_enable is set) on SDIO channel 'chan'.
 *
 * pkts     - array of packets; always indicated back to the upper layer
 * num_pkt  - number of entries in pkts (must be > 0)
 * free_pkt - TRUE to free packets after completion (when WLFC does not own them)
 *
 * Each packet is preprocessed (aligned/padded, SDPCM headers added); packets
 * needing reallocation are replaced transparently and the replacements freed
 * here. Returns BCME_NOTREADY/BCME_BADARG on argument errors, otherwise the
 * bus send result; tx_seq advances only on success.
 */
static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
{
	int i;
	int ret = 0;
	osl_t *osh;
	bcmsdh_info_t *sdh;
	void *pkt = NULL;
	void *pkt_chain;
	int total_len = 0;
	void *head_pkt = NULL;
	void *prev_pkt = NULL;
	int pad_pkt_len = 0;
	int new_pkt_num = 0;
	void *new_pkts[MAX_TX_PKTCHAIN_CNT];
	bool wlfc_enabled = FALSE;

	if (bus->dhd->dongle_reset)
		return BCME_NOTREADY;

	if (num_pkt <= 0)
		return BCME_BADARG;

	sdh = bus->sdh;
	osh = bus->dhd->osh;
	/* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */
	new_pkts[0] = NULL;

	for (i = 0; i < num_pkt; i++) {
		int pkt_len;
		bool last_pkt;
		void *new_pkt = NULL;

		pkt = pkts[i];
		ASSERT(pkt);
		last_pkt = (i == num_pkt - 1);
		pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
			total_len, last_pkt, &pad_pkt_len, &new_pkt
#if defined(BCMSDIOH_TXGLOM_EXT)
			, i
#endif
			);
		if (pkt_len <= 0)
			goto done;
		if (new_pkt) {
			pkt = new_pkt;
			new_pkts[new_pkt_num++] = new_pkt;
		}
		total_len += pkt_len;

		PKTSETNEXT(osh, pkt, NULL);
		/* insert the packet into the list */
		head_pkt ?
		PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
		prev_pkt = pkt;

	}

	/* Update the HW frame tag (total length) in the first pkt of the glom */
	if (bus->txglom_enable) {
		uint8 *frame;

		total_len += pad_pkt_len;
		frame = (uint8*)PKTDATA(osh, head_pkt);
		*(uint16*)frame = (uint16)htol16(total_len);
		*(((uint16*)frame) + 1) = (uint16)htol16(~total_len);

	}

#ifdef DHDENABLE_TAILPAD
	/* if a padding packet if needed, insert it to the end of the link list */
	if (pad_pkt_len) {
		PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
		PKTSETNEXT(osh, pkt, bus->pad_pkt);
	}
#endif /* DHDENABLE_TAILPAD */

	/* dhd_bcmsdh_send_buf ignores the buffer pointer if he packet
	 * parameter is not NULL, for non packet chian we pass NULL pkt pointer
	 * so it will take the aligned length and buffer pointer.
	 */
	pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
		PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
	if (ret == BCME_OK)
		bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;

	/* if a padding packet was needed, remove it from the link list as it not a data pkt */
	if (pad_pkt_len && pkt)
		PKTSETNEXT(osh, pkt, NULL);

done:
	/* Unchain and strip SDPCM framing from every packet before indication */
	pkt = head_pkt;
	while (pkt) {
		void *pkt_next = PKTNEXT(osh, pkt);
		PKTSETNEXT(osh, pkt, NULL);
		dhdsdio_txpkt_postprocess(bus, pkt);
		pkt = pkt_next;
	}

	/* new packets might be allocated due to insufficient room for padding, but we
	 * still have to indicate the original packets to upper layer
	 */
	for (i = 0; i < num_pkt; i++) {
		pkt = pkts[i];
		wlfc_enabled = FALSE;
#ifdef PROP_TXSTATUS
		if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
			wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
				WLFC_UNSUPPORTED);
		}
#endif /* PROP_TXSTATUS */
		if (!wlfc_enabled) {
			PKTSETNEXT(osh, pkt, NULL);
			dhd_txcomplete(bus->dhd, pkt, ret != 0);
			if (free_pkt)
				PKTFREE(osh, pkt, TRUE);
		}
	}

	for (i = 0; i < new_pkt_num; i++)
		PKTFREE(osh, new_pkts[i], TRUE);

	return ret;
}

/*
 * Drain up to 'maxframes' packets from bus->txq and transmit them, glomming
 * when enabled. Returns the number of frames sent.
 *
 * NOTE(review): on the device-asleep path this returns BCME_NODEVICE (a
 * negative code) although the return type is uint -- callers presumably
 * compare against the sign-converted value; confirm before changing.
 */
static uint
dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
{
	uint cnt = 0;
	uint8 tx_prec_map;
	uint16 txpktqlen = 0;
	uint32 intstatus = 0;
	uint retries = 0;
	osl_t *osh;
	uint datalen = 0;
	dhd_pub_t *dhd = bus->dhd;
	sdpcmd_regs_t *regs = bus->regs;
#ifdef DHD_LOSSLESS_ROAMING
	uint8 *pktdata;
	struct ether_header *eh;
#endif /* DHD_LOSSLESS_ROAMING */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!KSO_ENAB(bus)) {
		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
		return BCME_NODEVICE;
	}

	osh = dhd->osh;
	tx_prec_map = ~bus->flowcontrol;
#ifdef DHD_LOSSLESS_ROAMING
	tx_prec_map &= dhd->dequeue_prec_map;
#endif /* DHD_LOSSLESS_ROAMING */
	for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
		int i;
		int num_pkt = 1;
		void *pkts[MAX_TX_PKTCHAIN_CNT];
		int prec_out;

		dhd_os_sdlock_txq(bus->dhd);
		if (bus->txglom_enable) {
			uint32 glomlimit = (uint32)bus->txglomsize;
#if defined(BCMSDIOH_STD)
			if (bus->blocksize == 64) {
				glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM);
			}
#endif /* BCMSDIOH_STD */
			num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit);
			num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
		}
		num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
		for (i = 0; i < num_pkt; i++) {
			pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
			if (!pkts[i]) {
				DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
					__FUNCTION__));
				ASSERT(0);
				break;
			}
#ifdef DHD_LOSSLESS_ROAMING
			pktdata = (uint8 *)PKTDATA(osh, pkts[i]);
#ifdef BDC
			/* Skip BDC header */
			pktdata += BDC_HEADER_LEN + ((struct bdc_header *)pktdata)->dataOffset;
#endif // endif
			eh = (struct ether_header *)pktdata;
			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
				uint8 prio = (uint8)PKTPRIO(pkts[i]);

				/* Restore to original priority for 802.1X packet */
				if (prio == PRIO_8021D_NC) {
					PKTSETPRIO(pkts[i], dhd->prio_8021x);
				}
			}
#endif /* DHD_LOSSLESS_ROAMING */
			if (!bus->dhd->conf->orphan_move)
				PKTORPHAN(pkts[i], bus->dhd->conf->tsq);
			datalen += PKTLEN(osh, pkts[i]);
		}
		dhd_os_sdunlock_txq(bus->dhd);

		if (i == 0)
			break;
		if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
			dhd->tx_errors++;
		else {
			dhd->dstats.tx_bytes += datalen;
			bus->txglomframes++;
			bus->txglompkts += num_pkt;
		}
		cnt += i;
#ifdef PKT_STATICS
		if (num_pkt) {
			tx_statics.glom_cnt[num_pkt-1]++;
			if (num_pkt > tx_statics.glom_max)
				tx_statics.glom_max = num_pkt;
		}
#endif

		/* In poll mode, need to check for other events */
		if (!bus->intr && cnt)
		{
			/* Check device status, signal pending interrupt */
			R_SDREG(intstatus, &regs->intstatus, retries);
			bus->f2txdata++;
			if (bcmsdh_regfail(bus->sdh))
				break;
			if (intstatus & bus->hostintmask)
				bus->ipend = TRUE;
		}

	}

	if (dhd_doflow) {
		dhd_os_sdlock_txq(bus->dhd);
		txpktqlen = pktq_n_pkts_tot(&bus->txq);
		dhd_os_sdunlock_txq(bus->dhd);
	}

	/* Do flow-control if needed */
	if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
		bool wlfc_enabled = FALSE;
#ifdef PROP_TXSTATUS
		wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
#endif // endif
		if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
			dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
		}
	}

	return cnt;
}

/*
 * Send the control frame previously deferred by dhd_bus_txctl() (stored in
 * bus->ctrl_frame_buf/len). Repairs the frame's embedded TX sequence number
 * if it lags bus->tx_seq, then clears ctrl_frame_stat and wakes the waiter.
 */
static void
dhdsdio_sendpendctl(dhd_bus_t *bus)
{
	bcmsdh_info_t *sdh = bus->sdh;
	int ret;
	uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;

	if (bus->txglom_enable)
		frame_seq += SDPCM_HWEXT_LEN;

	if (*frame_seq != bus->tx_seq) {
		DHD_INFO(("%s IOCTL frame seq lag detected!"
			" frm_seq:%d != bus->tx_seq:%d, corrected\n",
			__FUNCTION__, *frame_seq, bus->tx_seq));
		*frame_seq = bus->tx_seq;
	}

	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
		(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
		NULL, NULL, NULL, 1);
	if (ret == BCME_OK)
		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;

	bus->ctrl_frame_stat = FALSE;
	dhd_wait_event_wakeup(bus->dhd);
}

/*
 * Transmit a control (IOCTL) frame to the dongle.
 *
 * msg/msglen - control message; the SDPCM header is built in the headroom
 *              in front of msg (caller must have reserved it).
 *
 * If no TX credit is available the frame is handed to the DPC and this
 * call blocks until it is sent or times out. Returns 0 on success,
 * -EIO/-ETIMEDOUT style errors otherwise.
 */
int
dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
{
	static int err_nodevice = 0;
	uint8 *frame;
	uint16 len;
	uint32 swheader;
	uint8 doff = 0;
	int ret = -1;
	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus->dhd->dongle_reset)
		return -EIO;

	/* Back the pointer to make a room for bus header */
	frame = msg - sdpcm_hdrlen;
	len = (msglen += sdpcm_hdrlen);

	/* Add alignment padding (optional for ctl frames) */
	if (dhd_alignctl) {
		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
			frame -= doff;
			len += doff;
			msglen += doff;
			bzero(frame, doff + sdpcm_hdrlen);
		}
		ASSERT(doff < DHD_SDALIGN);
	}
	doff += sdpcm_hdrlen;

#ifndef BCMSPI
	/* Round send length to next SDIO block */
	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
		uint16 pad = bus->blocksize - (len % bus->blocksize);
		if ((pad <= bus->roundup) && (pad < bus->blocksize))
			len += pad;
	} else if (len % DHD_SDALIGN) {
		len += DHD_SDALIGN - (len % DHD_SDALIGN);
	}
#endif /* BCMSPI */

	/* Satisfy length-alignment requirements */
	if (forcealign && (len & (ALIGNMENT - 1)))
		len = ROUNDUP(len, ALIGNMENT);

	ASSERT(ISALIGNED((uintptr)frame, 2));

	/* Need to lock here to protect txseq and SDIO tx calls */
	dhd_os_sdlock(bus->dhd);
	/* Optionally wait (unlocked) for TX credit before proceeding */
	if (bus->dhd->conf->txctl_tmo_fix > 0 && !TXCTLOK(bus)) {
		bus->ctrl_wait = TRUE;
		dhd_os_sdunlock(bus->dhd);
		wait_event_interruptible_timeout(bus->ctrl_tx_wait, TXCTLOK(bus),
			msecs_to_jiffies(bus->dhd->conf->txctl_tmo_fix));
		dhd_os_sdlock(bus->dhd);
		bus->ctrl_wait = FALSE;
	}

	BUS_WAKE(bus);

	/* Make sure backplane clock is on */
	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);

	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
	*(uint16*)frame = htol16((uint16)msglen);
	*(((uint16*)frame) + 1) = htol16(~msglen);

	if (bus->txglom_enable) {
		uint32 hwheader1, hwheader2;
		/* Software tag: channel, sequence number, data offset */
		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
			| bus->tx_seq
			| ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN +
			SDPCM_HWEXT_LEN + sizeof(swheader));

		hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
		hwheader2 = (len - (msglen)) << 16;
		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);

		/* In glom mode the HW tag carries the padded total length */
		*(uint16*)frame = htol16(len);
		*(((uint16*)frame) + 1) = htol16(~(len));
	} else {
		/* Software tag: channel, sequence number, data offset */
		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
			| bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
	}

#ifdef DHD_ULP
	dhd_ulp_set_path(bus->dhd, DHD_ULP_TX_CTRL);

	if (!TXCTLOK(bus) || !dhd_ulp_f2_ready(bus->dhd, bus->sdh))
#else
	if (!TXCTLOK(bus))
#endif // endif
	{
		/* No credit: defer the frame to the DPC and wait for it */
		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
			__FUNCTION__, bus->tx_max, bus->tx_seq));
		bus->ctrl_frame_stat = TRUE;
		/* Send from dpc */
		bus->ctrl_frame_buf = frame;
		bus->ctrl_frame_len = len;

		if (!bus->dpc_sched) {
			bus->dpc_sched = TRUE;
			dhd_sched_dpc(bus->dhd);
		}
		if (bus->ctrl_frame_stat) {
			dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
		}

		if (bus->ctrl_frame_stat == FALSE) {
			/* DPC cleared the flag: the frame was sent */
			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
			ret = 0;
		} else {
			bus->dhd->txcnt_timeout++;
			if (!bus->dhd->hang_was_sent) {
				DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
					__FUNCTION__, bus->dhd->txcnt_timeout));
			}
#ifdef DHD_FW_COREDUMP
			/* Collect socram dump */
			if ((bus->dhd->memdump_enabled) &&
				(bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)) {
				/* collect core dump */
				bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_TX;
				dhd_os_sdunlock(bus->dhd);
				dhd_bus_mem_dump(bus->dhd);
				dhd_os_sdlock(bus->dhd);
			}
#endif /* DHD_FW_COREDUMP */
			ret = -1;
			bus->ctrl_frame_stat = FALSE;
			goto done;
		}
	}

	bus->dhd->txcnt_timeout = 0;
	bus->ctrl_frame_stat = TRUE;

	if (ret == -1) {
#ifdef DHD_DEBUG
		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
			prhex("Tx Frame", frame, len);
		} else if (DHD_HDRS_ON()) {
			prhex("TxHdr", frame, MIN(len, 16));
		}
#endif // endif
#ifdef PKT_STATICS
		tx_statics.ctrl_count++;
		tx_statics.ctrl_size += len;
#endif
		ret = dhd_bcmsdh_send_buffer(bus, frame, len);
	}
	bus->ctrl_frame_stat = FALSE;
#ifdef DHD_ULP
	dhd_ulp_enable_cached_sbwad(bus->dhd, bus->sdh);
#endif /* DHD_ULP */

done:
	/* Idle-immediate policy: drop clocks when no one else needs the bus */
	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
		NO_OTHER_ACTIVE_BUS_USER(bus)) {
		bus->activity = FALSE;
		dhdsdio_bussleep(bus, TRUE);
		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
	}

	dhd_os_sdunlock(bus->dhd);

	if (ret)
		bus->dhd->tx_ctlerrs++;
	else
		bus->dhd->tx_ctlpkts++;

	if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) {
#ifdef DHD_PM_CONTROL_FROM_FILE
		if (g_pm_control == TRUE) {
			return -BCME_ERROR;
		} else {
			return -ETIMEDOUT;
		}
#else
		return -ETIMEDOUT;
#endif /* DHD_PM_CONTROL_FROM_FILE */
	}
	if (ret == BCME_NODEVICE)
		err_nodevice++;
	else
		err_nodevice = 0;

	return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ?
-ETIMEDOUT : -EIO : 0; +} + +int +dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + int timeleft; + uint rxlen = 0; + static uint cnt = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, false); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + + if (bus->dhd->conf->ctrl_resched > 0 && !rxlen && timeleft == 0) { + cnt++; + if (cnt <= bus->dhd->conf->ctrl_resched) { + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + if ((status & I_HMB_HOST_INT) || PKT_AVAILABLE(bus, status)) { + DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, status=0x%x\n", + __FUNCTION__, cnt, status)); + bus->ipend = TRUE; + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, true); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + } + } + } else { + cnt = 0; + } + + if (rxlen) { + DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n", + __FUNCTION__, rxlen, msglen)); + } else { + if (timeleft == 0) { +#ifdef DHD_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n", + __FUNCTION__, status)); +#else + DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); +#endif /* DHD_DEBUG */ + if (!bus->dhd->dongle_trap_occured) { +#ifdef DHD_FW_COREDUMP + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; +#endif /* DHD_FW_COREDUMP */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + } else { + DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); + if 
(!bus->dhd->dongle_trap_occured) { +#ifdef DHD_FW_COREDUMP + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_UNKNOWN; +#endif /* DHD_FW_COREDUMP */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + } +#ifdef DHD_FW_COREDUMP + /* Dump the ram image */ + if (bus->dhd->memdump_enabled && !bus->dhd->dongle_trap_occured) + dhdsdio_mem_dump(bus); +#endif /* DHD_FW_COREDUMP */ + } + if (timeleft == 0) { + if (rxlen == 0) + bus->dhd->rxcnt_timeout++; + DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__, + bus->dhd->rxcnt_timeout, rxlen)); +#ifdef DHD_FW_COREDUMP + /* collect socram dump */ + if (bus->dhd->memdump_enabled) { + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_RX; + dhd_bus_mem_dump(bus->dhd); + } +#endif /* DHD_FW_COREDUMP */ + } else { + bus->dhd->rxcnt_timeout = 0; + } + + if (rxlen) + bus->dhd->rx_ctlpkts++; + else + bus->dhd->rx_ctlerrs++; + + if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) { +#ifdef DHD_PM_CONTROL_FROM_FILE + if (g_pm_control == TRUE) { + return -BCME_ERROR; + } else { + return -ETIMEDOUT; + } +#else + return -ETIMEDOUT; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + } + if (bus->dhd->dongle_trap_occured) + return -EREMOTEIO; + + return rxlen ? 
(int)rxlen : -EIO; +} + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_POLLRATE, + IOV_SDREG, + IOV_SBREG, + IOV_SDCIS, + IOV_RAMSIZE, + IOV_RAMSTART, +#ifdef DHD_DEBUG + IOV_CHECKDIED, + IOV_SERIALCONS, +#endif /* DHD_DEBUG */ + IOV_SET_DOWNLOAD_STATE, + IOV_SOCRAM_STATE, + IOV_FORCEEVEN, + IOV_SDIOD_DRIVE, + IOV_READAHEAD, + IOV_SDRXCHAIN, + IOV_ALIGNCTL, + IOV_SDALIGN, + IOV_DEVRESET, + IOV_CPU, +#if defined(USE_SDIOFIFO_IOVAR) + IOV_WATERMARK, + IOV_MESBUSYCTRL, +#endif /* USE_SDIOFIFO_IOVAR */ +#ifdef SDTEST + IOV_PKTGEN, + IOV_EXTLOOP, +#endif /* SDTEST */ + IOV_SPROM, + IOV_TXBOUND, + IOV_RXBOUND, + IOV_TXMINMAX, + IOV_IDLETIME, + IOV_IDLECLOCK, + IOV_SD1IDLE, + IOV_SLEEP, + IOV_DONGLEISOLATION, + IOV_KSO, + IOV_DEVSLEEP, + IOV_DEVCAP, + IOV_VARS, +#ifdef SOFTAP + IOV_FWPATH, +#endif // endif + IOV_TXGLOMSIZE, + IOV_TXGLOMMODE, + IOV_HANGREPORT, + IOV_TXINRX_THRES, + IOV_SDIO_SUSPEND +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + IOV_GDB_SERVER, /**< starts gdb server on given interface */ +#endif /* DEBUGGER || DHD_DSCOPE */ +}; + +const bcm_iovar_t dhdsdio_iovars[] = { + {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 }, + {"sleep", IOV_SLEEP, 0, 0, IOVT_BOOL, 0 }, + {"pollrate", IOV_POLLRATE, 0, 0, IOVT_UINT32, 0 }, + {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 }, + {"idleclock", IOV_IDLECLOCK, 0, 0, IOVT_INT32, 0 }, + {"sd1idle", IOV_SD1IDLE, 0, 0, IOVT_BOOL, 0 }, + {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 }, + {"socram_state", IOV_SOCRAM_STATE, 0, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 }, + {"sdiod_drive", IOV_SDIOD_DRIVE, 0, 0, IOVT_UINT32, 0 }, + {"readahead", IOV_READAHEAD, 0, 0, IOVT_BOOL, 0 }, + {"sdrxchain", IOV_SDRXCHAIN, 0, 0, IOVT_BOOL, 0 }, + {"alignctl", IOV_ALIGNCTL, 0, 0, IOVT_BOOL, 0 }, + {"sdalign", IOV_SDALIGN, 0, 0, IOVT_BOOL, 0 }, + {"devreset", IOV_DEVRESET, 0, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG 
	{"sdreg",	IOV_SDREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
	{"sbreg",	IOV_SBREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
	{"sd_cis",	IOV_SDCIS,	0, 0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
	{"forcealign",	IOV_FORCEEVEN,	0, 0,	IOVT_BOOL,	0 },
	{"txbound",	IOV_TXBOUND,	0, 0,	IOVT_UINT32,	0 },
	{"rxbound",	IOV_RXBOUND,	0, 0,	IOVT_UINT32,	0 },
	{"txminmax",	IOV_TXMINMAX,	0, 0,	IOVT_UINT32,	0 },
	{"cpu",	IOV_CPU,	0, 0,	IOVT_BOOL,	0 },
#ifdef DHD_DEBUG
	{"checkdied",	IOV_CHECKDIED,	0, 0,	IOVT_BUFFER,	0 },
	{"serial",	IOV_SERIALCONS,	0, 0,	IOVT_UINT32,	0 },
#endif /* DHD_DEBUG */
#endif /* DHD_DEBUG */
#ifdef SDTEST
	{"extloop",	IOV_EXTLOOP,	0, 0,	IOVT_BOOL,	0 },
	{"pktgen",	IOV_PKTGEN,	0, 0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
#endif /* SDTEST */
#if defined(USE_SDIOFIFO_IOVAR)
	{"watermark",	IOV_WATERMARK,	0, 0,	IOVT_UINT32,	0 },
	{"mesbusyctrl",	IOV_MESBUSYCTRL,	0, 0,	IOVT_UINT32,	0 },
#endif /* USE_SDIOFIFO_IOVAR */
	{"devcap", IOV_DEVCAP,	0, 0,	IOVT_UINT32,	0 },
	{"dngl_isolation", IOV_DONGLEISOLATION,	0, 0,	IOVT_UINT32,	0 },
	{"kso",	IOV_KSO,	0, 0,	IOVT_UINT32,	0 },
	{"devsleep", IOV_DEVSLEEP,	0, 0,	IOVT_UINT32,	0 },
#ifdef SOFTAP
	{"fwpath", IOV_FWPATH, 0, 0, IOVT_BUFFER, 0 },
#endif // endif
	{"txglomsize", IOV_TXGLOMSIZE, 0, 0, IOVT_UINT32, 0 },
	{"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
	{"txinrx_thres", IOV_TXINRX_THRES, 0, 0, IOVT_INT32, 0 },
	{"sdio_suspend", IOV_SDIO_SUSPEND, 0, 0, IOVT_UINT32, 0 },
#if defined(DEBUGGER) || defined(DHD_DSCOPE)
	{"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
#endif /* DEBUGGER || DHD_DSCOPE */
	/* Sentinel entry terminating the table for bcm_iovar_lookup(). */
	{NULL, 0, 0, 0, 0, 0 }
};

/* Append "<desc> <num/div as a fixed-point percentage-style ratio>" to
 * strbuf with two fractional digits, or "<desc> N/A" when div is 0.
 * q1 is the integer quotient, q2 the scaled remainder (hundredths).
 * Used by dhd_bus_dump() for pkts-per-transfer statistics below.
 */
static void
dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
{
	uint q1, q2;

	if (!div) {
		bcm_bprintf(strbuf, "%s N/A", desc);
	} else {
		q1 = num / div;
		/* hundredths of the remainder, i.e. two decimal places */
		q2 = (100 * (num - (q1 * div))) / div;
		bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
	}
}

/* Dump SDIO bus state and counters into strbuf (debugfs/iovar dump path). */
void
dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
dhd_bus_t *bus = dhdp->bus; +#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKE_EVENT_STATUS) + int i; +#endif // endif + + bcm_bprintf(strbuf, "Bus SDIO structure:\n"); + bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n", + bus->hostintmask, bus->intstatus, bus->sdpcm_ver); + bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n", + bus->fcstate, pktq_n_pkts_tot(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip, + bus->rxlen, bus->rx_seq); + bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n", + bus->intr, bus->intrcount, bus->lastintrs, bus->spurious); + +#ifdef DHD_WAKE_STATUS + bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n", + bcmsdh_get_total_wake(bus->sdh), bus->wake_counts.rxwake, + bus->wake_counts.rcwake); +#ifdef DHD_WAKE_RX_STATUS + bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n", + bus->wake_counts.rx_ucast, bus->wake_counts.rx_mcast, + bus->wake_counts.rx_bcast, bus->wake_counts.rx_arp); + bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n", + bus->wake_counts.rx_multi_ipv4, bus->wake_counts.rx_multi_ipv6, + bus->wake_counts.rx_icmpv6, bus->wake_counts.rx_multi_other); + bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n", + bus->wake_counts.rx_icmpv6_ra, bus->wake_counts.rx_icmpv6_na, + bus->wake_counts.rx_icmpv6_ns); +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + for (i = 0; i < WLC_E_LAST; i++) + if (bus->wake_counts.rc_event[i] != 0) + bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(i), + bus->wake_counts.rc_event[i]); + bcm_bprintf(strbuf, "\n"); +#endif /* DHD_WAKE_EVENT_STATUS */ +#endif /* DHD_WAKE_STATUS */ + + bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n", + bus->pollrate, bus->pollcnt, bus->regfails); + + bcm_bprintf(strbuf, "\nAdditional counters:\n"); +#ifdef DHDENABLE_TAILPAD + bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n", + bus->tx_tailpad_chain, 
bus->tx_tailpad_pktget); +#endif /* DHDENABLE_TAILPAD */ + bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n", + bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong, + bus->rxc_errors); + bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n", + bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq); + bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n", + bus->fc_rcvd, bus->fc_xoff, bus->fc_xon); + bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n", + bus->rxglomfail, bus->rxglomframes, bus->rxglompkts); + bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n", + (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata, + bus->f2txdata, bus->f1regdata); + { + dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts), + bus->dhd->rx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets, + (bus->f2txdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Total: pkts/f2rw", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", + 
(bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount); + bcm_bprintf(strbuf, "\n\n"); + } + +#ifdef SDTEST + if (bus->pktgen_count) { + bcm_bprintf(strbuf, "pktgen config and count:\n"); + bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n", + bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print, + bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen); + bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n", + bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + } +#endif /* SDTEST */ +#ifdef DHD_DEBUG + bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n", + bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not ")); + bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup); +#endif /* DHD_DEBUG */ + bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n", + bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping); + dhd_dump_pct(strbuf, "Tx: glom pct", (100 * bus->txglompkts), bus->dhd->tx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->txglompkts, bus->txglomframes); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "txglomframes %u, txglompkts %u\n", bus->txglomframes, bus->txglompkts); + bcm_bprintf(strbuf, "\n"); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0; + bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0; + bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0; +#ifdef DHDENABLE_TAILPAD + bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0; +#endif /* DHDENABLE_TAILPAD */ + bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0; + bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0; + bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = 
bus->f1regdata = 0;
	bus->txglomframes = bus->txglompkts = 0;
}

#ifdef SDTEST
/* Copy the current packet-generator configuration and counters out of the
 * bus state into the caller's iovar buffer (arg must hold a dhd_pktgen_t).
 * Always returns 0.
 */
static int
dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
{
	dhd_pktgen_t pktgen;

	pktgen.version = DHD_PKTGEN_VERSION;
	pktgen.freq = bus->pktgen_freq;
	pktgen.count = bus->pktgen_count;
	pktgen.print = bus->pktgen_print;
	pktgen.total = bus->pktgen_total;
	pktgen.minlen = bus->pktgen_minlen;
	pktgen.maxlen = bus->pktgen_maxlen;
	pktgen.numsent = bus->pktgen_sent;
	pktgen.numrcvd = bus->pktgen_rcvd;
	pktgen.numfail = bus->pktgen_fail;
	pktgen.mode = bus->pktgen_mode;
	pktgen.stop = bus->pktgen_stop;

	/* bcopy rather than struct assignment: arg is an unaligned byte buffer */
	bcopy(&pktgen, arg, sizeof(pktgen));

	return 0;
}

/* Apply a packet-generator configuration from the caller's iovar buffer.
 * Returns BCME_BADARG on version mismatch, 0 on success. Resets the tick
 * counters and clamps pktgen_len into [minlen, maxlen]; send/receive
 * counters are cleared only when a new run starts (count became nonzero
 * from zero, or the mode changed).
 */
static int
dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
{
	dhd_pktgen_t pktgen;
	uint oldcnt, oldmode;

	bcopy(arg, &pktgen, sizeof(pktgen));
	if (pktgen.version != DHD_PKTGEN_VERSION)
		return BCME_BADARG;

	oldcnt = bus->pktgen_count;
	oldmode = bus->pktgen_mode;

	bus->pktgen_freq = pktgen.freq;
	bus->pktgen_count = pktgen.count;
	bus->pktgen_print = pktgen.print;
	bus->pktgen_total = pktgen.total;
	bus->pktgen_minlen = pktgen.minlen;
	bus->pktgen_maxlen = pktgen.maxlen;
	bus->pktgen_mode = pktgen.mode;
	bus->pktgen_stop = pktgen.stop;

	bus->pktgen_tick = bus->pktgen_ptick = 0;
	bus->pktgen_prev_time = jiffies;
	/* clamp current length into the newly configured [minlen, maxlen] */
	bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
	bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);

	/* Clear counts for a new pktgen (mode change, or was stopped) */
	if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) {
		bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0;
		bus->pktgen_prev_rcvd = bus->pktgen_fail = 0;
	}

	return 0;
}
#endif /* SDTEST */

/* Enable/disable socram devram remap via si_socdevram(); read-modify-write
 * of the remap flag, preserving enable/protect.
 */
static void
dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
{
	uint8 enable, protect, remap;

	si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
	remap = val ?
TRUE : FALSE; + si_socdevram(bus->sih, TRUE, &enable, &protect, &remap); +} + +static int +dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size) +{ + int bcmerror = 0; + uint32 sdaddr; + uint dsize; + uint8 *pdata; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) { + address -= bus->orig_ramsize; + address += SOCDEVRAM_BP_ADDR; + } + + /* Determine initial transfer parameters */ + sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; + if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK) + dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr); + else + dsize = size; + + /* Set the backplane window to include the start address */ + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + goto xfer_done; + } + + /* Do the transfer(s) */ + while (size) { + DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n", + __FUNCTION__, (write ? 
"write" : "read"), dsize, sdaddr, + (address & SBSDIO_SBWINDOW_MASK))); + if (dsize <= MAX_MEM_BUF) { + pdata = bus->membuf; + if (write) + memcpy(bus->membuf, data, dsize); + } else { + pdata = data; + } + if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, pdata, dsize))) { + DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__)); + break; + } + if (dsize <= MAX_MEM_BUF && !write) + memcpy(data, bus->membuf, dsize); + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + break; + } + sdaddr = 0; + dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size); + } + + } + +xfer_done: + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) { + DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__, + bcmsdh_cur_sbwad(bus->sdh))); + } + + return bcmerror; +} + +static int +dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) +{ + uint32 addr; + int rv, i; + uint32 shaddr = 0; + + if (bus->sih == NULL) { + if (bus->dhd && bus->dhd->dongle_reset) { + DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__)); + return BCME_NOTREADY; + } else { + ASSERT(bus->dhd); + ASSERT(bus->sih); + DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__)); + return BCME_ERROR; + } + } + if ((CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) && !dhdsdio_sr_cap(bus)) + bus->srmemsize = 0; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + i = 0; + do { + /* Read last word in memory to determine address of sdpcm_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0) + return rv; + + addr = ltoh32(addr); + + DHD_INFO(("sdpcm_shared address 0x%08X\n", addr)); + + /* + * Check if addr is valid. 
+ * NVRAM length at the end of memory should have been overwritten. + */ + if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) { + if ((bus->srmemsize > 0) && (i++ == 0)) { + shaddr -= bus->srmemsize; + } else { + DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", + __FUNCTION__, addr)); + return BCME_ERROR; + } + } else + break; + } while (i < 2); + + /* Read hndrte_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0) + return rv; + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1) + return BCME_OK; + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { + DHD_ERROR(("%s: sdpcm_shared version %d in dhd " + "is different than sdpcm_shared version %d in dongle\n", + __FUNCTION__, SDPCM_SHARED_VERSION, + sh->flags & SDPCM_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#define CONSOLE_LINE_MAX 192 + +#ifdef DHD_DEBUG +static int +dhdsdio_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return 0; + + if (!KSO_ENAB(bus)) + return 0; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return 
BCME_NOMEM; + } + + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + addr = ltoh32(c->log.buf); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); +#ifdef LOG_INTO_TCPDUMP + dhd_sendup_log(bus->dhd, line, n); +#endif /* LOG_INTO_TCPDUMP */ + } + } +break2: + + return BCME_OK; +} +#endif /* DHD_DEBUG */ + +static int +dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + char *console_buffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + sdpcm_shared_t l_sdpcm_shared; + struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (DHD_NOCHECKDIED_ON()) + return 0; + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. 
+ */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdsdio_readshared(bus, &l_sdpcm_shared)) < 0) + goto done; + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + l_sdpcm_shared.msgtrace_addr, l_sdpcm_shared.console_addr); + + if ((l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (l_sdpcm_shared.assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + l_sdpcm_shared.assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (l_sdpcm_shared.assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + l_sdpcm_shared.assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", l_sdpcm_shared.assert_line); + } + + if (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + trap_t *tr = &bus->dhd->last_trap_info; + bus->dhd->dongle_trap_occured = TRUE; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + l_sdpcm_shared.trap_addr, + (uint8*)tr, sizeof(trap_t))) < 0) + goto done; + + bus->dongle_trap_addr = ltoh32(l_sdpcm_shared.trap_addr); + + dhd_bus_dump_trap_info(bus, &strbuf); + + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) + goto printbuf; + + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) + goto printbuf; + + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) + goto printbuf; + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = 
MALLOC(bus->dhd->osh, console_size))) + goto printbuf; + + if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) + goto printbuf; + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. The macro + * will truncate a lot of the printfs + */ + + if (dhd_msg_level & DHD_ERROR_VAL) + printf("CONSOLE: %s\n", line); + } + } + } + } + +printbuf: + if (l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { + DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + } + +#if defined(DHD_FW_COREDUMP) + if (bus->dhd->memdump_enabled && (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP)) { + /* Mem dump to a file on device */ + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; + dhd_os_sdunlock(bus->dhd); + dhdsdio_mem_dump(bus); + dhd_os_sdlock(bus->dhd); + } +#endif /* #if defined(DHD_FW_COREDUMP) */ + +done: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + + return bcmerror; +} + +#if defined(DHD_FW_COREDUMP) +int +dhd_bus_mem_dump(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + if (dhdp->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__)); + return 0; + } + return dhdsdio_mem_dump(bus); +} + +static int +dhdsdio_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + uint32 start = bus->dongle_ram_base; /* Start address */ + uint read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + + /* Get full mem size */ + size = bus->ramsize; + buf = dhd_get_fwdump_buf(bus->dhd, size); + if (!buf) { + 
DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); + return -1; + } + + dhd_os_sdlock(bus->dhd); + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Read mem content */ + DHD_ERROR(("Dump dongle memory\n")); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) + { + DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); + ret = BCME_ERROR; + break; + } + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + /* schedule a work queue to perform actual memdump. dhd_mem_dump() performs the job */ + if (!ret) { + /* buf, actually soc_ram free handled in dhd_{free,clear} */ + dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + } + + return ret; +} +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t * bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdsdio_mem_dump(bus)); +#else + return -1; +#endif // endif +} + +int +dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->up && +#ifdef DHD_ULP + (DHD_ULP_DISABLED == dhd_ulp_get_ulp_state(bus->dhd)) && +#endif /* DHD_ULP */ + 1) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? 
len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); +err: + return bcmerror; +} + +#ifdef DHD_DEBUG +static int +dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror) +{ + int int_val; + uint32 addr, data, uart_enab = 0; + + addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data); + *bcmerror = 0; + + bcmsdh_reg_write(bus->sdh, addr, 4, 1); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + int_val = bcmsdh_reg_read(bus->sdh, data, 4); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + + if (!set) + return (int_val & uart_enab); + if (enable) + int_val |= uart_enab; + else + int_val &= ~uart_enab; + bcmsdh_reg_write(bus->sdh, data, 4, int_val); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + + return (int_val & uart_enab); +} +#endif // endif + +static int +dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + /* Some ioctls use the bus */ + dhd_os_sdlock(bus->dhd); + + /* Check if dongle is in reset. 
If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + /* + * Special handling for keepSdioOn: New SDIO Wake-up Mechanism + */ + if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) { + dhdsdio_clk_kso_iovar(bus, bool_val); + goto exit; + } else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) { + { + dhdsdio_clk_devsleep_iovar(bus, bool_val); + if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) { + DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n", + bus->dpc_sched)); + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + } + goto exit; + } + + /* Handle sleep stuff before any clock mucking */ + if (vi->varid == IOV_SLEEP) { + if (IOV_ISSET(actionid)) { + bcmerror = dhdsdio_bussleep(bus, bool_val); + } else { + int_val = (int32)bus->sleeping; + bcopy(&int_val, arg, val_size); + } + goto exit; + } + + /* Request clock to allow SDIO accesses */ + if (!bus->dhd->dongle_reset) { + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + + switch (actionid) { + case IOV_GVAL(IOV_INTR): + int_val = (int32)bus->intr; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_INTR): + bus->intr = bool_val; + bus->intdis = FALSE; + if (bus->dhd->up) { + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + // terence 20141207: enbale intdis + bus->intdis = TRUE; + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + } + break; + + case IOV_GVAL(IOV_POLLRATE): + int_val = (int32)bus->pollrate; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POLLRATE): + bus->pollrate = (uint)int_val; + bus->poll = (bus->pollrate != 0); + break; + + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; 
+ + case IOV_SVAL(IOV_IDLETIME): + if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; + + case IOV_GVAL(IOV_IDLECLOCK): + int_val = (int32)bus->idleclock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLECLOCK): + bus->idleclock = int_val; + break; + + case IOV_GVAL(IOV_SD1IDLE): + int_val = (int32)sd1idle; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SD1IDLE): + sd1idle = bool_val; + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_CHECKDIED): + bcmerror = dhdsdio_checkdied(bus, arg, len); + break; +#endif /* DHD_DEBUG */ + + case IOV_GVAL(IOV_RAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_RAMSTART): + int_val = (int32)bus->dongle_ram_base; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_SDIOD_DRIVE): + int_val = (int32)dhd_sdiod_drive_strength; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIOD_DRIVE): + dhd_sdiod_drive_strength = int_val; + si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength); + break; + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_SOCRAM_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdsdio_downloadvars(bus, arg, len); + break; + + case IOV_GVAL(IOV_READAHEAD): + int_val = (int32)dhd_readahead; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_READAHEAD): + if (bool_val && !dhd_readahead) + bus->nextlen = 0; + dhd_readahead = bool_val; + break; + + case IOV_GVAL(IOV_SDRXCHAIN): + int_val = (int32)bus->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDRXCHAIN): + if (bool_val && !bus->sd_rxchain) + bcmerror = BCME_UNSUPPORTED; + else + bus->use_rxchain = bool_val; + break; +#ifndef BCMSPI + case IOV_GVAL(IOV_ALIGNCTL): 
+ int_val = (int32)dhd_alignctl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ALIGNCTL): + dhd_alignctl = bool_val; + break; +#endif /* BCMSPI */ + + case IOV_GVAL(IOV_SDALIGN): + int_val = DHD_SDALIGN; + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_VARS): + if (bus->varsz < (uint)len) + bcopy(bus->vars, arg, bus->varsz); + else + bcmerror = BCME_BUFTOOSHORT; + break; +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uintptr addr; + uint size; + + sd_ptr = (sdreg_t *)params; + + addr = ((uintptr)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uintptr addr; + uint size; + + sd_ptr = (sdreg_t *)params; + + addr = ((uintptr)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + /* Same as above, but offset is not backplane (not SDIO core) */ + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE(bus->sih) + sdreg.offset; + size = sdreg.func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE(bus->sih) + sdreg.offset; + size = sdreg.func; + bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + case IOV_GVAL(IOV_SDCIS): + { + *(char *)arg = 0; + + bcmstrcat(arg, "\nFunc 0\n"); + 
bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 1\n"); + bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 2\n"); + bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + break; + } + + case IOV_GVAL(IOV_FORCEEVEN): + int_val = (int32)forcealign; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCEEVEN): + forcealign = bool_val; + break; + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_TXMINMAX): + int_val = (int32)dhd_txminmax; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXMINMAX): + dhd_txminmax = (uint)int_val; + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_SERIALCONS): + int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror); + if (bcmerror != 0) + break; + + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SERIALCONS): + dhd_serialconsole(bus, TRUE, bool_val, &bcmerror); + break; +#endif /* DHD_DEBUG */ + +#endif /* DHD_DEBUG */ + +#ifdef SDTEST + case IOV_GVAL(IOV_EXTLOOP): + int_val = (int32)bus->ext_loop; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_EXTLOOP): + bus->ext_loop = bool_val; + break; + + case IOV_GVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_get(bus, arg); + break; + + case IOV_SVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_set(bus, arg); + break; +#endif /* SDTEST */ + +#if defined(USE_SDIOFIFO_IOVAR) + case IOV_GVAL(IOV_WATERMARK): + int_val = (int32)watermark; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WATERMARK): + watermark = (uint)int_val; + watermark = (watermark > 
SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark; + DHD_ERROR(("Setting watermark as 0x%x.\n", watermark)); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL); + break; + + case IOV_GVAL(IOV_MESBUSYCTRL): + int_val = (int32)mesbusyctrl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MESBUSYCTRL): + mesbusyctrl = (uint)int_val; + mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK) + ? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl; + DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl)); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, + ((uint8)mesbusyctrl | 0x80), NULL); + break; +#endif // endif + + case IOV_GVAL(IOV_DONGLEISOLATION): + int_val = bus->dhd->dongle_isolation; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DONGLEISOLATION): + bus->dhd->dongle_isolation = bool_val; + break; + + case IOV_SVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n", + __FUNCTION__, bool_val, bus->dhd->dongle_reset, + bus->dhd->busstate)); + + ASSERT(bus->dhd->osh); + /* ASSERT(bus->cl_devid); */ + + /* must release sdlock, since devreset also acquires it */ + dhd_os_sdunlock(bus->dhd); + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + dhd_os_sdlock(bus->dhd); + break; + /* + * softap firmware is updated through module parameter or android private command + */ + + case IOV_GVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__)); + + /* Get its status */ + int_val = (bool) bus->dhd->dongle_reset; + bcopy(&int_val, arg, val_size); + + break; + + case IOV_GVAL(IOV_KSO): + int_val = dhdsdio_sleepcsr_get(bus); + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DEVCAP): + int_val = dhdsdio_devcap_get(bus); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DEVCAP): + dhdsdio_devcap_set(bus, (uint8) int_val); + break; + case IOV_GVAL(IOV_TXGLOMSIZE): + int_val = (int32)bus->txglomsize; + 
bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXGLOMSIZE): + if (int_val > SDPCM_MAXGLOM_SIZE) { + bcmerror = BCME_ERROR; + } else { + bus->txglomsize = (uint)int_val; + } + break; + case IOV_SVAL(IOV_HANGREPORT): + bus->dhd->hang_report = bool_val; + DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report)); + break; + + case IOV_GVAL(IOV_HANGREPORT): + int_val = (int32)bus->dhd->hang_report; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_TXINRX_THRES): + int_val = bus->txinrx_thres; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_TXINRX_THRES): + if (int_val < 0) { + bcmerror = BCME_BADARG; + } else { + bus->txinrx_thres = int_val; + } + break; + + case IOV_GVAL(IOV_SDIO_SUSPEND): + int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIO_SUSPEND): + if (bool_val) { /* Suspend */ + dhdsdio_suspend(bus); + } + else { /* Resume */ + dhdsdio_resume(bus); + } + break; + +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + case IOV_SVAL(IOV_GDB_SERVER): + if (bool_val == TRUE) { + debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih)); + } else { + debugger_close(); + } + break; +#endif /* DEBUGGER || DHD_DSCOPE */ + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + + return bcmerror; +} + +static int +dhdsdio_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize, phys_size; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? 
ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + // terence 20150412: fix for nvram failed to download + if (bus->dhd->conf->chip == BCM43340_CHIP_ID || + bus->dhd->conf->chip == BCM43341_CHIP_ID) { + varsize = varsize ? ROUNDUP(varsize, 64) : 0; + varaddr = (bus->ramsize - 64) - varsize; + } + + varaddr += bus->dongle_ram_base; + + if (bus->vars) { + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) { + if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) { + DHD_ERROR(("PR85623WAR in place\n")); + varsize += 4; + varaddr -= 4; + } + } + + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + + /* Write the vars list */ + bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + return bcmerror; + } + +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) { + MFREE(bus->dhd->osh, vbuffer, varsize); + return BCME_NOMEM; + } + + /* Upload image to verify downloaded contents. 
*/ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + phys_size, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ +#ifdef DHD_DEBUG + if (bcmerror) { + varsizew = 0; + } else +#endif /* DHD_DEBUG */ + { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} + +static int +dhdsdio_download_state(dhd_bus_t *bus, bool enter) +{ + uint retries; + int bcmerror = 0; + int foundcr4 = 0; + + if (!bus->sih) + return BCME_ERROR; + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). 
+ */ + if (enter) { + bus->alp_only = TRUE; + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + foundcr4 = 1; + } else { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } + + if (!foundcr4) { + si_core_disable(bus->sih, 0); + if (bcmsdh_regfail(bus->sdh)) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", + __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Disable remap for download */ + if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih)) + dhdsdio_devram_remap(bus, FALSE); + + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) { + /* Disabling Remap for SRAM_3 */ + si_socram_set_bankpda(bus->sih, 0x3, 0x0); + } + + /* Clear the top bit of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, + (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + } else { + /* For CR4, + * Halt ARM + * Remove ARM reset + * Read RAM base address [0x18_0000] + * [next] Download firmware + * [done at else] Populate the reset vector + * [done at else] Remove ARM halt + */ + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + } + } else { + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; 
+ goto fail; + } + + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + /* Enable remap before ARM reset but after vars. + * No backplane access in remap mode + */ + if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih)) + dhdsdio_devram_remap(bus, TRUE); +#ifdef BCMSDIOLITE + if (!si_setcore(bus->sih, CC_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#else + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#endif // endif + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + /* cr4 has no socram, but tcm's */ + /* write vars */ + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } +#ifdef BCMSDIOLITE + if (!si_setcore(bus->sih, CC_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#else + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } +#endif // endif + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdsdio_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, 
sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + /* verify write */ + bcmerror = dhdsdio_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + + /* now remove reset and halt and continue to run CR4 */ + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Allow HT Clock now that the ARM is running. */ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to SDIOD core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + + return bcmerror; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) { + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Turn on clock in case SD command needs backplane */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set); + + /* Check for bus configuration changes of interest */ + + /* If it was divisor change, read the new one */ + if (set && strcmp(name, "sd_divisor") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", 
NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_divisor = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_divisor)); + } + } + /* If it was a mode change, read the new one */ + if (set && strcmp(name, "sd_mode") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_mode = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_mode)); + } + } + /* Similar check for blocksize change */ + if (set && strcmp(name, "sd_blocksize") == 0) { + int32 fnum = 2; + if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +void +dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + osl_t *osh; + uint32 local_hostintmask; + uint8 saveclk; + uint retries; + int err; + bool wlfc_enabled = FALSE; + unsigned long flags; + + if (!bus->dhd) + return; + + osh = bus->dhd->osh; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_waitlockfree(bus->sdh); + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) { + /* if Firmware already hangs disbale any interrupt */ + bus->dhd->busstate = DHD_BUS_DOWN; + bus->hostintmask = 0; + bcmsdh_intr_disable(bus->sdh); + } else { + + BUS_WAKE(bus); + + if (KSO_ENAB(bus)) { + + /* Enable clock for device interrupts */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Disable and clear interrupts at the chip level also */ + W_SDREG(0, &bus->regs->hostintmask, retries); + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", + __FUNCTION__, err)); + } + + /* Turn off the bus (F2), free any pending packets */ + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); +#ifndef BCMSPI + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); +#endif /* !BCMSPI */ + + /* Clear any pending interrupts now that F2 is disabled */ + W_SDREG(local_hostintmask, &bus->regs->intstatus, retries); + } + + /* Turn off the backplane clock (only) */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + /* Change our idea of bus state */ + 
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + } + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED); +#endif // endif + if (!wlfc_enabled) { +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + dhd_os_sdlock_txq(bus->dhd); + /* Clear the data packet queues */ + pktq_flush(osh, &bus->txq, TRUE); + dhd_os_sdunlock_txq(bus->dhd); + } + + /* Clear any held glomming stuff */ + if (bus->glomd) + PKTFREE(osh, bus->glomd, FALSE); + + if (bus->glom) + PKTFREE(osh, bus->glom, FALSE); + + bus->glom = bus->glomd = NULL; + + /* Clear rx control and wake any waiters */ + bus->rxlen = 0; + dhd_os_ioctl_resp_wake(bus->dhd); + + /* Reset some F2 state stuff */ + bus->rxskip = FALSE; + bus->tx_seq = bus->rx_seq = 0; + + bus->tx_max = 4; + + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); +} + +#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD) +extern uint sd_txglom; +#endif // endif +void +dhd_txglom_enable(dhd_pub_t *dhdp, bool enable) +{ + /* can't enable host txglom by default, some platforms have no + * (or crappy) ADMA support and txglom will cause kernel assertions (e.g. 
+ * panda board) + */ + dhd_bus_t *bus = dhdp->bus; +#ifdef BCMSDIOH_TXGLOM + uint32 rxglom; + int32 ret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BCMSDIOH_STD + if (enable) + enable = sd_txglom; +#endif /* BCMSDIOH_STD */ + + if (enable) { + rxglom = 1; + ret = dhd_iovar(dhdp, 0, "bus:rxglom", (char *)&rxglom, sizeof(rxglom), NULL, 0, + TRUE); + if (ret >= 0) + bus->txglom_enable = TRUE; + else { +#ifdef BCMSDIOH_STD + sd_txglom = 0; +#endif /* BCMSDIOH_STD */ + bus->txglom_enable = FALSE; + } + } else +#endif /* BCMSDIOH_TXGLOM */ + bus->txglom_enable = FALSE; + printf("%s: enable %d\n", __FUNCTION__, bus->txglom_enable); + dhd_conf_set_txglom_params(bus->dhd, bus->txglom_enable); + bcmsdh_set_mode(bus->sdh, bus->dhd->conf->txglom_mode); +} + +int +dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + dhd_timeout_t tmo; + uint retries = 0; + uint8 ready, enable; + int err, ret = 0; +#ifdef BCMSPI + uint32 dstatus = 0; /* gSPI device-status bits */ +#else /* BCMSPI */ + uint8 saveclk; +#endif /* BCMSPI */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + if (bus->sih->chip == BCM43362_CHIP_ID) { + printf("%s: delay 100ms for BCM43362\n", __FUNCTION__); + OSL_DELAY(100000); // terence 20131209: delay for 43362 + } + + /* Make sure backplane clock is on, needed to generate F2 interrupt */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (bus->clkstate != CLK_AVAIL) { + DHD_ERROR(("%s: clock state is wrong. 
state = %d\n", __FUNCTION__, bus->clkstate)); + ret = -1; + goto exit; + } + +#ifdef BCMSPI + /* fake "ready" for spi, wake-wlan would have already enabled F1 and F2 */ + ready = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + enable = 0; + + /* Give the dongle some time to do its thing and set IOR2 */ + dhd_timeout_start(&tmo, WAIT_F2RXFIFORDY * WAIT_F2RXFIFORDY_DELAY * 1000); + while (!enable && !dhd_timeout_expired(&tmo)) { + dstatus = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + + if (enable) { + DHD_ERROR(("Took %u usec before dongle is ready\n", tmo.elapsed)); + enable = ready; + } else { + DHD_ERROR(("dstatus when timed out on f2-fifo not ready = 0x%x\n", dstatus)); + DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed)); + ret = -1; + goto exit; + } + +#else /* !BCMSPI */ + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (!err) { + if (bus->sih->chip == BCM43012_CHIP_ID) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_HT_AVAIL_REQ), &err); + } else { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + } + + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + ret = -1; + goto exit; + } + + /* Enable function 2 (frame transfers) */ + W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT), + &bus->regs->tosbmailboxdata, retries); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + + /* Give the dongle some time to do its thing and set IOR2 */ + dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000); + + ready = 0; + while (ready != enable && !dhd_timeout_expired(&tmo)) + ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL); + +#endif /* 
!BCMSPI */ + + DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n", + __FUNCTION__, enable, ready, tmo.elapsed)); + + /* If F2 successfully enabled, set core and enable interrupts */ + if (ready == enable) { + /* Make sure we're talking to the core. */ +#ifdef BCMSDIOLITE + bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0); + ASSERT(bus->regs != NULL); +#else + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0))) + bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0); + ASSERT(bus->regs != NULL); +#endif // endif + /* Set up the interrupt mask and enable interrupts */ + bus->hostintmask = HOSTINTMASK; + /* corerev 4 could use the newer interrupt logic to detect the frames */ +#ifndef BCMSPI + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) && + (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) { + bus->hostintmask &= ~I_HMB_FRAME_IND; + bus->hostintmask |= I_XMTDATA_AVAIL; + } +#endif /* BCMSPI */ + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + + if (bus->sih->buscorerev < 15) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, + (uint8)watermark, &err); + } + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + /* Need to set fn2 block size to match fn1 block size. + * Requests to fn2 go thru fn1. * + * faltwig has this code contitioned with #if !BCMSPI_ANDROID. + * It would be cleaner to use the ->sdh->block_sz[fno] instead of + * 64, but this layer has no access to sdh types. 
+ */ + + /* bcmsdh_intr_unmask(bus->sdh); */ + + bus->intdis = FALSE; + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); +#ifndef BCMSPI_ANDROID + bcmsdh_intr_enable(bus->sdh); +#endif /* !BCMSPI_ANDROID */ + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + + } + +#ifndef BCMSPI + + else { + /* Disable F2 again */ + enable = SDIO_FUNC_ENABLE_1; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + } + + if (dhdsdio_sr_cap(bus)) { + dhdsdio_sr_init(bus); + /* Masking the chip active interrupt permanantly */ + bus->hostintmask &= ~I_CHIPACTIVE; + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n", + __FUNCTION__, bus->hostintmask)); + } else { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + } +#endif /* !BCMSPI */ + + /* If we didn't come up, turn off backplane clock */ + if (dhdp->busstate != DHD_BUS_DATA) + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + +exit: + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); + + return ret; +} + +static void +dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + uint16 lastrbc; + uint8 hi, lo; + int err; + + DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__, + (abort ? "abort command, " : ""), (rtx ? 
", send NAK" : ""))); + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return; + } + + if (abort) { + bcmsdh_abort(sdh, SDIO_FUNC_2); + } + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__)); + goto fail; + } + bus->f1regdata++; + + /* Wait until the packet has been flushed (device/FIFO stable) */ + for (lastrbc = retries = 0xffff; retries > 0; retries--) { + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_RFAMEBCLO cmd err\n", __FUNCTION__)); + goto fail; + } + + bus->f1regdata += 2; + + if ((hi == 0) && (lo == 0)) + break; + + if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { + DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n", + __FUNCTION__, lastrbc, ((hi << 8) + lo))); + } + lastrbc = (hi << 8) + lo; + } + + if (!retries) { + DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc)); + } else { + DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries))); + } + + if (rtx) { + bus->rxrtx++; + W_SDREG(SMB_NAK, ®s->tosbmailbox, retries); + bus->f1regdata++; + if (retries <= retry_limit) { + bus->rxskip = TRUE; + } + } + + /* Clear partial in any case */ + bus->nextlen = 0; + +fail: + /* If we can't reach the device, signal failure */ + if (err || bcmsdh_regfail(sdh)) + bus->dhd->busstate = DHD_BUS_DOWN; +} + +static void +dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff) +{ + bcmsdh_info_t *sdh = bus->sdh; + uint rdlen, pad; + + int sdret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Control data already received in aligned rxctl */ + if ((bus->bus == SPI_BUS) && (!bus->usebufpool)) + goto gotpkt; + + ASSERT(bus->rxbuf); + /* Set rxctl for frame (w/optional alignment) */ + bus->rxctl = bus->rxbuf; + if 
(dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + + /* Copy the already-read portion over */ + bcopy(hdr, bus->rxctl, firstread); + if (len <= firstread) + goto gotpkt; + + /* Copy the full data pkt in gSPI case and process ioctl. */ + if (bus->bus == SPI_BUS) { + bcopy(hdr, bus->rxctl, len); + goto gotpkt; + } + + /* Raise rdlen to next SDIO block to avoid tail command */ + rdlen = len - firstread; + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((len + pad) < bus->dhd->maxctl)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + /* Drop if the read is too big or it exceeds our maximum */ + if ((rdlen + firstread) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n", + __FUNCTION__, rdlen, bus->dhd->maxctl)); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + if ((len - doff) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", + __FUNCTION__, len, (len - doff), bus->dhd->maxctl)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + /* Read remainder of frame body into the rxctl buffer */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (bus->rxctl + firstread), rdlen, NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret)); + 
bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */ + dhdsdio_rxfail(bus, TRUE, TRUE); + goto done; + } + +gotpkt: + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("RxCtrl", bus->rxctl, len); + } +#endif // endif + + /* Point to valid data and indicate its length */ + bus->rxctl += doff; + bus->rxlen = len - doff; + +done: + /* Awake any waiters */ + dhd_os_ioctl_resp_wake(bus->dhd); +} +int +dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, + void **pkt, uint32 *pkt_count); + +static uint8 +dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) +{ + uint16 dlen, totlen; + uint8 *dptr, num = 0; + + uint16 sublen, check; + void *pfirst, *plast, *pnext; + void * list_tail[DHD_MAX_IFS] = { NULL }; + void * list_head[DHD_MAX_IFS] = { NULL }; + uint8 idx; + osl_t *osh = bus->dhd->osh; + + int errcode; + uint8 chan, seq, doff, sfdoff; + uint8 txmax; + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + + int ifidx = 0; + bool usechain = bus->use_rxchain; + + /* If packets, issue read(s) and send up packet chain */ + /* Return sequence numbers consumed? 
*/ + + DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom)); + + /* If there's a descriptor, generate the packet chain */ + if (bus->glomd) { + dhd_os_sdlock_rxq(bus->dhd); + + pfirst = plast = pnext = NULL; + dlen = (uint16)PKTLEN(osh, bus->glomd); + dptr = PKTDATA(osh, bus->glomd); + if (!dlen || (dlen & 1)) { + DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n", + __FUNCTION__, dlen)); + dlen = 0; + } + + for (totlen = num = 0; dlen; num++) { + /* Get (and move past) next length */ + sublen = ltoh16_ua(dptr); + dlen -= sizeof(uint16); + dptr += sizeof(uint16); + if ((sublen < SDPCM_HDRLEN) || + ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) { + DHD_ERROR(("%s: descriptor len %d bad: %d\n", + __FUNCTION__, num, sublen)); + pnext = NULL; + break; + } + if (sublen % DHD_SDALIGN) { + DHD_ERROR(("%s: sublen %d not a multiple of %d\n", + __FUNCTION__, sublen, DHD_SDALIGN)); + usechain = FALSE; + } + totlen += sublen; + + /* For last frame, adjust read len so total is a block multiple */ + if (!dlen) { + sublen += (ROUNDUP(totlen, bus->blocksize) - totlen); + totlen = ROUNDUP(totlen, bus->blocksize); + } + + /* Allocate/chain packet for next subframe */ + if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) { + DHD_ERROR(("%s: PKTGET failed, num %d len %d\n", + __FUNCTION__, num, sublen)); + break; + } + ASSERT(!PKTLINK(pnext)); + if (!pfirst) { + ASSERT(!plast); + pfirst = plast = pnext; + } else { + ASSERT(plast); + PKTSETNEXT(osh, plast, pnext); + plast = pnext; + } + + /* Adhere to start alignment requirements */ + PKTALIGN(osh, pnext, sublen, DHD_SDALIGN); + } + + /* If all allocations succeeded, save packet chain in bus structure */ + if (pnext) { + DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n", + __FUNCTION__, totlen, num)); + if (DHD_GLOM_ON() && bus->nextlen) { + if (totlen != bus->nextlen) { + DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d " + "rxseq %d\n", __FUNCTION__, bus->nextlen, + 
totlen, rxseq)); + } + } + bus->glom = pfirst; + pfirst = pnext = NULL; + } else { + if (pfirst) + PKTFREE(osh, pfirst, FALSE); + bus->glom = NULL; + num = 0; + } + + /* Done with descriptor packet */ + PKTFREE(osh, bus->glomd, FALSE); + bus->glomd = NULL; + bus->nextlen = 0; + + dhd_os_sdunlock_rxq(bus->dhd); + } + + /* Ok -- either we just generated a packet chain, or had one from before */ + if (bus->glom) { + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__)); + for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) { + DHD_GLOM((" %p: %p len 0x%04x (%d)\n", + pnext, (uint8*)PKTDATA(osh, pnext), + PKTLEN(osh, pnext), PKTLEN(osh, pnext))); + } + } + + pfirst = bus->glom; + dlen = (uint16)pkttotlen(osh, pfirst); + + /* Do an SDIO read for the superframe. Configurable iovar to + * read directly into the chained packet, or allocate a large + * packet and and copy into the chain. + */ + if (usechain) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, (uint8*)PKTDATA(osh, pfirst), + dlen, pfirst, NULL, NULL); + } else if (bus->dataptr) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, bus->dataptr, + dlen, NULL, NULL, NULL); + sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr); + if (sublen != dlen) { + DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n", + __FUNCTION__, dlen, sublen)); + errcode = -1; + } + pnext = NULL; + BCM_REFERENCE(pnext); + } else { + DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen)); + errcode = -1; + } + bus->f2rxdata++; + ASSERT(errcode != BCME_PENDING); + + /* On failure, kill the superframe, allow a couple retries */ + if (errcode < 0) { + DHD_ERROR(("%s: glom read of %d bytes failed: %d\n", + __FUNCTION__, dlen, errcode)); + bus->dhd->rx_errors++; + + if (bus->glomerr++ < 3) { + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); 
+ dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + return 0; + } + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("SUPERFRAME", PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 48)); + } +#endif // endif + + /* Validate the superframe header */ + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + errcode = 0; + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, sublen, check)); + errcode = -1; + } else if (ROUNDUP(sublen, bus->blocksize) != dlen) { + DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n", + __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen)); + errcode = -1; + } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) { + DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__, + SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]))); + errcode = -1; + } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) { + DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || + (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) { + DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n", + __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), + SDPCM_HDRLEN)); + errcode = -1; + } + + /* Check sequence number of superframe SW header */ + if (rxseq != 
seq) { + DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Remove superframe header, remember offset */ + PKTPULL(osh, pfirst, doff); + sfdoff = doff; + + /* Validate all the subframe headers */ + for (num = 0, pnext = pfirst; pnext && !errcode; + num++, pnext = PKTNEXT(osh, pnext)) { + dptr = (uint8 *)PKTDATA(osh, pnext); + dlen = (uint16)PKTLEN(osh, pnext); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("subframe", dptr, 32); + } +#endif // endif + + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (subframe %d): HW hdr error: " + "len/check 0x%04x/0x%04x\n", + __FUNCTION__, num, sublen, check)); + errcode = -1; + } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) { + DHD_ERROR(("%s (subframe %d): length mismatch: " + "len 0x%04x, expect 0x%04x\n", + __FUNCTION__, num, sublen, dlen)); + errcode = -1; + } else if ((chan != SDPCM_DATA_CHANNEL) && + (chan != SDPCM_EVENT_CHANNEL)) { + DHD_ERROR(("%s (subframe %d): bad channel %d\n", + __FUNCTION__, num, chan)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) { + DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n", + __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN)); + errcode = -1; + } + } + + if (errcode) { + /* Terminate frame on error, request a couple retries */ + if (bus->glomerr++ < 3) { + /* Restore superframe header space */ + PKTPUSH(osh, pfirst, sfdoff); + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + 
dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + bus->nextlen = 0; + return 0; + } + + /* Basic SD framing looks ok - process each packet (header) */ + bus->glom = NULL; + plast = NULL; + + dhd_os_sdlock_rxq(bus->dhd); + for (num = 0; pfirst; rxseq++, pfirst = pnext) { + pnext = PKTNEXT(osh, pfirst); + PKTSETNEXT(osh, pfirst, NULL); + + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n", + __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst), + PKTLEN(osh, pfirst), sublen, chan, seq)); + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL)); + + if (rxseq != seq) { + DHD_GLOM(("%s: rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Subframe Data", dptr, dlen); + } +#endif // endif + + PKTSETLEN(osh, pfirst, sublen); + PKTPULL(osh, pfirst, doff); + + reorder_info_len = sizeof(reorder_info_buf); + + if (PKTLEN(osh, pfirst) == 0) { + PKTFREE(bus->dhd->osh, pfirst, FALSE); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + bus->dhd->rx_errors++; + PKTFREE(osh, pfirst, FALSE); + continue; + } + if (reorder_info_len) { + uint32 free_buf_count; + void *ppfirst; + + ppfirst = pfirst; + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, + reorder_info_len, &ppfirst, &free_buf_count); + + if (free_buf_count == 0) { + continue; + } else { + void *temp; + + /* go to the end of the chain and attach the pnext there */ + temp = 
ppfirst; + while (PKTNEXT(osh, temp) != NULL) { + temp = PKTNEXT(osh, temp); + } + pfirst = temp; + if (list_tail[ifidx] == NULL) + list_head[ifidx] = ppfirst; + else + PKTSETNEXT(osh, list_tail[ifidx], ppfirst); + list_tail[ifidx] = pfirst; + } + + num += (uint8)free_buf_count; + } else { + /* this packet will go up, link back into chain and count it */ + + if (list_tail[ifidx] == NULL) { + list_head[ifidx] = list_tail[ifidx] = pfirst; + } else { + PKTSETNEXT(osh, list_tail[ifidx], pfirst); + list_tail[ifidx] = pfirst; + } + num++; + } +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n", + __FUNCTION__, num, pfirst, + PKTDATA(osh, pfirst), PKTLEN(osh, pfirst), + PKTNEXT(osh, pfirst), PKTLINK(pfirst))); + prhex("", (uint8 *)PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 32)); + } +#endif /* DHD_DEBUG */ + } + dhd_os_sdunlock_rxq(bus->dhd); + + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + if (list_head[idx]) { + void *temp; + uint8 cnt = 0; + temp = list_head[idx]; + do { + temp = PKTNEXT(osh, temp); + cnt++; + } while (temp); + if (cnt) { + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0); + dhd_os_sdlock(bus->dhd); +#if defined(SDIO_ISR_THREAD) + /* terence 20150615: fix for below error due to bussleep in watchdog after dhd_os_sdunlock here, + * so call BUS_WAKE to wake up bus again + * dhd_bcmsdh_recv_buf: Device asleep + * dhdsdio_readframes: RXHEADER FAILED: -40 + * dhdsdio_rxfail: abort command, terminate frame, send NAK + */ + BUS_WAKE(bus); +#endif + } + } + } + bus->rxglomframes++; + bus->rxglompkts += num; + } + return num; +} + +/* Return TRUE if there may be more frames to read */ +static uint +dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) +{ + osl_t *osh = bus->dhd->osh; + bcmsdh_info_t *sdh = bus->sdh; + + uint16 len, check; /* Extracted hardware header fields */ + uint8 chan, seq, doff; /* Extracted software header fields */ + uint8 fcbits; /* 
Extracted fcbits from software header */ + uint8 delta; + + void *pkt; /* Packet for event or data frames */ + uint16 pad; /* Number of pad bytes to read */ + uint16 rdlen; /* Total number of bytes to read */ + uint8 rxseq; /* Next sequence number to expect */ + uint rxleft = 0; /* Remaining number of frames allowed */ + int sdret; /* Return code from bcmsdh calls */ + uint8 txmax; /* Maximum tx sequence offered */ +#ifdef BCMSPI + uint32 dstatus = 0; /* gSPI device status bits of */ +#endif /* BCMSPI */ + bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */ + uint8 *rxbuf; + int ifidx = 0; + uint rxcount = 0; /* Total frames read */ + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + uint pkt_count; + +#if defined(DHD_DEBUG) || defined(SDTEST) + bool sdtest = FALSE; /* To limit message spew from test mode */ +#endif // endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + bus->readframes = TRUE; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: KSO off\n", __FUNCTION__)); + bus->readframes = FALSE; + return 0; + } + + ASSERT(maxframes); + +#ifdef SDTEST + /* Allow pktgen to override maxframes */ + if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) { + maxframes = bus->pktgen_count; + sdtest = TRUE; + } +#endif // endif + + /* Not finished unless we encounter no more frames indication */ + *finished = FALSE; + +#ifdef BCMSPI + /* Get pktlen from gSPI device F0 reg. */ + if (bus->bus == SPI_BUS) { + /* Peek in dstatus bits and find out size to do rx-read. 
*/ + dstatus = bcmsdh_get_dstatus(bus->sdh); + if (dstatus == 0) + DHD_ERROR(("%s:ZERO spi dstatus, a case observed in PR61352 hit !!!\n", + __FUNCTION__)); + + DHD_TRACE(("Device status from regread = 0x%x\n", dstatus)); + DHD_TRACE(("Device status from bit-reconstruction = 0x%x\n", + bcmsdh_get_dstatus((void *)bus->sdh))); + + if ((dstatus & STATUS_F2_PKT_AVAILABLE) && (((dstatus & STATUS_UNDERFLOW)) == 0)) { + bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >> + STATUS_F2_PKT_LEN_SHIFT); + /* '0' size with pkt-available interrupt is eqvt to 2048 bytes */ + bus->nextlen = (bus->nextlen == 0) ? SPI_MAX_PKT_LEN : bus->nextlen; + if (bus->dwordmode) + bus->nextlen = bus->nextlen << 2; + DHD_TRACE(("Entering %s: length to be read from gSPI = %d\n", + __FUNCTION__, bus->nextlen)); + } else { + if (dstatus & STATUS_F2_PKT_AVAILABLE) + DHD_ERROR(("Underflow during %s.\n", __FUNCTION__)); + else + DHD_ERROR(("False pkt-available intr.\n")); + *finished = TRUE; + return (maxframes - rxleft); + } + } +#endif /* BCMSPI */ + + for (rxseq = bus->rx_seq, rxleft = maxframes; + !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN; + rxseq++, rxleft--) { +#ifdef DHDTCPACK_SUP_DBG + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) { + if (bus->dotxinrx == FALSE) + DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n", + __FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode)); + } +#ifdef DEBUG_COUNTER + else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) { + tack_tbl.cnt[bus->dotxinrx ? 
6 : 7]++; + } +#endif /* DEBUG_COUNTER */ +#endif /* DHDTCPACK_SUP_DBG */ + /* tx more to improve rx performance */ + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) { + dhdsdio_sendpendctl(bus); + } else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) && + !bus->fcstate && DATAOK(bus) && + (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) { + dhdsdio_sendfromq(bus, dhd_txbound); +#ifdef DHDTCPACK_SUPPRESS + /* In TCPACK_SUP_DELAYTX mode, do txinrx only if + * 1. Any DATA packet to TX + * 2. TCPACK to TCPDATA PSH packets. + * in bus txq. + */ + bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ? + FALSE : TRUE; +#endif // endif + } + + /* Handle glomming separately */ + if (bus->glom || bus->glomd) { + uint8 cnt; + DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n", + __FUNCTION__, bus->glomd, bus->glom)); + cnt = dhdsdio_rxglom(bus, rxseq); + DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt)); + rxseq += cnt - 1; + rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; + continue; + } + + /* Try doing single read if we can */ + if (dhd_readahead && bus->nextlen) { + uint16 nextlen = bus->nextlen; + bus->nextlen = 0; + + if (bus->bus == SPI_BUS) { + rdlen = len = nextlen; + } else { + rdlen = len = nextlen << 4; + + /* Pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + } + + /* We use bus->rxctl buffer in WinXP for initial control pkt receives. + * Later we use buffer-poll for data as well as control packets. + * This is required because dhd receives full frame in gSPI unlike SDIO. + * After the frame is received we have to distinguish whether it is data + * or non-data frame. 
+ */ + /* Allocate a packet buffer */ + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) { + if (bus->bus == SPI_BUS) { + bus->usebufpool = FALSE; + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + rxbuf = bus->rxctl; + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + +#ifdef BCMSPI + if (bcmsdh_get_dstatus((void *)bus->sdh) & + STATUS_UNDERFLOW) { + bus->nextlen = 0; + *finished = TRUE; + DHD_ERROR(("%s: read %d control bytes failed " + "due to spi underflow\n", + __FUNCTION__, rdlen)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } +#endif /* BCMSPI */ + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + continue; + } + } else { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " + "expected rxseq %d\n", + __FUNCTION__, len, rdlen, rxseq)); + /* Just go try again w/normal header read */ + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } else { + if (bus->bus == SPI_BUS) + bus->usebufpool = TRUE; + + ASSERT(!PKTLINK(pkt)); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + rxbuf = (uint8 *)PKTDATA(osh, pkt); + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); +#ifdef BCMSPI + if (bcmsdh_get_dstatus((void *)bus->sdh) & STATUS_UNDERFLOW) { + bus->nextlen = 0; + *finished = TRUE; + DHD_ERROR(("%s (nextlen): read %d bytes failed due " + "to spi underflow\n", + __FUNCTION__, rdlen)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } +#endif /* BCMSPI */ + + if (sdret < 0) { + DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + /* Force retry w/normal header read. Don't attempt NAK for + * gSPI + */ + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + continue; + } + } + dhd_os_sdunlock_rxq(bus->dhd); + + /* Now check the header */ + bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN); + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means readahead info was bad */ + if (!(len|check)) { + DHD_INFO(("%s (nextlen): read zeros in HW header???\n", + __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check" + " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen, + len, check)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n", + __FUNCTION__, len)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Check for consistency with readahead info */ +#ifdef BCMSPI + if (bus->bus == SPI_BUS) { + if (bus->dwordmode) { + uint16 spilen; + spilen = ROUNDUP(len, 4); + len_consistent = (nextlen != spilen); + } else + len_consistent = (nextlen != len); + } else +#endif /* BCMSPI */ + len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4)); + if (len_consistent) { + /* Mismatch, force retry w/normal header (may be >4K) */ + DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; " + "expected rxseq %d\n", + __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + +#ifdef BCMSPI + /* Save the readahead length if there is one */ + if (bus->bus == SPI_BUS) { + /* Use reconstructed dstatus bits and find out readahead size */ + dstatus = bcmsdh_get_dstatus((void *)bus->sdh); + DHD_INFO(("Device status from bit-reconstruction = 0x%x\n", + bcmsdh_get_dstatus((void *)bus->sdh))); + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >> + STATUS_F2_PKT_LEN_SHIFT); + bus->nextlen = (bus->nextlen == 0) ? + SPI_MAX_PKT_LEN : bus->nextlen; + if (bus->dwordmode) + bus->nextlen = bus->nextlen << 2; + DHD_INFO(("readahead len from gSPI = %d \n", + bus->nextlen)); + bus->dhd->rx_readahead_cnt ++; + } else { + bus->nextlen = 0; + *finished = TRUE; + } + } else { +#endif /* BCMSPI */ + bus->nextlen = + bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large" + " (%d), seq %d\n", __FUNCTION__, bus->nextlen, + seq)); + bus->nextlen = 0; + } + + bus->dhd->rx_readahead_cnt ++; +#ifdef BCMSPI + } +#endif /* BCMSPI */ + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if 
((uint8)(txmax - bus->tx_seq) > 0x70) { +#ifdef BCMSPI + if ((bus->bus == SPI_BUS) && !(dstatus & STATUS_F2_RX_READY)) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_seq + 2; + } else { +#endif /* BCMSPI */ + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; +#ifdef BCMSPI + } +#endif /* BCMSPI */ + } + bus->tx_max = txmax; + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", rxbuf, len); + } else if (DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif // endif + + if (chan == SDPCM_CONTROL_CHANNEL) { + if (bus->bus == SPI_BUS) { + dhdsdio_read_control(bus, rxbuf, len, doff); + if (bus->usebufpool) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + } + continue; + } else { + DHD_ERROR(("%s (nextlen): readahead on control" + " packet %d?\n", __FUNCTION__, seq)); + /* Force retry w/normal header read */ + bus->nextlen = 0; + dhdsdio_rxfail(bus, FALSE, TRUE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } + + if ((bus->bus == SPI_BUS) && !bus->usebufpool) { + DHD_ERROR(("Received %d bytes on %d channel. 
Running out of " + "rx pktbuf's or not yet malloced.\n", len, chan)); + continue; + } + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* All done with this one -- now deliver the packet */ + goto deliver; + } + /* gSPI frames should not be handled in fractions */ + if (bus->bus == SPI_BUS) { + break; + } + + /* Read frame header (hardware and software) */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + bus->rxhdr, firstread, NULL, NULL, NULL); + bus->f2rxhdrs++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret)); + bus->rx_hdrfail++; + dhdsdio_rxfail(bus, TRUE, TRUE); + continue; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() || DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif // endif + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means no more frames */ + if (!(len|check)) { + *finished = TRUE; + break; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, len, check)); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len)); + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + /* 
Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN, seq)); + bus->rx_badhdr++; + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Save the readahead length if there is one */ + bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Call a separate function for control frames */ + if (chan == SDPCM_CONTROL_CHANNEL) { + dhdsdio_read_control(bus, bus->rxhdr, len, doff); + continue; + } + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) || + (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL)); + + /* Length to read */ + rdlen = (len > firstread) ? 
(len - firstread) : 0; + + /* May pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + if ((rdlen + firstread) > MAX_RX_DATASZ) { + /* Too long -- skip this frame */ + DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n", + __FUNCTION__, rdlen, chan)); + bus->dhd->rx_dropped++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan)); + continue; + } + dhd_os_sdunlock_rxq(bus->dhd); + + ASSERT(!PKTLINK(pkt)); + + /* Leave room for what we already read, and align remainder */ + ASSERT(firstread < (PKTLEN(osh, pkt))); + PKTPULL(osh, pkt, firstread); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + + /* Read the remaining frame data */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen, + ((chan == SDPCM_EVENT_CHANNEL) ? "event" : + ((chan == SDPCM_DATA_CHANNEL) ? 
"data" : "test")), sdret)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan)); + continue; + } + + /* Copy the already-read portion */ + PKTPUSH(osh, pkt, firstread); + bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", PKTDATA(osh, pkt), len); + } +#endif // endif + +deliver: + /* Save superframe descriptor and allocate packet frame */ + if (chan == SDPCM_GLOM_CHANNEL) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { + DHD_GLOM(("%s: got glom descriptor, %d bytes:\n", + __FUNCTION__, len)); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("Glom Data", PKTDATA(osh, pkt), len); + } +#endif // endif + PKTSETLEN(osh, pkt, len); + ASSERT(doff == SDPCM_HDRLEN); + PKTPULL(osh, pkt, SDPCM_HDRLEN); + bus->glomd = pkt; + } else { + DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__)); + dhdsdio_rxfail(bus, FALSE, FALSE); + } + continue; + } + + /* Fill in packet len and prio, deliver upward */ + PKTSETLEN(osh, pkt, len); + PKTPULL(osh, pkt, doff); + +#ifdef SDTEST + /* Test channel packets are processed separately */ + if (chan == SDPCM_TEST_CHANNEL) { + dhdsdio_testrcv(bus, pkt, seq); + continue; + } +#endif /* SDTEST */ + + if (PKTLEN(osh, pkt) == 0) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + continue; + } + + if (reorder_info_len) { + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len, + &pkt, &pkt_count); + if (pkt_count == 0) + 
continue; + } else { + pkt_count = 1; + } + + /* Unlock during rx call */ + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan); + dhd_os_sdlock(bus->dhd); +#if defined(SDIO_ISR_THREAD) + /* terence 20150615: fix for below error due to bussleep in watchdog after dhd_os_sdunlock here, + * so call BUS_WAKE to wake up bus again + * dhd_bcmsdh_recv_buf: Device asleep + * dhdsdio_readframes: RXHEADER FAILED: -40 + * dhdsdio_rxfail: abort command, terminate frame, send NAK + */ + BUS_WAKE(bus); +#endif + } + rxcount = maxframes - rxleft; +#ifdef DHD_DEBUG + /* Message if we hit the limit */ + if (!rxleft && !sdtest) + DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes)); + else +#endif /* DHD_DEBUG */ + DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount)); + /* Back off rxseq if awaiting rtx, update rx_seq */ + if (bus->rxskip) + rxseq--; + bus->rx_seq = rxseq; + + if (bus->reqbussleep) + { + dhdsdio_bussleep(bus, TRUE); + bus->reqbussleep = FALSE; + } + bus->readframes = FALSE; + + return rxcount; +} + +static uint32 +dhdsdio_hostmail(dhd_bus_t *bus, uint32 *hmbd) +{ + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus = 0; + uint32 hmb_data; + uint8 fcbits; + uint retries = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Read mailbox data and ack that we did so */ + R_SDREG(hmb_data, ®s->tohostmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_INT_ACK, ®s->tosbmailbox, retries); + bus->f1regdata += 2; + + /* Dongle recomposed rx frames, accept them again */ + if (hmb_data & HMB_DATA_NAKHANDLED) { + DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq)); + if (!bus->rxskip) { + DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__)); + } + bus->rxskip = FALSE; + intstatus |= FRAME_AVAIL_MASK(bus); + } + + /* + * DEVREADY does not occur with gSPI. 
+ */ + if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) { + bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT; + if (bus->sdpcm_ver != SDPCM_PROT_VERSION) + DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n", + bus->sdpcm_ver, SDPCM_PROT_VERSION)); + else + DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver)); +#ifndef BCMSPI + /* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) { + uint32 val; + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(bus->dhd->osh, &bus->regs->corecontrol, val); + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + } +#endif /* BCMSPI */ + +#ifdef DHD_DEBUG + /* Retrieve console state address now that firmware should have updated it */ + { + sdpcm_shared_t shared; + if (dhdsdio_readshared(bus, &shared) == 0) + bus->console_addr = shared.console_addr; + } +#endif /* DHD_DEBUG */ + } + + /* + * Flow Control has been moved into the RX headers and this out of band + * method isn't used any more. 
Leave this here for possibly remaining backward + * compatible with older dongles + */ + if (hmb_data & HMB_DATA_FC) { + fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT; + + if (fcbits & ~bus->flowcontrol) + bus->fc_xoff++; + if (bus->flowcontrol & ~fcbits) + bus->fc_xon++; + + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* At least print a message if FW halted */ + if (hmb_data & HMB_DATA_FWHALT) { + DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n")); + dhdsdio_checkdied(bus, NULL, 0); + DHD_ERROR(("Not doing bus down untill memdump done \n")); + } + + /* Shouldn't be any others */ + if (hmb_data & ~(HMB_DATA_DEVREADY | + HMB_DATA_FWHALT | + HMB_DATA_NAKHANDLED | + HMB_DATA_FC | + HMB_DATA_FWREADY | + HMB_DATA_FCDATA_MASK | + HMB_DATA_VERSION_MASK)) { + DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data)); + } + + if (hmbd) { + *hmbd = hmb_data; + } + + return intstatus; +} + +static bool +dhdsdio_dpc(dhd_bus_t *bus) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus, newstatus = 0; + uint retries = 0; + uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */ + uint txlimit = dhd_txbound; /* Tx frames to send before resched */ + uint framecnt = 0; /* Temporary counter of tx/rx frames */ + bool rxdone = TRUE; /* Flag for no more read data */ + bool resched = FALSE; /* Flag indicating resched wanted */ + unsigned long flags; +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool is_resched_by_readframe = FALSE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_sdlock(bus->dhd); + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + dhd_os_sdunlock(bus->dhd); + return 0; + } + + DHD_BUS_BUSY_SET_IN_DPC(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + /* Start with leftover 
status bits */ + intstatus = bus->intstatus; + + if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + goto exit; + } + + /* If waiting for HTAVAIL, check status */ + if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) { + int err; + uint8 clkctl, devctl = 0; + +#ifdef DHD_DEBUG + /* Check for inconsistent device control */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } else { + ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY); + } +#endif /* DHD_DEBUG */ + + /* Read CSR, if clock on switch to AVAIL, else ignore */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + + DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl)); + + if (SBSDIO_HTAV(clkctl)) { + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + if (err) { + DHD_ERROR(("%s: error writing DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + bus->clkstate = CLK_AVAIL; + } else { + goto clkwait; + } + } + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + if (bus->clkstate != CLK_AVAIL) + goto clkwait; + + /* Pending interrupt indicates new device status */ + if (bus->ipend) { + bus->ipend = FALSE; +#if defined(BT_OVER_SDIO) + bcmsdh_btsdio_process_f3_intr(); +#endif /* defined (BT_OVER_SDIO) */ + + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata++; + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + newstatus &= 
bus->hostintmask; + bus->fcstate = !!(newstatus & I_HMB_FC_STATE); + if (newstatus) { + bus->f1regdata++; +#ifndef BCMSPI + if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) && + (newstatus == I_XMTDATA_AVAIL)) { + } else +#endif /* BCMSPI */ + W_SDREG(newstatus, ®s->intstatus, retries); + } + } + + /* Merge new bits with previous */ + intstatus |= newstatus; + bus->intstatus = 0; + + /* Handle flow-control change: read new state in case our ack + * crossed another change interrupt. If change still set, assume + * FC ON for safety, let next loop through do the debounce. + */ + if (intstatus & I_HMB_FC_CHANGE) { + intstatus &= ~I_HMB_FC_CHANGE; + W_SDREG(I_HMB_FC_CHANGE, ®s->intstatus, retries); + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata += 2; + bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); + intstatus |= (newstatus & bus->hostintmask); + } + + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + uint32 hmbdata = 0; + + intstatus &= ~I_HMB_HOST_INT; + intstatus |= dhdsdio_hostmail(bus, &hmbdata); + +#ifdef DHD_ULP + /* ULP prototyping. Redowload fw on oob interupt */ + + /* all the writes after this point CAN use cached sbwad value */ + bcmsdh_force_sbwad_calc(bus->sdh, FALSE); + + if (dhd_ulp_pre_redownload_check(bus->dhd, bus->sdh, hmbdata)) { + if (dhd_bus_ulp_reinit_fw(bus) < 0) { + DHD_ERROR(("%s:%d FW redownload failed\n", + __FUNCTION__, __LINE__)); + goto exit; + } + } +#endif // endif + + } + +#ifdef DHD_UCODE_DOWNLOAD +exit_ucode: +#endif /* DHD_UCODE_DOWNLOAD */ + + /* Just being here means nothing more to do for chipactive */ + if (intstatus & I_CHIPACTIVE) { + /* ASSERT(bus->clkstate == CLK_AVAIL); */ + intstatus &= ~I_CHIPACTIVE; + } + + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + intstatus &= ~I_HMB_HOST_INT; + intstatus |= dhdsdio_hostmail(bus, NULL); + } + + /* Generally don't ask for these, can get CRC errors... 
*/ + if (intstatus & I_WR_OOSYNC) { + DHD_ERROR(("Dongle reports WR_OOSYNC\n")); + intstatus &= ~I_WR_OOSYNC; + } + + if (intstatus & I_RD_OOSYNC) { + DHD_ERROR(("Dongle reports RD_OOSYNC\n")); + intstatus &= ~I_RD_OOSYNC; + } + + if (intstatus & I_SBINT) { + DHD_ERROR(("Dongle reports SBINT\n")); + intstatus &= ~I_SBINT; + } + + /* Would be active due to wake-wlan in gSPI */ + if (intstatus & I_CHIPACTIVE) { + DHD_INFO(("Dongle reports CHIPACTIVE\n")); + intstatus &= ~I_CHIPACTIVE; + } + + if (intstatus & I_HMB_FC_STATE) { + DHD_INFO(("Dongle reports HMB_FC_STATE\n")); + intstatus &= ~I_HMB_FC_STATE; + } + + /* Ignore frame indications if rxskip is set */ + if (bus->rxskip) { + intstatus &= ~FRAME_AVAIL_MASK(bus); + } + + /* On frame indication, read available frames */ + if (PKT_AVAILABLE(bus, intstatus)) { + + framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone); + if (rxdone || bus->rxskip) + intstatus &= ~FRAME_AVAIL_MASK(bus); + rxlimit -= MIN(framecnt, rxlimit); + } + + /* Keep still-pending events for next scheduling */ + bus->intstatus = intstatus; + +clkwait: + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) + */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) && + !(bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); +#ifdef BCMSPI_ANDROID + if (*dhd_spi_lockcount == 0) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* BCMSPI_ANDROID */ + } + +#if defined(OOB_INTR_ONLY) && !defined(HW_OOB) + /* In case of SW-OOB(using edge trigger), + * Check interrupt status in the dongle again after enable irq on the host. 
+ * and rechedule dpc if interrupt is pended in the dongle. + * There is a chance to miss OOB interrupt while irq is disabled on the host. + * No need to do this with HW-OOB(level trigger) + */ + R_SDREG(newstatus, ®s->intstatus, retries); + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + if (newstatus & bus->hostintmask) { + bus->ipend = TRUE; + resched = TRUE; + } +#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */ + +#ifdef PROP_TXSTATUS + dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE); +#endif // endif + + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) + dhdsdio_sendpendctl(bus); + + /* Send queued frames (limit 1 if rx may still be pending) */ + else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && + pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { + +#ifdef DHD_ULP + if (dhd_ulp_f2_ready(bus->dhd, bus->sdh)) { +#endif /* DHD_ULP */ + if (bus->dhd->conf->dhd_txminmax < 0) + framecnt = rxdone ? txlimit : MIN(txlimit, DATABUFCNT(bus)); + else + framecnt = rxdone ? 
txlimit : MIN(txlimit, bus->dhd->conf->dhd_txminmax); + framecnt = dhdsdio_sendfromq(bus, framecnt); + txlimit -= framecnt; +#ifdef DHD_ULP + } else { + /* In other transient states like DHD_ULP_, after the states are + * DHD_ULP_F2ENAB_CLEARING and DHD_ULP_F2ENAB_SETTING, + * dpc is scheduled after steady-state and dhdsdio_sendfromq() will + * execute again + */ + } +#endif /* DHD_ULP */ + } + /* Resched the DPC if ctrl cmd is pending on bus credit */ + if (bus->ctrl_frame_stat) { + if (bus->dhd->conf->txctl_tmo_fix) { + set_current_state(TASK_INTERRUPTIBLE); + if (!kthread_should_stop()) + schedule_timeout(1); + set_current_state(TASK_RUNNING); + } + resched = TRUE; + } + + /* Resched if events or tx frames are pending, else await next interrupt */ + /* On failed register access, all bets are off: no resched or interrupts */ + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) { + if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) & + SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + /* Bus failed because of KSO */ + DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__)); + bus->kso = FALSE; + } else { + DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n", + __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->intstatus = 0; + } + } else if (bus->clkstate == CLK_PENDING) { + /* Awaiting I_CHIPACTIVE; don't resched */ + } else if (bus->intstatus || bus->ipend || + (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) || + PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */ + resched = TRUE; + } + + bus->dpc_sched = resched; + + /* If we're done for now, turn off clock request. 
*/ + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + +exit: + + if (!resched) { + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) + */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) && + (bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); + } + if (dhd_dpcpoll) { + if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) { + resched = TRUE; +#ifdef DEBUG_DPC_THREAD_WATCHDOG + is_resched_by_readframe = TRUE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + } + } + } + + if (bus->ctrl_wait && TXCTLOK(bus)) + wake_up_interruptible(&bus->ctrl_tx_wait); + dhd_os_sdunlock(bus->dhd); +#ifdef DEBUG_DPC_THREAD_WATCHDOG + if (bus->dhd->dhd_bug_on) { + DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x" + " ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n", + __FUNCTION__, resched, bus->ctrl_frame_stat, + bus->intstatus, bus->ipend, + pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe)); + + bus->dhd->dhd_bug_on = FALSE; + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + return resched; +} + +bool +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched; + + /* Call the DPC directly. 
*/ + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + resched = dhdsdio_dpc(bus); + + return resched; +} + +void +dhdsdio_isr(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus) { + DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); + return; + } + sdh = bus->sdh; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Count the interrupt call */ + bus->intrcount++; + bus->ipend = TRUE; + + /* Shouldn't get this interrupt if we're sleeping? */ + if (!SLPAUTO_ENAB(bus)) { + if (bus->sleeping) { + DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n")); + return; + } else if (!KSO_ENAB(bus)) { + DHD_ERROR(("ISR in devsleep 1\n")); + } + } + + /* Disable additional interrupts (is this needed now)? */ + if (bus->intr) { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + } else { + DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n")); + } + +#ifdef BCMSPI_ANDROID + bcmsdh_oob_intr_set(bus->sdh, FALSE); +#endif /* BCMSPI_ANDROID */ + bcmsdh_intr_disable(sdh); + bus->intdis = TRUE; + +#if defined(SDIO_ISR_THREAD) + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + /* terence 20150209: dpc should be scheded again if dpc_sched is TRUE or dhd_bus_txdata can + not schedule anymore because dpc_sched is TRUE now. 
+ */ + if (dhdsdio_dpc(bus)) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + DHD_OS_WAKE_UNLOCK(bus->dhd); +#else + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); +#endif /* defined(SDIO_ISR_THREAD) */ + +} + +#ifdef PKT_STATICS +void dhdsdio_txpktstatics(void) +{ + uint i, total = 0; + + printf("%s: TYPE EVENT: %d pkts (size=%d) transfered\n", + __FUNCTION__, tx_statics.event_count, tx_statics.event_size); + printf("%s: TYPE CTRL: %d pkts (size=%d) transfered\n", + __FUNCTION__, tx_statics.ctrl_count, tx_statics.ctrl_size); + printf("%s: TYPE DATA: %d pkts (size=%d) transfered\n", + __FUNCTION__, tx_statics.data_count, tx_statics.data_size); + printf("%s: Glom size distribution:\n", __FUNCTION__); + for (i=0;ipktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN); + bus->pktgen_minlen = bus->pktgen_maxlen; + } else { + bus->pktgen_maxlen = MAX_PKTGEN_LEN; + bus->pktgen_minlen = 0; + } + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Default to per-watchdog burst with 10s print time */ + bus->pktgen_freq = 1; + bus->pktgen_print = dhd_watchdog_ms ? 
(10000 / dhd_watchdog_ms) : 0; + bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000; + + /* Default to echo mode */ + bus->pktgen_mode = DHD_PKTGEN_ECHO; + bus->pktgen_stop = 1; +} + +static void +dhdsdio_pktgen(dhd_bus_t *bus) +{ + void *pkt; + uint8 *data; + uint pktcount; + uint fillbyte; + osl_t *osh = bus->dhd->osh; + uint16 len; + ulong time_lapse; + uint sent_pkts; + uint rcvd_pkts; + + /* Display current count if appropriate */ + if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) { + bus->pktgen_ptick = 0; + printf("%s: send attempts %d, rcvd %d, errors %d\n", + __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + + /* Print throughput stats only for constant length packet runs */ + if (bus->pktgen_minlen == bus->pktgen_maxlen) { + time_lapse = jiffies - bus->pktgen_prev_time; + bus->pktgen_prev_time = jiffies; + sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent; + bus->pktgen_prev_sent = bus->pktgen_sent; + rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd; + bus->pktgen_prev_rcvd = bus->pktgen_rcvd; + + printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n", + __FUNCTION__, + (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8, + (rcvd_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8); + } + } + + /* For recv mode, just make sure dongle has started sending */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) { + bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING; + dhdsdio_sdtest_set(bus, bus->pktgen_total); + } + return; + } + + /* Otherwise, generate or request the specified number of packets */ + for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) { + /* Stop if total has been reached */ + if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) { + bus->pktgen_count = 0; + break; + } + + /* Allocate an appropriate-sized packet */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) { + len = SDPCM_TEST_PKT_CNT_FLD_LEN; + } 
else { + len = bus->pktgen_len; + } + if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN), + TRUE))) {; + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + break; + } + PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Write test header cmd and extra based on mode */ + switch (bus->pktgen_mode) { + case DHD_PKTGEN_ECHO: + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_SEND: + *data++ = SDPCM_TEST_DISCARD; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_RXBURST: + *data++ = SDPCM_TEST_BURST; + *data++ = (uint8)bus->pktgen_count; /* Just for backward compatability */ + break; + + default: + DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode)); + PKTFREE(osh, pkt, TRUE); + bus->pktgen_count = 0; + return; + } + + /* Write test header length field */ + *data++ = (bus->pktgen_len >> 0); + *data++ = (bus->pktgen_len >> 8); + + /* Write frame count in a 4 byte field adjucent to SDPCM test header for + * burst mode + */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) { + *data++ = (uint8)(bus->pktgen_count >> 0); + *data++ = (uint8)(bus->pktgen_count >> 8); + *data++ = (uint8)(bus->pktgen_count >> 16); + *data++ = (uint8)(bus->pktgen_count >> 24); + } else { + + /* Then fill in the remainder -- N/A for burst */ + for (fillbyte = 0; fillbyte < len; fillbyte++) + *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent); + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN); + } +#endif // endif + + /* Send it */ + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) { + bus->pktgen_fail++; + if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail) + bus->pktgen_count = 0; + } + bus->pktgen_sent++; + + /* Bump length if not fixed, 
wrap at max */ + if (++bus->pktgen_len > bus->pktgen_maxlen) + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Special case for burst mode: just send one request! */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) + break; + } +} + +static void +dhdsdio_sdtest_set(dhd_bus_t *bus, uint count) +{ + void *pkt; + uint8 *data; + osl_t *osh = bus->dhd->osh; + + /* Allocate the packet */ + if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + + SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) { + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + return; + } + PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + + SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Fill in the test header */ + *data++ = SDPCM_TEST_SEND; + *data++ = (count > 0)?TRUE:FALSE; + *data++ = (bus->pktgen_maxlen >> 0); + *data++ = (bus->pktgen_maxlen >> 8); + *data++ = (uint8)(count >> 0); + *data++ = (uint8)(count >> 8); + *data++ = (uint8)(count >> 16); + *data++ = (uint8)(count >> 24); + + /* Send it */ + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) + bus->pktgen_fail++; +} + +static void +dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq) +{ + osl_t *osh = bus->dhd->osh; + uint8 *data; + uint pktlen; + + uint8 cmd; + uint8 extra; + uint16 len; + uint16 offset; + + /* Check for min length */ + if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen)); + PKTFREE(osh, pkt, FALSE); + return; + } + + /* Extract header fields */ + data = PKTDATA(osh, pkt); + cmd = *data++; + extra = *data++; + len = *data++; len += *data++ << 8; + DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len)); + /* Check length for relevant commands */ + if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) { + if (pktlen != len + SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d 
seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + return; + } + } + + /* Process as per command */ + switch (cmd) { + case SDPCM_TEST_ECHOREQ: + /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */ + *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP; + if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) { + bus->pktgen_sent++; + } else { + bus->pktgen_fail++; + PKTFREE(osh, pkt, FALSE); + } + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_ECHORSP: + if (bus->ext_loop) { + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + } + + for (offset = 0; offset < len; offset++, data++) { + if (*data != SDPCM_TEST_FILL(offset, extra)) { + DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: " + "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n", + offset, len, SDPCM_TEST_FILL(offset, extra), *data)); + break; + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_DISCARD: + { + int i = 0; + uint8 *prn = data; + uint8 testval = extra; + for (i = 0; i < len; i++) { + if (*prn != testval) { + DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n", + i, bus->pktgen_rcvd_rcvsession, testval, *prn)); + prn++; testval++; + } + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_BURST: + case SDPCM_TEST_SEND: + default: + DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + break; + } + + /* For recv mode, stop at limit (and tell dongle to stop sending) */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) { + bus->pktgen_rcvd_rcvsession++; + + if (bus->pktgen_total && + (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) { + bus->pktgen_count = 0; + DHD_ERROR(("Pktgen:rcv test complete!\n")); + bus->pktgen_rcv_state = PKTGEN_RCV_IDLE; + dhdsdio_sdtest_set(bus, FALSE); + 
bus->pktgen_rcvd_rcvsession = 0; + } + } + } +} +#endif /* SDTEST */ + +int dhd_bus_oob_intr_register(dhd_pub_t *dhdp) +{ + int err = 0; + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus); +#endif // endif + return err; +} + +void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) +{ +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + bcmsdh_oob_intr_unregister(dhdp->bus->sdh); +#endif // endif +} + +void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) +{ +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + bcmsdh_oob_intr_set(dhdp->bus->sdh, enable); +#endif // endif +} + +void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub) +{ + bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh); +} + +void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub) +{ + bcmsdh_dev_relax(dhdpub->bus->sdh); +} + +bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub) +{ + bool enabled = FALSE; + + enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh); + return enabled; +} + +extern bool +dhd_bus_watchdog(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus; + unsigned long flags; + + DHD_TIMER(("%s: Enter\n", __FUNCTION__)); + + bus = dhdp->bus; + + if (bus->dhd->dongle_reset) + return FALSE; + + if (bus->dhd->hang_was_sent) { + dhd_os_wd_timer(bus->dhd, 0); + return FALSE; + } + + /* Ignore the timer if simulating bus down */ + if (!SLPAUTO_ENAB(bus) && bus->sleeping) + return FALSE; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) || + DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + return FALSE; + } + DHD_BUS_BUSY_SET_IN_WD(dhdp); + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + dhd_os_sdlock(bus->dhd); + + /* Poll period: check device if appropriate. 
*/ + // terence 20160615: remove !SLPAUTO_ENAB(bus) to fix not able to polling if sr supported + if (1 && (bus->poll && (++bus->polltick >= bus->pollrate))) { + uint32 intstatus = 0; + + /* Reset poll tick */ + bus->polltick = 0; + + /* Check device if no interrupts */ + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { + +#ifndef BCMSPI + if (!bus->dpc_sched) { + uint8 devpend; + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, + SDIOD_CCCR_INTPEND, NULL); + intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2); + } +#else + if (!bus->dpc_sched) { + uint32 devpend; + devpend = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, + SPID_STATUS_REG, NULL); + intstatus = devpend & STATUS_F2_PKT_AVAILABLE; + } +#endif /* !BCMSPI */ + + /* If there is something, make like the ISR and schedule the DPC */ + if (intstatus) { + bus->pollcnt++; + bus->ipend = TRUE; + if (bus->intr) { + bcmsdh_intr_disable(bus->sdh); + } + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + + /* Update interrupt tracking */ + bus->lastintrs = bus->intrcount; + } + + if ((!bus->dpc_sched) && pktq_n_pkts_tot(&bus->txq)) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + +#ifdef DHD_DEBUG + /* Poll for console output periodically */ + if (dhdp->busstate == DHD_BUS_DATA && dhdp->dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhdp->dhd_console_ms) { + bus->console.count -= dhdp->dhd_console_ms; + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (dhdsdio_readconsole(bus) < 0) + dhdp->dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef SDTEST + /* Generate packets if configured */ + if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) { + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + 
bus->pktgen_tick = 0; + dhdsdio_pktgen(bus); + } +#endif // endif + + /* On idle timeout clear activity flag and/or turn off clock */ +#ifdef DHD_USE_IDLECOUNT + if (bus->activity) + bus->activity = FALSE; + else { + bus->idlecount++; + + /* + * If the condition to switch off the clock is reached And if + * BT is inactive (in case of BT_OVER_SDIO build) turn off clk. + * + * Consider the following case, DHD is configured with + * 1) idletime == DHD_IDLE_IMMEDIATE + * 2) BT is the last user of the clock + * We cannot disable the clock from __dhdsdio_clk_disable + * since WLAN might be using it. If WLAN is active then + * from the respective function/context after doing the job + * the clk is turned off. + * But if WLAN is actually inactive then the watchdog should + * disable the clock. So the condition check below should be + * bus->idletime != 0 instead of idletime == 0 + */ + if ((bus->idletime != 0) && (bus->idlecount >= bus->idletime) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__)); + if (!bus->poll && SLPAUTO_ENAB(bus)) { + if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY) + dhd_os_wd_timer(bus->dhd, 0); + } else + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + + bus->idlecount = 0; + } + } +#else + if ((bus->idletime != 0) && (bus->clkstate == CLK_AVAIL) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + if (++bus->idlecount >= bus->idletime) { + bus->idlecount = 0; + if (bus->activity) { + bus->activity = FALSE; + if (!bus->poll && SLPAUTO_ENAB(bus)) { + if (!bus->readframes) + dhdsdio_bussleep(bus, TRUE); + else + bus->reqbussleep = TRUE; + } else { + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + } + } + } +#endif /* DHD_USE_IDLECOUNT */ + + dhd_os_sdunlock(bus->dhd); + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_WD(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + return bus->ipend; +} + +extern int +dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = 
dhdp->bus; + uint32 addr, val; + int rv; + void *pkt; + + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Exclusive bus access */ + dhd_os_sdlock(bus->dhd); + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + dhd_os_sdunlock(bus->dhd); + return BCME_NOTREADY; + } + + /* Request clock to allow SDIO accesses */ + BUS_WAKE(bus); + /* No pend allowed since txpkt is called later, ht clk has to be on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + if (!DATAOK(bus)) { + rv = BCME_NOTREADY; + goto done; + } + + /* Bump dongle by sending an empty packet on the event channel. + * sdpcm_sendup (RX) checks for virtual console input. 
+ */ + if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) + rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE); + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + + return rv; +} + +#if defined(DHD_DEBUG) && !defined(BCMSDIOLITE) +static void +dhd_dump_cis(uint fn, uint8 *cis) +{ + uint byte, tag, tdata; + DHD_INFO(("Function %d CIS:\n", fn)); + + for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) { + if ((byte % 16) == 0) + DHD_INFO((" ")); + DHD_INFO(("%02x ", cis[byte])); + if ((byte % 16) == 15) + DHD_INFO(("\n")); + if (!tdata--) { + tag = cis[byte]; + if (tag == 0xff) + break; + else if (!tag) + tdata = 0; + else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT) + tdata = cis[byte + 1] + 1; + else + DHD_INFO(("]")); + } + } + if ((byte % 16) != 15) + DHD_INFO(("\n")); +} +#endif /* DHD_DEBUG */ + +static bool +dhdsdio_chipmatch(uint16 chipid) +{ + if (chipid == BCM4335_CHIP_ID) + return TRUE; + if (chipid == BCM4339_CHIP_ID) + return TRUE; + if (BCM4345_CHIP(chipid)) + return TRUE; + if (chipid == BCM4350_CHIP_ID) + return TRUE; + if (chipid == BCM4354_CHIP_ID) + return TRUE; + if (chipid == BCM4358_CHIP_ID) + return TRUE; + if (chipid == BCM43569_CHIP_ID) + return TRUE; + if (chipid == BCM4371_CHIP_ID) + return TRUE; + if (chipid == BCM43430_CHIP_ID) + return TRUE; + if (chipid == BCM43018_CHIP_ID) + return TRUE; + if (BCM4349_CHIP(chipid)) + return TRUE; + if (chipid == BCM4364_CHIP_ID) + return TRUE; + + if (chipid == BCM43012_CHIP_ID) + return TRUE; + + if (chipid == BCM43014_CHIP_ID) + return TRUE; + + if (chipid == BCM4369_CHIP_ID) + return TRUE; + + if (chipid == BCM4362_CHIP_ID) + return TRUE; + if (chipid == BCM43751_CHIP_ID) + return TRUE; + + return FALSE; +} + +static void * +dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, 
uint16 slot, + uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh) +{ + int ret; + dhd_bus_t *bus; +#ifdef GET_OTP_MAC_ENABLE + struct ether_addr ea_addr; +#endif + + DHD_MUTEX_LOCK(); + + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. + */ + dhd_txbound = DHD_TXBOUND; + dhd_rxbound = DHD_RXBOUND; +#ifdef BCMSPI + dhd_alignctl = FALSE; +#else + dhd_alignctl = TRUE; +#endif /* BCMSPI */ + sd1idle = TRUE; + dhd_readahead = TRUE; + retrydata = FALSE; + +#ifdef DISABLE_FLOW_CONTROL + dhd_doflow = FALSE; +#endif /* DISABLE_FLOW_CONTROL */ + dhd_dongle_ramsize = 0; + dhd_txminmax = DHD_TXMINMAX; + +#ifdef BCMSPI + forcealign = FALSE; +#else + forcealign = TRUE; +#endif /* !BCMSPI */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid)); + + /* We make assumptions about address window mappings */ + ASSERT((uintptr)regsva == si_enum_base(devid)); + + /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start + * means early parse could fail, so here we should get either an ID + * we recognize OR (-1) indicating we must request power first. 
+ */ + /* Check the Vendor ID */ + switch (venid) { + case 0x0000: + case VENDOR_BROADCOM: + break; + default: + DHD_ERROR(("%s: unknown vendor: 0x%04x\n", + __FUNCTION__, venid)); + goto forcereturn; + } + + /* Check the Device ID and make sure it's one that we support */ + switch (devid) { + case 0: + DHD_INFO(("%s: allow device id 0, will check chip internals\n", + __FUNCTION__)); + break; + + default: + DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n", + __FUNCTION__, venid, devid)); + goto forcereturn; + } + + if (osh == NULL) { + DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__)); + goto forcereturn; + } + + /* Allocate private bus interface state */ + if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + goto fail; + } + bzero(bus, sizeof(dhd_bus_t)); + bus->sdh = sdh; + bus->cl_devid = (uint16)devid; + bus->bus = DHD_BUS; + bus->bus_num = bus_no; + bus->slot_num = slot; + bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; + bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ +#ifdef BT_OVER_SDIO + bus->bt_use_count = 0; +#endif // endif + +#if defined(SUPPORT_P2P_GO_PS) + init_waitqueue_head(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + init_waitqueue_head(&bus->ctrl_tx_wait); + + /* attempt to attach to the dongle */ + if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) { + DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Attach to the dhd/OS/network interface */ + if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Allocate buffers */ + if (!(dhdsdio_probe_malloc(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__)); + goto fail; + } + + if (!(dhdsdio_probe_init(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__)); + goto fail; + } + + if (bus->intr) { + /* Register 
interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__)); + bcmsdh_intr_disable(sdh); + if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) { + DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n", + __FUNCTION__, ret)); + goto fail; + } + DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__)); + } else { + DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n", + __FUNCTION__)); + } + + DHD_INFO(("%s: completed!!\n", __FUNCTION__)); + + /* if firmware path present try to download and bring up bus */ + bus->dhd->hang_report = TRUE; +#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd + if (dhd_download_fw_on_driverload) { + if ((ret = dhd_bus_start(bus->dhd)) != 0) { + DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__)); + goto fail; + } + } + else { + /* Set random MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } +#endif +#if defined(BT_OVER_SDIO) + /* At this point Regulators are turned on and iconditionaly sdio bus is started + * based upon dhd_download_fw_on_driverload check, so + * increase the bus user count, this count will only be disabled inside + * dhd_register_if() function if flag dhd_download_fw_on_driverload is set to false, + * i.e FW download during insmod is not needed, otherwise it will not be decremented + * so that WALN will always hold the bus untill rmmod is done. 
+ */ + dhdsdio_bus_usr_cnt_inc(bus->dhd); +#endif /* BT_OVER_SDIO */ + +#ifdef GET_OTP_MAC_ENABLE + if (dhd_conf_get_mac(bus->dhd, sdh, bus->sih, ea_addr.octet)) { + DHD_TRACE(("%s: Can not read MAC address\n", __FUNCTION__)); + } else + memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN); +#endif /* GET_CUSTOM_MAC_ENABLE */ + + /* Ok, have the per-port tell the stack we're open for business */ + if (dhd_register_if(bus->dhd, 0, TRUE) != 0) { + DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__)); + goto fail; + } + +#ifdef BCMHOST_XTAL_PU_TIME_MOD + bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11); + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001); +#endif /* BCMHOST_XTAL_PU_TIME_MOD */ + +#if defined(MULTIPLE_SUPPLICANT) + wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe +#endif /* MULTIPLE_SUPPLICANT */ + DHD_MUTEX_UNLOCK(); + + return bus; + +fail: + dhdsdio_release(bus, osh); + +forcereturn: + DHD_MUTEX_UNLOCK(); + + return NULL; +} + +static bool +dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, + uint16 devid) +{ +#ifndef BCMSPI + uint8 clkctl = 0; +#endif /* !BCMSPI */ + uint fn, numfn; + uint8 *cis[SDIOD_MAX_IOFUNCS]; + int32 value; + int err = 0; + + BCM_REFERENCE(value); + bus->alp_only = TRUE; + bus->sih = NULL; + + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, si_enum_base(devid))) { + DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__)); + } + +#if defined(DHD_DEBUG) + DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n", + bcmsdh_reg_read(bus->sdh, si_enum_base(devid), 4))); +#endif // endif + +#ifndef BCMSPI /* wake-wlan in gSPI will bring up the htavail/alpavail clocks. 
*/ + + /* Force PLL off until si_attach() programs PLL control regs */ + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err); + if (!err) + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) { + DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + err, DHD_INIT_CLKCTL1, clkctl)); + goto fail; + } + +#endif /* !BCMSPI */ +#ifndef BCMSPI + numfn = bcmsdh_query_iofnum(sdh); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); + + /* Make sure ALP is available before trying to read CIS */ + SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); + + /* Now request ALP be put on the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + DHD_INIT_CLKCTL2, &err); + OSL_DELAY(65); +#else + numfn = 0; /* internally func is hardcoded to 1 as gSPI has cis on F1 only */ +#endif /* !BCMSPI */ +#ifndef BCMSDIOLITE + if (DHD_INFO_ON()) { + for (fn = 0; fn <= numfn; fn++) { + if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn)); + break; + } + bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], + SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err)); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + break; + } +#if 0 + /* Reading the F1, F2 and F3 max blocksize values from CIS + * and writing into the F1, F2 and F3 block size registers. + * There is no max block size register value available for F0 in CIS register. + * So, setting default value for F0 block size as 32 (which was set earlier + * in iovar). IOVAR takes only one arguement. 
+ * So, we are passing the function number alongwith the value (fn<<16) + */ + if (!fn) + value = F0_BLOCK_SIZE; + else + value = (cis[fn][25]<<8) | cis[fn][24] | (fn<<16); + if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &value, + sizeof(value), TRUE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, + "sd_blocksize")); + } +#endif +#ifdef DHD_DEBUG + if (DHD_INFO_ON()) { + dhd_dump_cis(fn, cis[fn]); + } +#endif /* DHD_DEBUG */ + } + while (fn-- > 0) { + ASSERT(cis[fn]); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + } + } +#else + BCM_REFERENCE(cis); + BCM_REFERENCE(fn); +#endif /* DHD_DEBUG */ + + if (err) { + DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); + goto fail; + } + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + +#ifdef DHD_DEBUG + DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n", + bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg)); +#endif /* DHD_DEBUG */ + + bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev); + + if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: unsupported chip: 0x%04x\n", + __FUNCTION__, bus->sih->chip)); + goto fail; + } + + if (bus->sih->buscorerev >= 12) + dhdsdio_clk_kso_init(bus); + else + bus->kso = TRUE; + + si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength); + + /* Get info on the ARM and SOCRAM cores... 
*/ + if (!DHD_NOPMU(bus)) { + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + } else { + /* cr4 has a different way to find the RAM size from TCM's */ + if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { + DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4335_CHIP_ID: + case BCM4339_CHIP_ID: + bus->dongle_ram_base = CR4_4335_RAM_BASE; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4358_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM4371_CHIP_ID: + bus->dongle_ram_base = CR4_4350_RAM_BASE; + break; + case BCM4360_CHIP_ID: + bus->dongle_ram_base = CR4_4360_RAM_BASE; + break; + CASE_BCM4345_CHIP: + bus->dongle_ram_base = (bus->sih->chiprev < 6) /* from 4345C0 */ + ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; + break; + case BCM4349_CHIP_GRPID: + /* RAM based changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? 
+ CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9); + break; + case BCM4364_CHIP_ID: + bus->dongle_ram_base = CR4_4364_RAM_BASE; + break; + case BCM4362_CHIP_ID: + bus->dongle_ram_base = CR4_4362_RAM_BASE; + break; + case BCM43751_CHIP_ID: + bus->dongle_ram_base = CR4_43751_RAM_BASE; + break; + case BCM4369_CHIP_ID: + bus->dongle_ram_base = CR4_4369_RAM_BASE; + break; + default: + bus->dongle_ram_base = 0; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + } + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_ramsize) + dhd_dongle_setramsize(bus, dhd_dongle_ramsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", + bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); + + bus->srmemsize = si_socram_srmem_size(bus->sih); + } + + /* ...but normally deal with the SDPCMDEV core */ +#ifdef BCMSDIOLITE + if (!(bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find Chip Common core!\n", __FUNCTION__)); + goto fail; + } +#else + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) && + !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__)); + goto fail; + } +#endif // endif + bus->sdpcmrev = si_corerev(bus->sih); + + /* Set core control so an SDIO reset does a backplane reset */ + OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN); +#ifndef BCMSPI + bus->rxint_mode = SDIO_DEVICE_HMB_RXINT; + + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) + { + uint32 val; + + val = R_REG(osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(osh, &bus->regs->corecontrol, val); + } +#endif /* BCMSPI */ + + pktq_init(&bus->txq, (PRIOMASK + 1), QLEN); + + /* Locate an appropriately-aligned portion of hdrbuf */ + bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN); + + 
/* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + if ((bus->poll = (bool)dhd_poll)) + bus->pollrate = 1; + + /* Setting default Glom size */ + bus->txglomsize = SDPCM_DEFGLOM_SIZE; + + return TRUE; + +fail: + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + return FALSE; +} + +static bool +dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->maxctl) { + bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN; + if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) { + DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n", + __FUNCTION__, bus->rxblen)); + goto fail; + } + } + /* Allocate buffer to receive glomed packet */ + if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) { + DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n", + __FUNCTION__, MAX_DATA_BUF)); + /* release rxbuf which was already located as above */ + if (!bus->rxblen) + DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen); + goto fail; + } + /* Allocate buffer to membuf */ + bus->membuf = MALLOC(osh, MAX_MEM_BUF); + if (bus->membuf == NULL) { + DHD_ERROR(("%s: MALLOC of %d-byte membuf failed\n", + __FUNCTION__, MAX_MEM_BUF)); + if (bus->databuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif + bus->databuf = NULL; + } + /* release rxbuf which was already located as above */ + if (!bus->rxblen) + DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen); + goto fail; + } + memset(bus->membuf, 0, MAX_MEM_BUF); + + /* Align the buffer */ + if ((uintptr)bus->databuf % DHD_SDALIGN) + bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN)); + else + bus->dataptr = bus->databuf; + + return TRUE; + +fail: + return FALSE; +} + +static bool +dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + int32 fnum; + + DHD_TRACE(("%s: Enter\n", 
__FUNCTION__)); + + bus->_srenab = FALSE; + +#ifdef SDTEST + dhdsdio_pktgen_init(bus); +#endif /* SDTEST */ + +#ifndef BCMSPI + /* Disable F2 to clear any intermediate frame state on the dongle */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); +#endif /* !BCMSPI */ + + bus->dhd->busstate = DHD_BUS_DOWN; + bus->sleeping = FALSE; + bus->rxflow = FALSE; + bus->prev_rxlim_hit = 0; + +#ifndef BCMSPI + /* Done with backplane-dependent accesses, can drop clock... */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); +#endif /* !BCMSPI */ + + /* ...and initialize clock/power states */ + bus->clkstate = CLK_SDONLY; + bus->idletime = (int32)dhd_idletime; + bus->idleclock = DHD_IDLE_ACTIVE; + + /* Query the SD clock speed */ + if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor")); + bus->sd_divisor = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_divisor", bus->sd_divisor)); + } + + /* Query the SD bus mode */ + if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode")); + bus->sd_mode = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_mode", bus->sd_mode)); + } + + /* Query the F2 block size, set roundup accordingly */ + fnum = 2; + if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + bus->roundup = MIN(max_roundup, bus->blocksize); + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); + bus->pad_pkt = 
PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE); + if (bus->pad_pkt == NULL) + DHD_ERROR(("failed to allocate padding packet\n")); + else { + int alignment_offset = 0; + uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt); + if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN))) + PKTPUSH(osh, bus->pad_pkt, alignment_offset); + PKTSETNEXT(osh, bus->pad_pkt, NULL); + } +#endif /* DHDENABLE_TAILPAD */ + + /* Query if bus module supports packet chaining, default to use if supported */ + if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0, + &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_rxchain = FALSE; + } else { + DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n", + __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support"))); + } + bus->use_rxchain = (bool)bus->sd_rxchain; + bus->txinrx_thres = CUSTOM_TXINRX_THRES; + /* TX first in dhdsdio_readframes() */ + bus->dotxinrx = TRUE; + +#ifdef PKT_STATICS + memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t)); +#endif + + return TRUE; +} + +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; + + ret = dhdsdio_download_firmware(bus, osh, bus->sdh); + + return ret; +} + +void +dhd_set_path_params(struct dhd_bus *bus) +{ + /* External conf takes precedence if specified */ + dhd_conf_preinit(bus->dhd); + + if (bus->dhd->conf_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path); + } + if (bus->dhd->clm_path[0] == '\0') { + dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path); + } +#ifdef CONFIG_PATH_AUTO_SELECT + dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path); +#endif + + dhd_conf_read_config(bus->dhd, bus->dhd->conf_path); + + dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path); + 
dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path); + dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path); + + dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->sih, bus->fw_path); + dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->sih, bus->nv_path); + + printf("Final fw_path=%s\n", bus->fw_path); + printf("Final nv_path=%s\n", bus->nv_path); + printf("Final clm_path=%s\n", bus->dhd->clm_path); + printf("Final conf_path=%s\n", bus->dhd->conf_path); + +} + +void +dhd_set_bus_params(struct dhd_bus *bus) +{ + if (bus->dhd->conf->dhd_poll >= 0) { + bus->poll = bus->dhd->conf->dhd_poll; + if (!bus->pollrate) + bus->pollrate = 1; + printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll); + } + if (bus->dhd->conf->use_rxchain >= 0) { + bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain; + } + if (bus->dhd->conf->txinrx_thres >= 0) { + bus->txinrx_thres = bus->dhd->conf->txinrx_thres; + } + if (bus->dhd->conf->txglomsize >= 0) { + bus->txglomsize = bus->dhd->conf->txglomsize; + } +} + +static int +dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + +#if defined(SUPPORT_MULTIPLE_REVISION) + if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) { + DHD_ERROR(("%s: fail to concatnate revison \n", + __FUNCTION__)); + return BCME_BADARG; + } +#endif /* SUPPORT_MULTIPLE_REVISION */ + +#if defined(DHD_BLOB_EXISTENCE_CHECK) + dhd_set_blob_support(bus->dhd, bus->fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + + DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + + /* Download the firmware */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + dhd_set_path_params(bus); + dhd_set_bus_params(bus); + + ret = _dhdsdio_download_firmware(bus); + + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} + +/* Detach and free everything */ +static void +dhdsdio_release(dhd_bus_t *bus, osl_t *osh) +{ + 
bool dongle_isolation = FALSE; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(osh); + + if (bus->dhd) { +#if defined(DEBUGGER) || defined(DHD_DSCOPE) + debugger_close(); +#endif /* DEBUGGER || DHD_DSCOPE */ + dongle_isolation = bus->dhd->dongle_isolation; + dhd_detach(bus->dhd); + } + + /* De-register interrupt handler */ + bcmsdh_intr_disable(bus->sdh); + bcmsdh_intr_dereg(bus->sdh); + + if (bus->dhd) { + dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_free(bus->dhd); + bus->dhd = NULL; + } + + dhdsdio_release_malloc(bus, osh); + +#ifdef DHD_DEBUG + if (bus->console.buf != NULL) + MFREE(osh, bus->console.buf, bus->console.bufsize); +#endif // endif + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); +#endif /* DHDENABLE_TAILPAD */ + + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->rxbuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->rxbuf, bus->rxblen); +#endif // endif + bus->rxctl = bus->rxbuf = NULL; + bus->rxlen = 0; + } + + if (bus->databuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif // endif + bus->databuf = NULL; + } + + if (bus->membuf) { + MFREE(osh, bus->membuf, MAX_DATA_BUF); + bus->membuf = NULL; + } + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + +} + +static void +dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) + return; + + if (bus->sih) { + /* In Win10, system will be BSOD if using "sysprep" to do OS image */ + /* Skip this will not 
cause the BSOD. */ +#if !defined(BCMLXSDMMC) + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + if (KSO_ENAB(bus) && (dongle_isolation == FALSE)) + si_watchdog(bus->sih, 4); +#endif /* !defined(BCMLXSDMMC) */ + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + si_detach(bus->sih); + bus->sih = NULL; + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_disconnect(void *ptr) +{ + dhd_bus_t *bus = (dhd_bus_t *)ptr; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_MUTEX_LOCK(); + if (bus) { + ASSERT(bus->dhd); + /* Advertise bus remove during rmmod */ + dhdsdio_advertise_bus_remove(bus->dhd); + dhdsdio_release(bus, bus->dhd->osh); + } + DHD_MUTEX_UNLOCK(); + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static int +dhdsdio_suspend(void *context) +{ + int ret = 0; +#ifdef SUPPORT_P2P_GO_PS + int wait_time = 0; +#endif /* SUPPORT_P2P_GO_PS */ + + dhd_bus_t *bus = (dhd_bus_t*)context; + unsigned long flags; + + DHD_ERROR(("%s Enter\n", __FUNCTION__)); + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + + if (bus->dhd->up == FALSE) { + return BCME_OK; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) { + DHD_ERROR(("not in a readystate to LPBK is not inited\n")); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_ERROR; + } + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->dongle_reset) { + DHD_ERROR(("Dongle is in reset state.\n")); + return -EIO; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_SUSPEND; + if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) { + DHD_ERROR(("Tx Request is not ended\n")); + bus->dhd->busstate = DHD_BUS_DATA; + 
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return -EBUSY; + } + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef SUPPORT_P2P_GO_PS + if (bus->idletime > 0) { + wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms); + } +#endif /* SUPPORT_P2P_GO_PS */ + ret = dhd_os_check_wakelock(bus->dhd); +#ifdef SUPPORT_P2P_GO_PS + // terence 20141124: fix for suspend issue + if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) { + if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) { + if (!bus->sleeping) { + ret = 1; + } + } + } +#endif /* SUPPORT_P2P_GO_PS */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + if (ret) { + bus->dhd->busstate = DHD_BUS_DATA; + } + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return ret; +} + +static int +dhdsdio_resume(void *context) +{ + dhd_bus_t *bus = (dhd_bus_t*)context; + ulong flags; + + DHD_ERROR(("%s Enter\n", __FUNCTION__)); + + if (bus->dhd->up == FALSE) { + return BCME_OK; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + if (dhd_os_check_if_up(bus->dhd)) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + bus->dhd->busstate = DHD_BUS_DATA; + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + return 0; +} + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. 
+ */ + +static bcmsdh_driver_t dhd_sdio = { + dhdsdio_probe, + dhdsdio_disconnect, + dhdsdio_suspend, + dhdsdio_resume +}; + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return bcmsdh_register(&dhd_sdio); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_unregister(); +} + +#if defined(BCMLXSDMMC) +/* Register a dummy SDIO client driver in order to be notified of new SDIO device */ +int dhd_bus_reg_sdio_notify(void* semaphore) +{ + return bcmsdh_reg_sdio_notify(semaphore); +} + +void dhd_bus_unreg_sdio_notify(void) +{ + bcmsdh_unreg_sdio_notify(); +} +#endif /* defined(BCMLXSDMMC) */ + +static int +dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = -1; + int offset = 0; + int len; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct + uint memblock_size = MEMBLOCK; +#ifdef DHD_DEBUG_DOWNLOADTIME + unsigned long initial_jiffies = 0; + uint firmware_sz = 0; +#endif // endif + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); + + image = dhd_os_open_image1(bus->dhd, pfw_path); + if (image == NULL) { + printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path); + goto err; + } + + /* Update the dongle image download block size depending on the F1 block size */ + if (sd_f1_blocksize == 512) + memblock_size = MAX_MEMBLOCK; + memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + memblock_size)); + goto err; + } + if (dhd_msg_level & DHD_TRACE_VAL) { + memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memptr_tmp == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % 
DHD_SDALIGN)); + +#ifdef DHD_DEBUG_DOWNLOADTIME + initial_jiffies = jiffies; +#endif // endif + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) { + // terence 20150412: fix for firmware failed to download + if (bus->dhd->conf->chip == BCM43340_CHIP_ID || + bus->dhd->conf->chip == BCM43341_CHIP_ID) { + if (len % 64 != 0) { + memset(memptr+len, 0, len%64); + len += (64 - len%64); + } + } + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + + if (offset == 0) { + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + + bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, memblock_size, offset)); + goto err; + } + + if (dhd_msg_level & DHD_TRACE_VAL) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, memptr_tmp, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + if (memcmp(memptr_tmp, memptr, len)) { + DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__)); + goto err; + } else + DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); + } + + offset += memblock_size; +#ifdef DHD_DEBUG_DOWNLOADTIME + firmware_sz += len; +#endif // endif + } + +#ifdef DHD_DEBUG_DOWNLOADTIME + DHD_ERROR(("Firmware download time for %u bytes: %u ms\n", + firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies))); +#endif // endif + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN); + if (dhd_msg_level & DHD_TRACE_VAL) { + if (memptr_tmp) + 
MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); + } + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcmerror; +} + +#ifdef DHD_UCODE_DOWNLOAD +/* Currently supported only for the chips in which ucode RAM is AXI addressable */ +static uint32 +dhdsdio_ucode_base(struct dhd_bus *bus) +{ + uint32 ucode_base = 0; + + switch ((uint16)bus->sih->chip) { + case BCM43012_CHIP_ID: + ucode_base = 0xE8020000; + break; + default: + DHD_ERROR(("%s: Unsupported!\n", __func__)); + break; + } + + return ucode_base; +} + +static int +dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path) +{ + int bcmerror = -1; + int offset = 0; + int len; + uint32 ucode_base; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + uint memblock_size = MEMBLOCK; +#ifdef DHD_DEBUG_DOWNLOADTIME + unsigned long initial_jiffies = 0; + uint firmware_sz = 0; +#endif // endif + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, ucode_path)); + + ucode_base = dhdsdio_ucode_base(bus); + + image = dhd_os_open_image1(bus->dhd, ucode_path); + if (image == NULL) + goto err; + + /* Update the dongle image download block size depending on the F1 block size */ + if (sd_f1_blocksize == 512) + memblock_size = MAX_MEMBLOCK; + + memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + memblock_size)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + +#ifdef DHD_DEBUG_DOWNLOADTIME + initial_jiffies = jiffies; +#endif // endif + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + + bcmerror = dhdsdio_membytes(bus, TRUE, (ucode_base + offset), memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: 
error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, memblock_size, offset)); + goto err; + } + + offset += memblock_size; +#ifdef DHD_DEBUG_DOWNLOADTIME + firmware_sz += len; +#endif // endif + } + +#ifdef DHD_DEBUG_DOWNLOADTIME + DHD_ERROR(("ucode download time for %u bytes: %u ms\n", + firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies))); +#endif // endif + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN); + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcmerror; +} /* dhdsdio_download_ucode_file */ + +void +dhd_bus_ucode_download(struct dhd_bus *bus) +{ + uint32 shaddr = 0, shdata = 0; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&shdata, 4); + + DHD_TRACE(("%s: shdata:[0x%08x :0x%08x]\n", __func__, shaddr, shdata)); + + if (shdata == UCODE_DOWNLOAD_REQUEST) + { + DHD_ERROR(("%s: Received ucode download request!\n", __func__)); + + /* Download the ucode */ + if (!dhd_get_ucode_path(bus->dhd)) { + DHD_ERROR(("%s: bus->uc_path not set!\n", __func__)); + return; + } + dhdsdio_download_ucode_file(bus, dhd_get_ucode_path(bus->dhd)); + + DHD_ERROR(("%s: Ucode downloaded successfully!\n", __func__)); + + shdata = UCODE_DOWNLOAD_COMPLETE; + dhdsdio_membytes(bus, TRUE, shaddr, (uint8 *)&shdata, 4); + } +} + +#endif /* DHD_UCODE_DOWNLOAD */ + +static int +dhdsdio_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = -1; + uint len; + void * image = NULL; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* For Get nvram from UEFI */ + if (nvram_file_exists) { + image = dhd_os_open_image1(bus->dhd, pnv_path); + if (image == NULL) { + printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path); + goto err; + } + } + + memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE); + if (memblock == NULL) { + 
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + + /* For Get nvram from image or UEFI (when image == NULL ) */ + len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); + + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { + bufp = (char *)memblock; + bufp[len] = 0; + len = process_nvram_vars(bufp, len); + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } else { + DHD_ERROR(("%s: error reading nvram file: %d\n", + __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcmerror; +} + +static int +_dhdsdio_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { + return bcmerror; + } + + /* Keep arm in reset */ + if (dhdsdio_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdsdio_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__)); + goto err; + } else { + embed = FALSE; + dlok = TRUE; + } + } + + BCM_REFERENCE(embed); + if (!dlok) { + DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__)); + goto err; + } + + /* External nvram takes precedence if specified */ + if (dhdsdio_download_nvram(bus)) { + DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); + goto err; + } + + /* 
Take arm out of reset */ + if (dhdsdio_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} + +static int +dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + int status; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete_fn, handle); + + return status; +} + +static int +dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle, int max_retry) +{ + int ret; + int i = 0; + int retries = 0; + bcmsdh_info_t *sdh; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + sdh = bus->sdh; + do { + ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, + pkt, complete_fn, handle); + + bus->f2txdata++; + ASSERT(ret != BCME_PENDING); + + if (ret == BCME_NODEVICE) { + DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__)); + } else if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + bus->f1regdata++; + bus->dhd->tx_errors++; + bcmsdh_abort(sdh, SDIO_FUNC_2); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + for (i = 0; i < READ_FRM_CNT_RETRIES; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI, + NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO, + NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + } + } while ((ret < 0) && retrydata && ++retries < max_retry); + + return ret; +} + +uint8 +dhd_bus_is_ioready(struct 
dhd_bus *bus) +{ + uint8 enable; + bcmsdh_info_t *sdh; + ASSERT(bus); + ASSERT(bus->sih != NULL); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + sdh = bus->sdh; + return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL)); +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chiprev; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_sih(struct dhd_bus *bus) +{ + return (void *)bus->sih; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +uint +dhd_bus_hdrlen(struct dhd_bus *bus) +{ + return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; +} + +void +dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val) +{ + bus->dotxinrx = val; +} + +/* + * dhdsdio_advertise_bus_cleanup advertises that clean up is under progress + * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts + * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for + * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so + * they will exit from there itself without marking dhd_bus_busy_state as BUSY. 
+ */ +static void +dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +static void +dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_REMOVE; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + int bcmerror = 0; + dhd_bus_t *bus; + unsigned long flags; + + bus = dhdp->bus; + + if (flag == TRUE) { + if (!bus->dhd->dongle_reset) { + DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); + dhdsdio_advertise_bus_cleanup(bus->dhd); + dhd_os_sdlock(dhdp); + dhd_os_wd_timer(dhdp, 0); +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + /* Expect app to have torn down any connection before calling */ + /* Stop the bus, disable F2 */ + dhd_bus_stop(bus, FALSE); +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + /* Clean up any pending IRQ */ + dhd_enable_oob_intr(bus, FALSE); + bcmsdh_oob_intr_set(bus->sdh, FALSE); + bcmsdh_oob_intr_unregister(bus->sdh); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + + /* Clean tx/rx buffer pointers, detach from the dongle */ + 
dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE); + + bus->dhd->dongle_reset = TRUE; + bus->dhd->up = FALSE; + dhd_txglom_enable(dhdp, FALSE); + dhd_os_sdunlock(dhdp); + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + printf("%s: WLAN OFF DONE\n", __FUNCTION__); + /* App can now remove power from device */ + } else + bcmerror = BCME_SDIO_ERROR; + } else { + /* App must have restored power to device before calling */ + + printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__); + + if (bus->dhd->dongle_reset) { + /* Turn on WLAN */ + dhd_os_sdlock(dhdp); + /* Reset SD client -- required if devreset is called + * via 'dhd devreset' iovar + */ + bcmsdh_reset(bus->sdh); + /* Attempt to re-attach & download */ + if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh, + (uint32 *)(uintptr)si_enum_base(bus->cl_devid), + bus->cl_devid)) { + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + + /* Attempt to download binary to the dongle */ + if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) && + dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) { + + /* Re-init bus, enable F2 transfer */ + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + dhd_enable_oob_intr(bus, TRUE); + bcmsdh_oob_intr_register(bus->sdh, + dhdsdio_isr, bus); + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#elif defined(FORCE_WOWLAN) + dhd_enable_oob_intr(bus, TRUE); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ + + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; + +#if !defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); +#endif // endif + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + + DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + } else { + dhd_bus_stop(bus, FALSE); + 
dhdsdio_release_dongle(bus, bus->dhd->osh, + TRUE, FALSE); + } + } else { + DHD_ERROR(("%s Failed to download binary to the dongle\n", + __FUNCTION__)); + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + bcmerror = BCME_SDIO_ERROR; + } + } else + bcmerror = BCME_SDIO_ERROR; + + dhd_os_sdunlock(dhdp); + } else { + printf("%s called when dongle is not in reset\n", + __FUNCTION__); + printf("Will call dhd_bus_start instead\n"); + dhd_bus_resume(dhdp, 1); +#if defined(HW_OOB) || defined(FORCE_WOWLAN) + dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih); // terence 20120615: fix for OOB initial issue +#endif + if ((bcmerror = dhd_bus_start(dhdp)) != 0) + DHD_ERROR(("%s: dhd_bus_start fail with %d\n", + __FUNCTION__, bcmerror)); + } + } + +#ifdef PKT_STATICS + memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t)); +#endif + return bcmerror; +} + +int dhd_bus_suspend(dhd_pub_t *dhdpub) +{ + return bcmsdh_stop(dhdpub->bus->sdh); +} + +int dhd_bus_resume(dhd_pub_t *dhdpub, int stage) +{ + return bcmsdh_start(dhdpub->bus->sdh, stage); +} + +/* Get Chip ID version */ +uint dhd_bus_chip_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + if (bus && bus->sih) + return bus->sih->chip; + else + return 0; +} + +/* Get Chip Rev ID version */ +uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + if (bus && bus->sih) + return bus->sih->chiprev; + else + return 0; +} + +/* Get Chip Pkg ID version */ +uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chippkg; +} + +int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num) +{ + *bus_type = bus->bus; + *bus_num = bus->bus_num; + *slot_num = bus->slot_num; + return 0; +} + +int +dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size) +{ + dhd_bus_t *bus; + + bus = dhdp->bus; + return dhdsdio_membytes(bus, set, address, data, size); +} + +#if defined(SUPPORT_MULTIPLE_REVISION) +static 
int +concate_revision_bcm4335(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + + uint chipver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = "_4335"; +#else + char chipver_tag[4] = {0, }; +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */ + + DHD_TRACE(("%s: BCM4335 Multiple Revision Check\n", __FUNCTION__)); + if (bus->sih->chip != BCM4335_CHIP_ID) { + DHD_ERROR(("%s:Chip is not BCM4335\n", __FUNCTION__)); + return -1; + } + chipver = bus->sih->chiprev; + DHD_ERROR(("CHIP VER = [0x%x]\n", chipver)); + if (chipver == 0x0) { + DHD_ERROR(("----- CHIP bcm4335_A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else if (chipver == 0x1) { + DHD_ERROR(("----- CHIP bcm4335_B0 -----\n")); +#if defined(SUPPORT_MULTIPLE_CHIPS) + strcat(chipver_tag, "_b0"); +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */ + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + return 0; +} + +static int +concate_revision_bcm4339(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + + uint chipver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = "_4339"; +#else + char chipver_tag[4] = {0, }; +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */ + + DHD_TRACE(("%s: BCM4339 Multiple Revision Check\n", __FUNCTION__)); + if (bus->sih->chip != BCM4339_CHIP_ID) { + DHD_ERROR(("%s:Chip is not BCM4339\n", __FUNCTION__)); + return -1; + } + chipver = bus->sih->chiprev; + DHD_ERROR(("CHIP VER = [0x%x]\n", chipver)); + if (chipver == 0x1) { + DHD_ERROR(("----- CHIP bcm4339_A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else { + DHD_ERROR(("----- CHIP bcm4339 unknown revision %d -----\n", + chipver)); + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + return 0; +} + +static int concate_revision_bcm4350(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + uint32 chip_ver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = {0, }; +#else + char chipver_tag[4] = {0, }; +#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */ + chip_ver = bus->sih->chiprev; + +#if 
defined(SUPPORT_MULTIPLE_CHIPS) + if (chip_ver == 3) + strcat(chipver_tag, "_4354"); + else + strcat(chipver_tag, "_4350"); +#endif // endif + + if (chip_ver == 3) { + DHD_ERROR(("----- CHIP 4354 A0 -----\n")); + strcat(chipver_tag, "_a0"); + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver)); + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + return 0; +} + +static int concate_revision_bcm4354(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + uint32 chip_ver; +#if defined(SUPPORT_MULTIPLE_CHIPS) + char chipver_tag[10] = "_4354"; +#else + char chipver_tag[4] = {0, }; +#endif /* SUPPORT_MULTIPLE_CHIPS */ + + chip_ver = bus->sih->chiprev; + if (chip_ver == 1) { + DHD_ERROR(("----- CHIP 4354 A1 -----\n")); + strcat(chipver_tag, "_a1"); + } else { + DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver)); + } + + strcat(fw_path, chipver_tag); + strcat(nv_path, chipver_tag); + + return 0; +} + +static int +concate_revision_bcm43454(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + char chipver_tag[10] = {0, }; +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT + int base_system_rev_for_nv = 0; +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */ + + DHD_TRACE(("%s: BCM43454 Multiple Revision Check\n", __FUNCTION__)); + if (bus->sih->chip != BCM43454_CHIP_ID) { + DHD_ERROR(("%s:Chip is not BCM43454!\n", __FUNCTION__)); + return -1; + } +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT + base_system_rev_for_nv = dhd_get_system_rev(); + if (base_system_rev_for_nv > 0) { + DHD_ERROR(("----- Board Rev [%d] -----\n", base_system_rev_for_nv)); + sprintf(chipver_tag, "_r%02d", base_system_rev_for_nv); + } +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */ +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW + DHD_ERROR(("----- Rev [%d] Fot MULTIPLE Board. -----\n", system_hw_rev)); + if ((system_hw_rev >= 8) && (system_hw_rev <= 11)) { + DHD_ERROR(("This HW is Rev 08 ~ 11. 
this is For FD-HW\n")); + strcat(chipver_tag, "_FD"); + } +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */ + + strcat(nv_path, chipver_tag); + return 0; +} + +int +concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path) +{ + int res = 0; + + if (!bus || !bus->sih) { + DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); + return -1; + } + + switch (bus->sih->chip) { + case BCM4335_CHIP_ID: + res = concate_revision_bcm4335(bus, fw_path, nv_path); + + break; + case BCM4339_CHIP_ID: + res = concate_revision_bcm4339(bus, fw_path, nv_path); + break; + case BCM4350_CHIP_ID: + res = concate_revision_bcm4350(bus, fw_path, nv_path); + break; + case BCM4354_CHIP_ID: + res = concate_revision_bcm4354(bus, fw_path, nv_path); + break; + case BCM43454_CHIP_ID: + res = concate_revision_bcm43454(bus, fw_path, nv_path); + break; + + default: + DHD_ERROR(("REVISION SPECIFIC feature is not required\n")); + return res; + } + + if (res == 0) { + } + return res; +} +#endif /* SUPPORT_MULTIPLE_REVISION */ + +void +dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, + char *pclm_path, char *pconf_path) +{ + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + bus->dhd->clm_path = pclm_path; + bus->dhd->conf_path = pconf_path; +} + +int +dhd_enableOOB(dhd_pub_t *dhd, bool sleep) +{ + dhd_bus_t *bus = dhd->bus; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + if (sleep) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) { + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + return BCME_BUSY; + } + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } else { + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + 
W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n")); + + /* Make sure we have SD bus access */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + return BCME_OK; +} + +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + bool wlfc_enabled = FALSE; + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED); +#endif // endif + if (!wlfc_enabled) { +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + /* Clear the data packet queues */ + pktq_flush(dhdp->osh, &bus->txq, TRUE); + } +} + +#ifdef BCMSDIO +int +dhd_sr_config(dhd_pub_t *dhd, bool on) +{ + dhd_bus_t *bus = dhd->bus; + + if (!bus->_srenab) + return -1; + + return dhdsdio_clk_devsleep_iovar(bus, on); +} + +uint16 +dhd_get_chipid(dhd_pub_t *dhd) +{ + dhd_bus_t *bus = dhd->bus; + + if (bus && bus->sih) + return (uint16)bus->sih->chip; + else + return 0; +} +#endif /* BCMSDIO */ + +#ifdef DEBUGGER +static uint32 +dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr) +{ + uint32 rval; + + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + rval = bcmsdh_reg_read(bus->sdh, addr, 4); + + dhd_os_sdunlock(bus->dhd); + + return rval; +} + +static void +dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val) +{ + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmsdh_reg_write(bus->sdh, addr, 4, val); + + dhd_os_sdunlock(bus->dhd); +} + +#endif /* DEBUGGER */ + +#if defined(BT_OVER_SDIO) +uint8 dhd_bus_cfg_read(void *h, uint fun_num, uint32 addr, int *err) +{ + uint8 intrd; + dhd_pub_t *dhdp = (dhd_pub_t *)h; + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + dhd_os_sdlock(bus->dhd); + + intrd = 
bcmsdh_cfg_read(bus->sdh, fun_num, addr, err); + + dhd_os_sdunlock(bus->dhd); + + return intrd; +} EXPORT_SYMBOL(dhd_bus_cfg_read); + +void dhd_bus_cfg_write(void *h, uint fun_num, uint32 addr, uint8 val, int *err) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)h; + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + dhd_os_sdlock(bus->dhd); + + bcmsdh_cfg_write(bus->sdh, fun_num, addr, val, err); + + dhd_os_sdunlock(bus->dhd); + +} EXPORT_SYMBOL(dhd_bus_cfg_write); + +static int +extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value) +{ + char field [8]; + + strncpy(field, line + start_pos, num_chars); + field [num_chars] = '\0'; + + return (sscanf (field, "%hX", value) == 1); +} + +static int +read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, uint16 * hi_addr, + uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes) +{ + int str_len; + uint16 num_data_bytes, addr, data_pos, type, w, i; + uint32 abs_base_addr32 = 0; + *num_bytes = 0; + + while (!*num_bytes) + { + str_len = dhd_os_gets_image(bus->dhd, line, BTFW_MAX_STR_LEN, file); + + DHD_TRACE(("%s: Len :0x%x %s\n", __FUNCTION__, str_len, line)); + + if (str_len == 0) { + break; + } else if (str_len > 9) { + extract_hex_field(line, 1, 2, &num_data_bytes); + extract_hex_field(line, 3, 4, &addr); + extract_hex_field(line, 7, 2, &type); + + data_pos = 9; + for (i = 0; i < num_data_bytes; i++) { + extract_hex_field(line, data_pos, 2, &w); + data_bytes [i] = (uint8)(w & 0x00FF); + data_pos += 2; + } + + if (type == BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS) { + *hi_addr = (data_bytes [0] << 8) | data_bytes [1]; + *addr_mode = BTFW_ADDR_MODE_EXTENDED; + } else if (type == BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS) { + *hi_addr = (data_bytes [0] << 8) | data_bytes [1]; + *addr_mode = BTFW_ADDR_MODE_SEGMENT; + } else if (type == BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS) { + abs_base_addr32 = (data_bytes [0] << 24) | (data_bytes [1] << 16) | + (data_bytes [2] << 8) | data_bytes 
[3]; + *addr_mode = BTFW_ADDR_MODE_LINEAR32; + } else if (type == BTFW_HEX_LINE_TYPE_DATA) { + *dest_addr = addr; + if (*addr_mode == BTFW_ADDR_MODE_EXTENDED) + *dest_addr += (*hi_addr << 16); + else if (*addr_mode == BTFW_ADDR_MODE_SEGMENT) + *dest_addr += (*hi_addr << 4); + else if (*addr_mode == BTFW_ADDR_MODE_LINEAR32) + *dest_addr += abs_base_addr32; + *num_bytes = num_data_bytes; + } + } + } + return (*num_bytes > 0); +} + +static int +_dhdsdio_download_btfw(struct dhd_bus *bus) +{ + int bcm_error = -1; + void *image = NULL; + uint8 *mem_blk = NULL, *mem_ptr = NULL, *data_ptr = NULL; + + uint32 offset_addr = 0, offset_len = 0, bytes_to_write = 0; + + char *line = NULL; + uint32 dest_addr = 0, num_bytes; + uint16 hiAddress = 0; + uint32 start_addr, start_data, end_addr, end_data, i, index, pad, + bt2wlan_pwrup_adr; + + int addr_mode = BTFW_ADDR_MODE_EXTENDED; + + /* Out immediately if no image to download */ + if ((bus->btfw_path == NULL) || (bus->btfw_path[0] == '\0')) { + return 0; + } + + image = dhd_os_open_image1(bus->dhd, bus->btfw_path); + if (image == NULL) + goto err; + + mem_ptr = mem_blk = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN); + if (mem_blk == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN)); + goto err; + } + if ((uint32)(uintptr)mem_blk % DHD_SDALIGN) + mem_ptr += (DHD_SDALIGN - ((uint32)(uintptr)mem_blk % DHD_SDALIGN)); + + data_ptr = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE - 8); + if (data_ptr == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + BTFW_DOWNLOAD_BLK_SIZE - 8)); + goto err; + } + /* Write to BT register to hold WLAN wake high during BT FW download */ + bt2wlan_pwrup_adr = BTMEM_OFFSET + BT2WLAN_PWRUP_ADDR; + bcmsdh_reg_write(bus->sdh, bt2wlan_pwrup_adr, 4, BT2WLAN_PWRUP_WAKE); + /* + * Wait for at least 2msec for the clock to be ready/Available. 
+ */ + OSL_DELAY(2000); + + line = MALLOC(bus->dhd->osh, BTFW_MAX_STR_LEN); + if (line == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, BTFW_MAX_STR_LEN)); + goto err; + } + memset(line, 0, BTFW_MAX_STR_LEN); + + while (read_more_btbytes (bus, image, line, &addr_mode, &hiAddress, &dest_addr, + data_ptr, &num_bytes)) { + + DHD_TRACE(("read %d bytes at address %08X\n", num_bytes, dest_addr)); + + start_addr = BTMEM_OFFSET + dest_addr; + index = 0; + + /* Make sure the start address is 4 byte aligned to avoid alignment issues + * with SD host controllers + */ + if (!ISALIGNED(start_addr, 4)) { + pad = start_addr % 4; + start_addr = ROUNDDN(start_addr, 4); + start_data = bcmsdh_reg_read(bus->sdh, start_addr, 4); + for (i = 0; i < pad; i++, index++) { + mem_ptr[index] = (uint8)((uint8 *)&start_data)[i]; + } + } + bcopy(data_ptr, &(mem_ptr[index]), num_bytes); + index += num_bytes; + + /* Make sure the length is multiple of 4bytes to avoid alignment issues + * with SD host controllers + */ + end_addr = start_addr + index; + if (!ISALIGNED(end_addr, 4)) { + end_addr = ROUNDDN(end_addr, 4); + end_data = bcmsdh_reg_read(bus->sdh, end_addr, 4); + for (i = (index % 4); i < 4; i++, index++) { + mem_ptr[index] = (uint8)((uint8 *)&end_data)[i]; + } + } + + offset_addr = start_addr & 0xFFF; + offset_len = offset_addr + index; + if (offset_len <= 0x1000) { + bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, index); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + } + else { + bytes_to_write = 0x1000 - offset_addr; + bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, + bytes_to_write); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + + OSL_DELAY(10000); + + bcm_error = dhdsdio_membytes(bus, TRUE, (start_addr + 
bytes_to_write), + (mem_ptr + bytes_to_write), (index - bytes_to_write)); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + } + memset(line, 0, BTFW_MAX_STR_LEN); + } + + bcm_error = 0; +err: + if (mem_blk) + MFREE(bus->dhd->osh, mem_blk, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN); + + if (data_ptr) + MFREE(bus->dhd->osh, data_ptr, BTFW_DOWNLOAD_BLK_SIZE - 8); + + if (line) + MFREE(bus->dhd->osh, line, BTFW_MAX_STR_LEN); + + if (image) + dhd_os_close_image1(bus->dhd, image); + + return bcm_error; +} + +static int +dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + + DHD_TRACE(("%s: btfw path=%s\n", + __FUNCTION__, bus->btfw_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + dhd_os_sdlock(bus->dhd); + + /* Download the firmware */ + ret = _dhdsdio_download_btfw(bus); + + dhd_os_sdunlock(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + + return ret; +} + +int +dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, + char *pbtfw_path) +{ + int ret; + + bus->btfw_path = pbtfw_path; + + ret = dhdsdio_download_btfw(bus, osh, bus->sdh); + + return ret; +} +#endif /* defined (BT_OVER_SDIO) */ + +void +dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + trap_t *tr = &bus->dhd->last_trap_info; + + bcm_bprintf(strbuf, + "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + "lp 0x%x, rpc 0x%x Trap offset 0x%x, " + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr), + ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc), + ltoh32(bus->dongle_trap_addr), + ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), + ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7)); + +} + +static int +dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len) +{ + int ret = -1; + + ret = dhd_bcmsdh_send_buf(bus, 
bcmsdh_cur_sbwad(((dhd_bus_t*)bus)->sdh), + SDIO_FUNC_2, F2SYNC, frame, len, NULL, NULL, NULL, TXRETRIES); + + if (ret == BCME_OK) + ((dhd_bus_t*)bus)->tx_seq = (((dhd_bus_t*)bus)->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + + return ret; +} + +/* Function to set the min res mask depending on the chip ID used */ +bool +dhd_bus_set_default_min_res_mask(struct dhd_bus *bus) +{ + if ((bus == NULL) || (bus->sih == NULL)) { + DHD_ERROR(("%s(): Invalid Arguments \r\n", __FUNCTION__)); + return FALSE; + } + + switch (bus->sih->chip) { + case BCM4339_CHIP_ID: + bcmsdh_reg_write(bus->sdh, SI_ENUM_BASE(bus->sih) + 0x618, 4, 0x3fcaf377); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + return FALSE; + } + break; + + case BCM43012_CHIP_ID: + bcmsdh_reg_write(bus->sdh, + si_get_pmu_reg_addr(bus->sih, OFFSETOF(pmuregs_t, min_res_mask)), + 4, DEFAULT_43012_MIN_RES_MASK); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + return FALSE; + } + break; + + default: + DHD_ERROR(("%s: Unhandled chip id\n", __FUNCTION__)); + return FALSE; + } + + return TRUE; +} + +/* Function to reset PMU registers */ +void +dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp) +{ + struct dhd_bus *bus = dhdp->bus; + bcmsdh_reg_write(bus->sdh, si_get_pmu_reg_addr(bus->sih, + OFFSETOF(pmuregs_t, swscratch)), 4, 0x0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + } +} + +#ifdef DHD_ULP + +/* Function to disable console messages on entering ULP mode */ +void +dhd_bus_ulp_disable_console(dhd_pub_t *dhdp) +{ +#ifdef DHD_DEBUG + DHD_ERROR(("Flushing and disabling console messages\n")); + + /* Save the console print interval */ + dhd_ulp_save_console_interval(dhdp); + + /* Flush the console buffer before disabling */ + dhdsdio_readconsole(dhdp->bus); + dhdp->dhd_console_ms = 0; +#endif /* DHD_DEBUG */ +} + +/* Function for redownloading 
firmaware */ +static int +dhd_bus_ulp_reinit_fw(dhd_bus_t *bus) +{ + int bcmerror = 0; + + /* After firmware redownload tx/rx seq are reset accordingly these values are + reset on DHD side tx_max is initially set to 4, which later is updated by FW + */ + bus->tx_seq = bus->rx_seq = 0; + bus->tx_max = 4; + + if (dhd_bus_download_firmware(bus, bus->dhd->osh, + bus->fw_path, bus->nv_path) >= 0) { + + /* Re-init bus, enable F2 transfer */ + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { + bus->dhd->up = TRUE; + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + + dhd_ulp_set_ulp_state(bus->dhd, DHD_ULP_READY); +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) + dhd_enable_oob_intr(bus, TRUE); + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ +#ifdef DHD_DEBUG + /* Re-enable the console messages on FW redownload to default value */ + dhd_ulp_restore_console_interval(bus->dhd); +#endif /* DHD_DEBUG */ + } else { + DHD_ERROR(("bus init failed\n")); + dhd_bus_stop(bus, FALSE); + dhdsdio_release_dongle(bus, bus->dhd->osh, + TRUE, FALSE); + } + } else + bcmerror = BCME_SDIO_ERROR; + + return bcmerror; +} +#endif /* DHD_ULP */ + +int +dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read) +{ + int bcmerror = 0; + struct dhd_bus *bus = dhdp->bus; + + if (read) { + *data = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + } else { + bcmsdh_reg_write(bus->sdh, addr, size, *data); + } + + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + + return bcmerror; +} + +int dhd_get_idletime(dhd_pub_t *dhd) +{ + return dhd->bus->idletime; +} + +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_bus_get_wakecount(dhd_pub_t *dhd) +{ + return &dhd->bus->wake_counts; +} +int +dhd_bus_get_bus_wake(dhd_pub_t *dhd) +{ + return bcmsdh_set_get_wake(dhd->bus->sdh, 0); +} +#endif /* DHD_WAKE_STATUS */ diff --git a/bcmdhd.100.10.315.x/dhd_static_buf.c 
b/bcmdhd.100.10.315.x/dhd_static_buf.c new file mode 100644 index 0000000..2e29474 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_static_buf.c @@ -0,0 +1,535 @@ +#include +#include +#include +#include +#include +#include +#include + +#define DHD_STATIC_VERSION_STR "100.10.315.1" + +#define BCMDHD_SDIO +#define BCMDHD_PCIE + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, +#if defined(BCMDHD_SDIO) + DHD_PREALLOC_RXBUF = 1, + DHD_PREALLOC_DATABUF = 2, +#endif + DHD_PREALLOC_OSL_BUF = 3, + DHD_PREALLOC_SKB_BUF = 4, + DHD_PREALLOC_WIPHY_ESCAN0 = 5, + DHD_PREALLOC_WIPHY_ESCAN1 = 6, + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, +#ifdef BCMDHD_PCIE + DHD_PREALLOC_IF_FLOW_LKUP = 9, +#endif + DHD_PREALLOC_MEMDUMP_BUF = 10, + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_STAT_REPORT_BUF = 18, + DHD_PREALLOC_WL_ESCAN_INFO = 19, + DHD_PREALLOC_FW_VERBOSE_RING = 20, + DHD_PREALLOC_FW_EVENT_RING = 21, + DHD_PREALLOC_DHD_EVENT_RING = 22, + DHD_PREALLOC_NAN_EVENT_RING = 23, + DHD_PREALLOC_MAX +}; + +#define STATIC_BUF_MAX_NUM 20 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) + +#define DHD_PREALLOC_PROT_SIZE (16 * 1024) +#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024) +#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024) +#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) +#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024) +#define DHD_PREALLOC_DHD_INFO_SIZE (32 * 1024) +#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (810 * 1024) +#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024) +#define DHD_PREALLOC_WL_ESCAN_INFO_SIZE (66 * 1024) +#ifdef CONFIG_64BIT +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2) +#else +#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif +#define FW_VERBOSE_RING_SIZE (256 * 1024) +#define FW_EVENT_RING_SIZE 
(64 * 1024) +#define DHD_EVENT_RING_SIZE (64 * 1024) +#define NAN_EVENT_RING_SIZE (64 * 1024) + +#if defined(CONFIG_64BIT) +#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) +#else +#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif /* CONFIG_64BIT */ +#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024) + +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define DHD_SKB_1PAGE_BUF_NUM 8 +#ifdef BCMDHD_PCIE +#define DHD_SKB_2PAGE_BUF_NUM 64 +#elif defined(BCMDHD_SDIO) +#define DHD_SKB_2PAGE_BUF_NUM 8 +#endif +#define DHD_SKB_4PAGE_BUF_NUM 1 + +/* The number is defined in linux_osl.c + * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM + * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM + */ +#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ + (DHD_SKB_2PAGE_BUF_NUM)) +#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM)) + +void *wlan_static_prot = NULL; +void *wlan_static_rxbuf = NULL; +void *wlan_static_databuf = NULL; +void *wlan_static_osl_buf = NULL; +void *wlan_static_scan_buf0 = NULL; +void *wlan_static_scan_buf1 = NULL; +void *wlan_static_dhd_info_buf = NULL; +void *wlan_static_dhd_wlfc_info_buf = NULL; +void *wlan_static_if_flow_lkup = NULL; +void *wlan_static_dhd_memdump_ram_buf = NULL; +void *wlan_static_dhd_wlfc_hanger_buf = NULL; +void *wlan_static_wl_escan_info_buf = NULL; +void *wlan_static_fw_verbose_ring_buf = NULL; +void *wlan_static_fw_event_ring_buf = NULL; +void *wlan_static_dhd_event_ring_buf = NULL; +void *wlan_static_nan_event_ring_buf = NULL; + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +void *dhd_wlan_mem_prealloc(int section, unsigned long size) +{ + pr_err("%s: sectoin %d, %ld\n", __func__, section, size); + if (section == 
DHD_PREALLOC_PROT) + return wlan_static_prot; + +#if defined(BCMDHD_SDIO) + if (section == DHD_PREALLOC_RXBUF) + return wlan_static_rxbuf; + + if (section == DHD_PREALLOC_DATABUF) + return wlan_static_databuf; +#endif /* BCMDHD_SDIO */ + + if (section == DHD_PREALLOC_SKB_BUF) + return wlan_static_skb; + + if (section == DHD_PREALLOC_WIPHY_ESCAN0) + return wlan_static_scan_buf0; + + if (section == DHD_PREALLOC_WIPHY_ESCAN1) + return wlan_static_scan_buf1; + + if (section == DHD_PREALLOC_OSL_BUF) { + if (size > DHD_PREALLOC_OSL_BUF_SIZE) { + pr_err("request OSL_BUF(%lu) > %ld\n", + size, DHD_PREALLOC_OSL_BUF_SIZE); + return NULL; + } + return wlan_static_osl_buf; + } + + if (section == DHD_PREALLOC_DHD_INFO) { + if (size > DHD_PREALLOC_DHD_INFO_SIZE) { + pr_err("request DHD_INFO size(%lu) > %d\n", + size, DHD_PREALLOC_DHD_INFO_SIZE); + return NULL; + } + return wlan_static_dhd_info_buf; + } + if (section == DHD_PREALLOC_DHD_WLFC_INFO) { + if (size > WLAN_DHD_WLFC_BUF_SIZE) { + pr_err("request DHD_WLFC_INFO size(%lu) > %d\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_info_buf; + } +#ifdef BCMDHD_PCIE + if (section == DHD_PREALLOC_IF_FLOW_LKUP) { + if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) { + pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n", + size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); + return NULL; + } + + return wlan_static_if_flow_lkup; + } +#endif /* BCMDHD_PCIE */ + if (section == DHD_PREALLOC_MEMDUMP_RAM) { + if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) { + pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_MEMDUMP_RAM_SIZE); + return NULL; + } + + return wlan_static_dhd_memdump_ram_buf; + } + if (section == DHD_PREALLOC_DHD_WLFC_HANGER) { + if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) { + pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n", + size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_hanger_buf; + } + if (section == DHD_PREALLOC_WL_ESCAN_INFO) { + 
if (size > DHD_PREALLOC_WL_ESCAN_INFO_SIZE) { + pr_err("request DHD_PREALLOC_WL_ESCAN_INFO_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); + return NULL; + } + + return wlan_static_wl_escan_info_buf; + } + if (section == DHD_PREALLOC_FW_VERBOSE_RING) { + if (size > FW_VERBOSE_RING_SIZE) { + pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n", + size, FW_VERBOSE_RING_SIZE); + return NULL; + } + + return wlan_static_fw_verbose_ring_buf; + } + if (section == DHD_PREALLOC_FW_EVENT_RING) { + if (size > FW_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n", + size, FW_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_fw_event_ring_buf; + } + if (section == DHD_PREALLOC_DHD_EVENT_RING) { + if (size > DHD_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n", + size, DHD_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_dhd_event_ring_buf; + } + if (section == DHD_PREALLOC_NAN_EVENT_RING) { + if (size > NAN_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n", + size, NAN_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_nan_event_ring_buf; + } + if ((section < 0) || (section > DHD_PREALLOC_MAX)) + pr_err("request section id(%d) is out of max index %d\n", + section, DHD_PREALLOC_MAX); + + pr_err("%s: failed to alloc section %d, size=%ld\n", + __func__, section, size); + + return NULL; +} +EXPORT_SYMBOL(dhd_wlan_mem_prealloc); + +static int dhd_init_wlan_mem(void) +{ + int i; + int j; + printk(KERN_ERR "%s(): %s\n", __func__, DHD_STATIC_VERSION_STR); + + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, + DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE); + } + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = 
dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, + DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE); + } + +#if defined(BCMDHD_SDIO) + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE); + if (!wlan_static_skb[i]) + goto err_skb_alloc; + pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, + DHD_PREALLOC_SKB_BUF, i, DHD_SKB_4PAGE_BUFSIZE); +#endif /* BCMDHD_SDIO */ + + wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL); + if (!wlan_static_prot) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE); + +#if defined(BCMDHD_SDIO) + wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL); + if (!wlan_static_rxbuf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE); + + wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL); + if (!wlan_static_databuf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE); +#endif /* BCMDHD_SDIO */ + + wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_osl_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%ld\n", __func__, + DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE); + + wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf0) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE); + + wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_info_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE); + + wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_wlfc_info_buf) 
+ goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE); + +#ifdef BCMDHD_PCIE + wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL); + if (!wlan_static_if_flow_lkup) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_IF_FLOW_LKUP, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); +#endif /* BCMDHD_PCIE */ + + wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_memdump_ram_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_MEMDUMP_RAM, DHD_PREALLOC_MEMDUMP_RAM_SIZE); + + wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_wlfc_hanger_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); + + wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_ESCAN_INFO_SIZE, GFP_KERNEL); + if (!wlan_static_wl_escan_info_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_WL_ESCAN_INFO, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); + + wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL); + if (!wlan_static_fw_verbose_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE); + + wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL); + if (!wlan_static_fw_event_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_FW_EVENT_RING, FW_EVENT_RING_SIZE); + + wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_event_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE); + + wlan_static_nan_event_ring_buf = 
kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL); + if (!wlan_static_nan_event_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_NAN_EVENT_RING, NAN_EVENT_RING_SIZE); + + return 0; + +err_mem_alloc: + + if (wlan_static_prot) + kfree(wlan_static_prot); + +#if defined(BCMDHD_SDIO) + if (wlan_static_rxbuf) + kfree(wlan_static_rxbuf); + + if (wlan_static_databuf) + kfree(wlan_static_databuf); +#endif /* BCMDHD_SDIO */ + + if (wlan_static_osl_buf) + kfree(wlan_static_osl_buf); + + if (wlan_static_scan_buf0) + kfree(wlan_static_scan_buf0); + + if (wlan_static_scan_buf1) + kfree(wlan_static_scan_buf1); + + if (wlan_static_dhd_info_buf) + kfree(wlan_static_dhd_info_buf); + + if (wlan_static_dhd_wlfc_info_buf) + kfree(wlan_static_dhd_wlfc_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_if_flow_lkup) + kfree(wlan_static_if_flow_lkup); +#endif /* BCMDHD_PCIE */ + + if (wlan_static_dhd_memdump_ram_buf) + kfree(wlan_static_dhd_memdump_ram_buf); + + if (wlan_static_dhd_wlfc_hanger_buf) + kfree(wlan_static_dhd_wlfc_hanger_buf); + + if (wlan_static_wl_escan_info_buf) + kfree(wlan_static_wl_escan_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_fw_verbose_ring_buf) + kfree(wlan_static_fw_verbose_ring_buf); + + if (wlan_static_fw_event_ring_buf) + kfree(wlan_static_fw_event_ring_buf); + + if (wlan_static_dhd_event_ring_buf) + kfree(wlan_static_dhd_event_ring_buf); + + if (wlan_static_nan_event_ring_buf) + kfree(wlan_static_nan_event_ring_buf); +#endif /* BCMDHD_PCIE */ + + pr_err("%s: Failed to mem_alloc for WLAN\n", __func__); + + i = WLAN_SKB_BUF_NUM; + +err_skb_alloc: + pr_err("%s: Failed to skb_alloc for WLAN\n", __func__); + for (j = 0; j < i; j++) + dev_kfree_skb(wlan_static_skb[j]); + + return -ENOMEM; +} + +static int __init +dhd_static_buf_init(void) +{ + dhd_init_wlan_mem(); + + return 0; +} + +static void __exit +dhd_static_buf_exit(void) +{ + int i; + + pr_err("%s()\n", __FUNCTION__); + + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; 
i++) { + if (wlan_static_skb[i]) + dev_kfree_skb(wlan_static_skb[i]); + } + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + if (wlan_static_skb[i]) + dev_kfree_skb(wlan_static_skb[i]); + } + +#if defined(BCMDHD_SDIO) + if (wlan_static_skb[i]) + dev_kfree_skb(wlan_static_skb[i]); +#endif /* BCMDHD_SDIO */ + + if (wlan_static_prot) + kfree(wlan_static_prot); + +#if defined(BCMDHD_SDIO) + if (wlan_static_rxbuf) + kfree(wlan_static_rxbuf); + + if (wlan_static_databuf) + kfree(wlan_static_databuf); +#endif /* BCMDHD_SDIO */ + + if (wlan_static_osl_buf) + kfree(wlan_static_osl_buf); + + if (wlan_static_scan_buf0) + kfree(wlan_static_scan_buf0); + + if (wlan_static_scan_buf1) + kfree(wlan_static_scan_buf1); + + if (wlan_static_dhd_info_buf) + kfree(wlan_static_dhd_info_buf); + + if (wlan_static_dhd_wlfc_info_buf) + kfree(wlan_static_dhd_wlfc_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_if_flow_lkup) + kfree(wlan_static_if_flow_lkup); +#endif /* BCMDHD_PCIE */ + + if (wlan_static_dhd_memdump_ram_buf) + kfree(wlan_static_dhd_memdump_ram_buf); + + if (wlan_static_dhd_wlfc_hanger_buf) + kfree(wlan_static_dhd_wlfc_hanger_buf); + + if (wlan_static_wl_escan_info_buf) + kfree(wlan_static_wl_escan_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_fw_verbose_ring_buf) + kfree(wlan_static_fw_verbose_ring_buf); + + if (wlan_static_fw_event_ring_buf) + kfree(wlan_static_fw_event_ring_buf); + + if (wlan_static_dhd_event_ring_buf) + kfree(wlan_static_dhd_event_ring_buf); + + if (wlan_static_nan_event_ring_buf) + kfree(wlan_static_nan_event_ring_buf); +#endif + + return; +} + +module_init(dhd_static_buf_init); + +module_exit(dhd_static_buf_exit); diff --git a/bcmdhd.100.10.315.x/dhd_wlfc.c b/bcmdhd.100.10.315.x/dhd_wlfc.c new file mode 100644 index 0000000..09eab7d --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_wlfc.c @@ -0,0 +1,4578 @@ +/* + * DHD PROP_TXSTATUS Module. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_wlfc.c 743239 2018-01-25 08:33:18Z $ + * + */ + +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include + +#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */ +#include +#include +#endif // endif + +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +/* + * wlfc naming and lock rules: + * + * 1. Private functions name like _dhd_wlfc_XXX, declared as static and avoid wlfc lock operation. + * 2. Public functions name like dhd_wlfc_XXX, use wlfc lock if needed. + * 3. Non-Proptxstatus module call public functions only and avoid wlfc lock operation. 
+ * + */ + +#if defined(DHD_WLFC_THREAD) +#define WLFC_THREAD_QUICK_RETRY_WAIT_MS 10 /* 10 msec */ +#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */ +#endif /* defined (DHD_WLFC_THREAD) */ + +#ifdef PROP_TXSTATUS + +#define DHD_WLFC_QMON_COMPLETE(entry) + +/** reordering related */ + +#if defined(DHD_WLFC_THREAD) +static void +_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp) +{ + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); +} +#endif /* DHD_WLFC_THREAD */ + +static uint16 +_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq) +{ + uint16 seq; + + if (!p) { + return 0xffff; + } + + seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + if (seq < current_seq) { + /* wrap around */ + seq += 256; + } + + return seq; +} + +/** + * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder + * suppressed packets. + * @param[in] pq caller supplied packet queue to enqueue the packet on + * @param[in] prec precedence of the to-be-queued packet + * @param[in] p transmit packet to enqueue + * @param[in] qHead if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order. + * @param[in] current_seq + * @param[in] reOrder reOrder on odd precedence (=suppress queue) + */ +static void +_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, + uint8 current_seq, bool reOrder) +{ + struct pktq_prec *q; + uint16 seq, seq2; + void *p2, *p2_prev; + + if (!p) + return; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) { + /* empty queue */ + q->head = p; + q->tail = p; + } else { + if (reOrder && (prec & 1)) { + seq = _dhd_wlfc_adjusted_seq(p, current_seq); + p2 = qHead ? 
q->head : q->tail; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) { + /* need reorder */ + p2 = q->head; + p2_prev = NULL; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + while (seq > seq2) { + p2_prev = p2; + p2 = PKTLINK(p2); + if (!p2) { + break; + } + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + } + + if (p2_prev == NULL) { + /* insert head */ + PKTSETLINK(p, q->head); + q->head = p; + } else if (p2 == NULL) { + /* insert tail */ + PKTSETLINK(p2_prev, p); + q->tail = p; + } else { + /* insert after p2_prev */ + PKTSETLINK(p, PKTLINK(p2_prev)); + PKTSETLINK(p2_prev, p); + } + goto exit; + } + } + + if (qHead) { + PKTSETLINK(p, q->head); + q->head = p; + } else { + PKTSETLINK(q->tail, p); + q->tail = p; + } + } + +exit: + + q->n_pkts++; + pq->n_pkts_tot++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; +} /* _dhd_wlfc_prec_enque */ + +/** + * Create a place to store all packet pointers submitted to the firmware until a status comes back, + * suppress or otherwise. + * + * hang-er: noun, a contrivance on which things are hung, as a hook. 
+ */ +/** @deprecated soon */ +static void* +_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items) +{ + int i; + wlfc_hanger_t* hanger; + + /* allow only up to a specific size for now */ + ASSERT(max_items == WLFC_HANGER_MAXITEMS); + + if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER, + WLFC_HANGER_SIZE(max_items))) == NULL) { + return NULL; + } + memset(hanger, 0, WLFC_HANGER_SIZE(max_items)); + hanger->max_items = max_items; + + for (i = 0; i < hanger->max_items; i++) { + hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + } + return hanger; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger) +{ + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items)); + return BCME_OK; + } + return BCME_BADARG; +} + +/** @deprecated soon */ +static uint16 +_dhd_wlfc_hanger_get_free_slot(void* hanger) +{ + uint32 i; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + i = h->slot_pos + 1; + if (i == h->max_items) { + i = 0; + } + while (i != h->slot_pos) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->slot_pos = i; + return (uint16)i; + } + i++; + if (i == h->max_items) + i = 0; + } + h->failed_slotfind++; + } + return WLFC_HANGER_MAXITEMS; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *gen = 0xff; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *gen = h->items[slot_id].gen; + } + else { + DHD_ERROR(("Error: %s():%d item not used\n", + __FUNCTION__, __LINE__)); + rc = BCME_NOTFOUND; + } + + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_pushpkt(void* 
hanger, void* pkt, uint32 slot_id) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h && (slot_id < WLFC_HANGER_MAXITEMS)) { + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE; + h->items[slot_id].pkt = pkt; + h->items[slot_id].pkt_state = 0; + h->items[slot_id].pkt_txstatus = 0; + h->pushed++; + } else { + h->failed_to_push++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *pktout = NULL; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *pktout = h->items[slot_id].pkt; + if (remove_from_hanger) { + h->items[slot_id].state = + WLFC_HANGER_ITEM_STATE_FREE; + h->items[slot_id].pkt = NULL; + h->items[slot_id].gen = 0xff; + h->items[slot_id].identifier = 0; + h->popped++; + } + } else { + h->failed_to_pop++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + if (h) { + h->items[slot_id].gen = gen; + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED; + } else { + rc = BCME_BADARG; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** remove reference of specific packet in hanger */ +/** @deprecated soon */ +static bool 
+_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt) +{ + int i; + + if (!h || !pkt) { + return FALSE; + } + + i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt))); + + if ((i < h->max_items) && (pkt == h->items[i].pkt)) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + h->items[i].pkt = NULL; + h->items[i].gen = 0xff; + h->items[i].identifier = 0; + return TRUE; + } else { + DHD_ERROR(("Error: %s():%d item not suppressed\n", + __FUNCTION__, __LINE__)); + } + } + + return FALSE; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p) +{ + wlfc_mac_descriptor_t* entry; + uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + + if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[entry_idx]; + else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pktq_penq(&entry->afq, prec, p); + + return BCME_OK; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec, + void **pktout) +{ + wlfc_mac_descriptor_t *entry; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!ctx) { + DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout)); + return BCME_BADARG; + } + + if (pktout) { + *pktout = NULL; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; 
+ else + entry = &ctx->destination_entries.other; + + pq = &entry->afq; + + ASSERT(prec < pq->num_prec); + + q = &pq->q[prec]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) + { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + bcm_pkt_validate_chk(p); + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt, + WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head))))); + ctx->stats.ooo_pkts[prec]++; + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->n_pkts--; + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + PKTSETLINK(p, NULL); + + if (pktout) { + *pktout = p; + } + + return BCME_OK; +} /* _dhd_wlfc_deque_afq */ + +/** + * Flow control information piggy backs on packets, in the form of one or more TLVs. This function + * pushes one or more TLVs onto a packet that is going to be sent towards the dongle. + * + * @param[in] ctx + * @param[in/out] packet + * @param[in] tim_signal TRUE if parameter 'tim_bmp' is valid + * @param[in] tim_bmp + * @param[in] mac_handle + * @param[in] htodtag + * @param[in] htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon + * earlier between host and firmware. 
+ * @param[in] skip_wlfc_hdr + */ +static int +_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal, + uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr) +{ + uint32 wl_pktinfo = 0; + uint8* wlh; + uint8 dataOffset = 0; + uint8 fillers; + uint8 tim_signal_len = 0; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + struct bdc_header *h; + void *p = *packet; + + if (skip_wlfc_hdr) + goto push_bdc_hdr; + + if (tim_signal) { + tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + } + + /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ + dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len; + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + dataOffset += WLFC_CTL_VALUE_LEN_SEQ; + } + + fillers = ROUNDUP(dataOffset, 4) - dataOffset; + dataOffset += fillers; + + PKTPUSH(ctx->osh, p, dataOffset); + wlh = (uint8*) PKTDATA(ctx->osh, p); + + wl_pktinfo = htol32(htodtag); + + wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG; + wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG; + memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32)); + + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + uint16 wl_seqinfo = htol16(htodseq); + wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ; + memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo, + WLFC_CTL_VALUE_LEN_SEQ); + } + + if (tim_signal_len) { + wlh[dataOffset - fillers - tim_signal_len ] = + WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 1] = + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle; + wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp; + } + if (fillers) + memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers); + +push_bdc_hdr: + PKTPUSH(ctx->osh, p, BDC_HEADER_LEN); + h = (struct bdc_header *)PKTDATA(ctx->osh, p); + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(p)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + 
h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = dataOffset >> 2; + BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p))); + *packet = p; + return BCME_OK; +} /* _dhd_wlfc_pushheader */ + +/** + * Removes (PULLs) flow control related headers from the caller supplied packet, is invoked eg + * when a packet is about to be freed. + */ +static int +_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf) +{ + struct bdc_header *h; + + if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf); + + /* pull BDC header */ + PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN); + + if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2))); + return BCME_ERROR; + } + + /* pull wl-header */ + PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2)); + return BCME_OK; +} + +/** + * @param[in/out] p packet + */ +static wlfc_mac_descriptor_t* +_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) +{ + int i; + wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes; + uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p)); + uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p)); + wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p)); + int iftype = ctx->destination_entries.interfaces[ifid].iftype; + + /* saved one exists, return it */ + if (entry) + return entry; + + /* Multicast destination, STA and P2P clients get the interface entry. + * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations + * have their own entry. 
+ */ + if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) || + iftype == WLC_E_IF_ROLE_P2P_CLIENT) && + (ctx->destination_entries.interfaces[ifid].occupied)) { + entry = &ctx->destination_entries.interfaces[ifid]; + } + + if (entry && ETHER_ISMULTI(dstn)) { + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + return entry; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (table[i].occupied) { + if (table[i].interface_id == ifid) { + if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) { + entry = &table[i]; + break; + } + } + } + } + + if (entry == NULL) + entry = &ctx->destination_entries.other; + + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + + return entry; +} /* _dhd_wlfc_find_table_entry */ + +/** + * In case a packet must be dropped (because eg the queues are full), various tallies have to be + * be updated. Called from several other functions. + * @param[in] dhdp pointer to public DHD structure + * @param[in] prec precedence of the packet + * @param[in] p the packet to be dropped + * @param[in] bPktInQ TRUE if packet is part of a queue + */ +static int +_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ) +{ + athost_wl_status_info_t* ctx; + void *pout = NULL; + + ASSERT(dhdp && p); + if (prec < 0 || prec >= WLFC_PSQ_PREC_COUNT) { + ASSERT(0); + return BCME_BADARG; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + /* suppressed queue, need pop from hanger */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG + (PKTTAG(p))), &pout, TRUE); + ASSERT(p == pout); + } + + if (!(prec & 1)) { +#ifdef DHDTCPACK_SUPPRESS + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, 0, 0, TRUE); + + /* This packet is about to be freed, so remove it from tcp_ack_info_tbl + * This must be one of... + * 1. 
A pkt already in delayQ is evicted by another pkt with higher precedence + * in _dhd_wlfc_prec_enq_with_drop() + * 2. A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_enque_delayq(). + * 3. A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_rollback_packet_toq(). + */ + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" + " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + + if (bPktInQ) { + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + } + + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--; + ctx->stats.pktout++; + ctx->stats.drop_pkts[prec]++; + + dhd_txcomplete(dhdp, p, FALSE); + PKTFREE(ctx->osh, p, TRUE); + + return 0; +} /* _dhd_wlfc_prec_drop */ + +/** + * Called when eg the host handed a new packet over to the driver, or when the dongle reported + * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit + * packet in the host driver to be (re)transmitted at a later opportunity. 
+ * @param[in] dhdp pointer to public DHD structure + * @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence + */ +static bool +_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead, + uint8 current_seq) +{ + void *p = NULL; + int eprec = -1; /* precedence to evict from */ + athost_wl_status_info_t* ctx; + + ASSERT(dhdp && pq && pkt); + ASSERT(prec >= 0 && prec < pq->num_prec); + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktqprec_full(pq, prec) && !pktq_full(pq)) { + goto exit; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktqprec_full(pq, prec)) { + eprec = prec; + } else if (pktq_full(pq)) { + p = pktq_peek_tail(pq, &eprec); + if (!p) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + if ((eprec > prec) || (eprec < 0)) { + if (!pktqprec_empty(pq, prec)) { + eprec = prec; + } else { + return FALSE; + } + } + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktqprec_empty(pq, eprec)); + /* Evict all fragmented frames */ + dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop); + } + +exit: + /* Enqueue */ + _dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq, + WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)); + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++; + ctx->pkt_cnt_per_ac[prec>>1]++; + ctx->pkt_cnt_in_psq++; + + return TRUE; +} /* _dhd_wlfc_prec_enq_with_drop */ + +/** + * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in + * the event that this 'commit' failed. 
+ */ +static int +_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx, + void* p, ewlfc_packet_state_t pkt_type, uint32 hslot) +{ + /* + * put the packet back to the head of queue + * - suppressed packet goes back to suppress sub-queue + * - pull out the header, if new or delayed packet + * + * Note: hslot is used only when header removal is done. + */ + wlfc_mac_descriptor_t* entry; + int rc = BCME_OK; + int prec, fifo_id; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + fifo_id = prec << 1; + if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) + fifo_id += 1; + if (entry != NULL) { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the firmware (for pspoll etc.) + */ + if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) + entry->requested_credit++; + + if (pkt_type == eWLFC_PKTTYPE_DELAYED) { + /* decrement sequence count */ + WLFC_DECR_SEQCOUNT(entry, prec); + /* remove header first */ + rc = _dhd_wlfc_pullheader(ctx, p); + if (rc != BCME_OK) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + goto exit; + } + } + + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE, + WLFC_SEQCOUNT(entry, fifo_id>>1)) + == FALSE) { + /* enque failed */ + DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n", + __FUNCTION__, __LINE__, fifo_id)); + rc = BCME_ERROR; + } + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + +exit: + if (rc != BCME_OK) { + ctx->stats.rollback_failed++; + _dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE); + } else { + ctx->stats.rollback++; + } + + return rc; +} /* _dhd_wlfc_rollback_packet_toq */ + +/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */ +static bool +_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid) +{ + int prec, ac_traffic = WLFC_NO_TRAFFIC; + + for (prec = 0; prec < AC_COUNT; prec++) { + if (ctx->pkt_cnt_in_drv[ifid][prec] 
> 0) { + if (ac_traffic == WLFC_NO_TRAFFIC) + ac_traffic = prec + 1; + else if (ac_traffic != (prec + 1)) + ac_traffic = WLFC_MULTI_TRAFFIC; + } + } + + if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) { + /* single AC (BE/BK/VI/VO) in queue */ + if (ctx->allow_fc) { + return TRUE; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (ctx->fc_defer_timestamp == 0) { + /* first single ac scenario */ + ctx->fc_defer_timestamp = curr_t; + return FALSE; + } + + /* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */ + delta = curr_t - ctx->fc_defer_timestamp; + if (delta >= WLFC_FC_DEFER_PERIOD_MS) { + ctx->allow_fc = TRUE; + } + } + } else { + /* multiple ACs or BCMC in queue */ + ctx->allow_fc = FALSE; + ctx->fc_defer_timestamp = 0; + } + + return ctx->allow_fc; +} /* _dhd_wlfc_allow_fc */ + +/** + * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on + * low/high watermarks. + */ +static void +_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id) +{ + dhd_pub_t *dhdp; + + ASSERT(ctx); + + dhdp = (dhd_pub_t *)ctx->dhdp; + ASSERT(dhdp); + + if (dhdp->skip_fc && dhdp->skip_fc((void *)dhdp, if_id)) + return; + + if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id)) + return; + + if ((pq->n_pkts_tot <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) { + /* start traffic */ + ctx->hostif_flow_state[if_id] = OFF; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n", + pq->n_pkts_tot, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("F")); + + dhd_txflowcontrol(dhdp, if_id, OFF); + + ctx->toggle_host_if = 0; + } + + if (pq->n_pkts_tot >= WLFC_FLOWCONTROL_HIWATER && ctx->hostif_flow_state[if_id] == OFF) { + /* stop traffic */ + ctx->hostif_flow_state[if_id] = ON; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n", + pq->n_pkts_tot, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("N")); + + 
dhd_txflowcontrol(dhdp, if_id, ON); + + ctx->host_ifidx = if_id; + ctx->toggle_host_if = 1; + } + + return; +} /* _dhd_wlfc_flow_control_check */ + +static int +_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 ta_bmp) +{ + int rc = BCME_OK; + void* p = NULL; + int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + if (dhdp->proptxstatus_txoff) { + rc = BCME_NORESOURCE; + return rc; + } + + /* allocate a dummy packet */ + p = PKTGET(ctx->osh, dummylen, TRUE); + if (p) { + PKTPULL(ctx->osh, p, dummylen); + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0); + _dhd_wlfc_pushheader(ctx, &p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE); + DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1); +#ifdef PROP_TXSTATUS_DEBUG + ctx->stats.signal_only_pkts_sent++; +#endif // endif + +#if defined(BCMPCIE) + rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx); +#else + rc = dhd_bus_txdata(dhdp->bus, p); +#endif // endif + if (rc != BCME_OK) { + _dhd_wlfc_pullheader(ctx, p); + PKTFREE(ctx->osh, p, TRUE); + } + } else { + DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", + __FUNCTION__, dummylen)); + rc = BCME_NOMEM; + dhdp->tx_pktgetfail++; + } + + return rc; +} /* _dhd_wlfc_send_signalonly_packet */ + +/** + * Called on eg receiving 'mac close' indication from dongle. Updates the per-MAC administration + * maintained in caller supplied parameter 'entry'. 
+ * + * @param[in/out] entry administration about a remote MAC entity + * @param[in] prec precedence queue for this remote MAC entitity + * + * Return value: TRUE if traffic availability changed + */ +static bool +_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + int prec) +{ + bool rc = FALSE; + + if (entry->state == WLFC_STATE_CLOSE) { + if ((pktqprec_n_pkts(&entry->psq, (prec << 1)) == 0) && + (pktqprec_n_pkts(&entry->psq, ((prec << 1) + 1)) == 0)) { + /* no packets in both 'normal' and 'suspended' queues */ + if (entry->traffic_pending_bmp & NBITVAL(prec)) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp & ~ NBITVAL(prec); + } + } else { + /* packets are queued in host for transmission to dongle */ + if (!(entry->traffic_pending_bmp & NBITVAL(prec))) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp | NBITVAL(prec); + } + } + } + + if (rc) { + /* request a TIM update to firmware at the next piggyback opportunity */ + if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) { + entry->send_tim_signal = 1; + _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp); + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + entry->send_tim_signal = 0; + } else { + rc = FALSE; + } + } + + return rc; +} /* _dhd_wlfc_traffic_pending_check */ + +/** + * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues + * the packet to transmit to firmware again at a later opportunity. + */ +static int +_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p) +{ + wlfc_mac_descriptor_t* entry; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_NOTFOUND; + } + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. 
+ */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE, + WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + ctx->stats.delayq_full_error++; + /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */ + WLFC_DBGMESG(("s")); + return BCME_ERROR; + } + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p))); + return BCME_OK; +} + +/** + * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer + * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet. + * + * @param[in/out] ctx driver specific flow control administration + * @param[in/out] entry The remote MAC entity for which the packet is destined. + * @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet. + * @param[in] header_needed True if packet is 'new' to flow control + * @param[out] slot Handle to container in which the packet was 'parked' + */ +static int +_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot) +{ + int rc = BCME_OK; + int hslot = WLFC_HANGER_MAXITEMS; + bool send_tim_update = FALSE; + uint32 htod = 0; + uint16 htodseq = 0; + uint8 free_ctr; + int gen = 0xff; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + void * p = *packet; + + *slot = hslot; + + if (entry == NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, p); + } + + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + if (entry->send_tim_signal) { + /* sends a traffic indication bitmap to the dongle */ + send_tim_update = TRUE; + entry->send_tim_signal = 0; + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + } + + if (header_needed) { + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + hslot = (uint)(entry - 
&ctx->destination_entries.nodes[0]); + } else { + hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger); + } + gen = entry->generation; + free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + } else { + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p)); + } + + hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + + if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) { + gen = entry->generation; + } else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + } else { + _dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen); + } + + free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + /* remove old header */ + _dhd_wlfc_pullheader(ctx, p); + } + + if (hslot >= WLFC_HANGER_MAXITEMS) { + DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__)); + return BCME_ERROR; + } + + WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr); + WL_TXSTATUS_SET_HSLOT(htod, hslot); + WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p))); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + WL_TXSTATUS_SET_GENERATION(htod, gen); + DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1); + + if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) { + /* + Indicate that this packet is being sent in response to an + explicit request from the firmware side. + */ + WLFC_PKTFLAG_SET_PKTREQUESTED(htod); + } else { + WLFC_PKTFLAG_CLR_PKTREQUESTED(htod); + } + + rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update, + entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE); + if (rc == BCME_OK) { + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod); + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger); + if (header_needed) { + /* + a new header was created for this packet. + push to hanger slot and scrub q. Since bus + send succeeded, increment seq number as well. 
+ */ + rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + if (rc == BCME_OK) { +#ifdef PROP_TXSTATUS_DEBUG + h->items[hslot].push_time = + OSL_SYSUPTIME(); +#endif // endif + } else { + DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n", + __FUNCTION__, rc)); + } + } else { + /* clear hanger state */ + if (((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt != p) + DHD_ERROR(("%s() pkt not match: cur %p, hanger pkt %p\n", + __FUNCTION__, p, h->items[hslot].pkt)); + ASSERT(h->items[hslot].pkt == p); + bcm_object_feature_set(h->items[hslot].pkt, + BCM_OBJECT_FEATURE_PKT_STATE, 0); + h->items[hslot].pkt_state = 0; + h->items[hslot].pkt_txstatus = 0; + h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE; + } + } + + if ((rc == BCME_OK) && header_needed) { + /* increment free running sequence count */ + WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + } + } + *slot = hslot; + *packet = p; + return rc; +} /* _dhd_wlfc_pretx_pktprocess */ + +/** + * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote + * mac is in the 'open' state, otherwise '0'. + */ +static int +_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, int prec) +{ + wlfc_mac_descriptor_t* interfaces = ctx->destination_entries.interfaces; + + if (entry->interface_id >= WLFC_MAX_IFNUM) { + ASSERT(&ctx->destination_entries.other == entry); + return 1; + } + + if (interfaces[entry->interface_id].iftype == + WLC_E_IF_ROLE_P2P_GO) { + /* - destination interface is of type p2p GO. + For a p2pGO interface, if the destination is OPEN but the interface is + CLOSEd, do not send traffic. But if the dstn is CLOSEd while there is + destination-specific-credit left send packets. This is because the + firmware storing the destination-specific-requested packet in queue. 
+ */ + if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) { + return 0; + } + } + + /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */ + if ((((entry->state == WLFC_STATE_CLOSE) || + (interfaces[entry->interface_id].state == WLFC_STATE_CLOSE)) && + (entry->requested_credit == 0) && + (entry->requested_packet == 0)) || + (!(entry->ac_bitmap & (1 << prec)))) { + return 0; + } + + return 1; +} /* _dhd_wlfc_is_destination_open */ + +/** + * Dequeues a suppressed or delayed packet from a queue + * @param[in/out] ctx Driver specific flow control administration + * @param[in] prec Precedence of queue to dequeue from + * @param[out] ac_credit_spent Boolean, returns 0 or 1 + * @param[out] needs_hdr Boolean, returns 0 or 1 + * @param[out] entry_out The remote MAC for which the packet is destined + * @param[in] only_no_credit If TRUE, searches all entries instead of just the active ones + * + * Return value: the dequeued packet + */ +static void* +_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, + uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out, + bool only_no_credit) +{ + wlfc_mac_descriptor_t* entry; + int total_entries; + void* p = NULL; + int i; + uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 
0 : 1; + + *entry_out = NULL; + /* most cases a packet will count against FIFO credit */ + *ac_credit_spent = credit_spent; + + /* search all entries, include nodes as well as interfaces */ + if (only_no_credit) { + total_entries = ctx->requested_entry_count; + } else { + total_entries = ctx->active_entry_count; + } + + for (i = 0; i < total_entries; i++) { + if (only_no_credit) { + entry = ctx->requested_entry[i]; + } else { + entry = ctx->active_entry_head; + /* move head to ensure fair round-robin */ + ctx->active_entry_head = ctx->active_entry_head->next; + } + ASSERT(entry); + + if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) && + (entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) && + (!entry->suppressed)) { + *ac_credit_spent = credit_spent; + if (entry->state == WLFC_STATE_CLOSE) { + *ac_credit_spent = 0; + } + + /* higher precedence will be picked up first, + * i.e. suppressed packets before delayed ones + */ + p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec)); + *needs_hdr = 0; + if (p == NULL) { + /* De-Q from delay Q */ + p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec)); + *needs_hdr = 1; + } + + if (p != NULL) { + bcm_pkt_validate_chk(p); + /* did the packet come from suppress sub-queue? */ + if (entry->requested_credit > 0) { + entry->requested_credit--; +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_sent_packets++; +#endif // endif + } else if (entry->requested_packet > 0) { + entry->requested_packet--; + DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p)); + } + + *entry_out = entry; + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + ctx->pkt_cnt_in_psq--; + _dhd_wlfc_flow_control_check(ctx, &entry->psq, + DHD_PKTTAG_IF(PKTTAG(p))); + /* + * A packet has been picked up, update traffic availability bitmap, + * if applicable. 
+ */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + return p; + } + } + } + return NULL; +} /* _dhd_wlfc_deque_delayedq */ + +/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */ +static int +_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec) +{ + wlfc_mac_descriptor_t* entry; + + if (pktbuf != NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, pktbuf); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. + */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1), + FALSE, WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + WLFC_DBGMESG(("D")); + ctx->stats.delayq_full_error++; + return BCME_ERROR; + } + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + } + + return BCME_OK; +} /* _dhd_wlfc_enque_delayq */ + +/** Returns TRUE if caller supplied packet is destined for caller supplied interface */ +static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid) +{ + if (!p || !p_ifid) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p)))); +} + +/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */ +static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry) +{ + if (!p || !entry) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (entry == DHD_PKTTAG_ENTRY(PKTTAG(p)))); +} + +static void +_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt) +{ + dhd_pub_t *dhdp; + bool credit_return = FALSE; + + if (!wlfc || !pkt) { + return; + } + + dhdp = (dhd_pub_t *)(wlfc->dhdp); + if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) && + DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + int lender, 
credit_returned = 0; + uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt)); + + credit_return = TRUE; + + /* Note that borrower is fifo_id */ + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } + } + + BCM_REFERENCE(credit_return); +#if defined(DHD_WLFC_THREAD) + if (credit_return) { + _dhd_wlfc_thread_wakeup(dhdp); + } +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** Removes and frees a packet from the hanger. Called during eg tx complete. */ +static void +_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state, + int pkt_txstatus) +{ + wlfc_hanger_t* hanger; + wlfc_hanger_item_t* item; + + if (!wlfc) + return; + + hanger = (wlfc_hanger_t*)wlfc->hanger; + if (!hanger) + return; + + if (slot_id == WLFC_HANGER_MAXITEMS) + return; + + item = &hanger->items[slot_id]; + + if (item->pkt) { + item->pkt_state |= pkt_state; + if (pkt_txstatus != -1) + item->pkt_txstatus = (uint8)pkt_txstatus; + bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state); + if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) { + void *p = NULL; + void *pkt = item->pkt; + uint8 old_state = item->state; + int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE); + BCM_REFERENCE(ret); + BCM_REFERENCE(pkt); + ASSERT((ret == BCME_OK) && p && (pkt == p)); + if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n", + pkt, old_state, item->pkt_state); + } + ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED); + + /* free packet */ + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))] + [DHD_PKTTAG_FIFO(PKTTAG(p))]--; + wlfc->stats.pktout++; + dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, 
item->pkt_txstatus); + PKTFREE(wlfc->osh, p, TRUE); + } + } else { + /* free slot */ + if (item->state == WLFC_HANGER_ITEM_STATE_FREE) + DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n", + __FUNCTION__, __LINE__, item->pkt_state, pkt_state)); + item->state = WLFC_HANGER_ITEM_STATE_FREE; + } +} /* _dhd_wlfc_hanger_free_pkt */ + +/** Called during eg detach() */ +static void +_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, + bool dir, f_processpkt_t fn, void *arg, q_type_t q_type) +{ + int prec; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + ASSERT(dhdp); + + /* Optimize flush, if pktq len = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->n_pkts_tot == 0) { + return; + } + + for (prec = 0; prec < pq->num_prec; prec++) { + struct pktq_prec *q; + void *p, *prev = NULL; + + q = &pq->q[prec]; + p = q->head; + while (p) { + bcm_pkt_validate_chk(p); + if (fn == NULL || (*fn)(p, arg)) { + bool head = (p == q->head); + if (head) + q->head = PKTLINK(p); + else + PKTSETLINK(prev, PKTLINK(p)); + if (q_type == Q_TYPE_PSQ) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + _dhd_wlfc_hanger_remove_reference(ctx->hanger, p); + } + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + ctx->stats.cleanup_psq_cnt++; + if (!(prec & 1)) { + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, + 0, 0, TRUE); +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" 
+ " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, + TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + } else if (q_type == Q_TYPE_AFQ) { + wlfc_mac_descriptor_t* entry = + _dhd_wlfc_find_table_entry(ctx, p); + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + _dhd_wlfc_return_implied_credit(ctx, p); + ctx->stats.cleanup_fw_cnt++; + } + PKTSETLINK(p, NULL); + if (dir) { + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->stats.pktout++; + dhd_txcomplete(dhdp, p, FALSE); + } + PKTFREE(ctx->osh, p, dir); + + q->n_pkts--; + pq->n_pkts_tot--; +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + p = (head ? q->head : PKTLINK(prev)); + } else { + prev = p; + p = PKTLINK(p); + } + } + + if (q->head == NULL) { + ASSERT(q->n_pkts == 0); + q->tail = NULL; + } + + } + + if (fn == NULL) + ASSERT(pq->n_pkts_tot == 0); +} /* _dhd_wlfc_pktq_flush */ + +#ifndef BCMDBUS +/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. 
*/ +static void* +_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + return NULL; + + bcm_pkt_validate_chk(p); + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + PKTSETLINK(p, NULL); + + return p; +} + +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + wlfc_mac_descriptor_t* entry; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! 
Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode) && + !_dhd_wlfc_hanger_remove_reference(h, pkt)) { + DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n", + __FUNCTION__, pkt)); + } + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + _dhd_wlfc_return_implied_credit(wlfc, pkt); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--; + wlfc->stats.pktout++; + wlfc->stats.cleanup_txq_cnt++; + dhd_txcomplete(dhd, pkt, FALSE); + PKTFREE(wlfc->osh, pkt, TRUE); + } +} /* _dhd_wlfc_cleanup_txq */ +#endif /* !BCMDBUS */ + +/** called during eg detach */ +void +_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int i; + int total_entries; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + + wlfc->stats.cleanup_txq_cnt = 0; + wlfc->stats.cleanup_psq_cnt = 0; + wlfc->stats.cleanup_fw_cnt = 0; + + /* + * flush sequence should be txq -> psq -> hanger/afq, hanger has to be last one + */ +#ifndef BCMDBUS + /* flush bus->txq */ + _dhd_wlfc_cleanup_txq(dhd, fn, arg); +#endif /* !BCMDBUS */ + + /* flush psq, search all entries, include nodes as well as interfaces */ + total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t); + table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries; + + for (i = 0; i < total_entries; i++) { + if 
(table[i].occupied) { + /* release packets held in PSQ (both delayed and suppressed) */ + if (table[i].psq.n_pkts_tot) { + WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n", + __FUNCTION__, i, table[i].psq.n_pkts_tot)); + _dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE, + fn, arg, Q_TYPE_PSQ); + } + + /* free packets held in AFQ */ + if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.n_pkts_tot)) { + _dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE, + fn, arg, Q_TYPE_AFQ); + } + + if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) { + table[i].occupied = 0; + if (table[i].transit_count || table[i].suppr_transit_count) { + DHD_ERROR(("%s: table[%d] transit(%d), suppr_tansit(%d)\n", + __FUNCTION__, i, + table[i].transit_count, + table[i].suppr_transit_count)); + } + } + } + } + + /* + . flush remained pkt in hanger queue, not in bus->txq nor psq. + . the remained pkt was successfully downloaded to dongle already. + . hanger slot state cannot be set to free until receive txstatus update. 
+ */ + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + for (i = 0; i < h->max_items; i++) { + if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) || + (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) { + if (fn == NULL || (*fn)(h->items[i].pkt, arg)) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED; + } + } + } + } + + return; +} /* _dhd_wlfc_cleanup */ + +/** Called after eg the dongle signalled a new remote MAC that it connected with to the DHD */ +static int +_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea, + f_processpkt_t fn, void *arg) +{ + int rc = BCME_OK; + + if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) { + entry->occupied = 1; + entry->state = WLFC_STATE_OPEN; + entry->requested_credit = 0; + entry->interface_id = ifid; + entry->iftype = iftype; + entry->ac_bitmap = 0xff; /* update this when handling APSD */ + + /* for an interface entry we may not care about the MAC address */ + if (ea != NULL) + memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN); + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + entry->suppressed = FALSE; + entry->transit_count = 0; + entry->suppr_transit_count = 0; + entry->onbus_pkts_count = 0; + } + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid); + + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN); + } + + if (entry->next == NULL) { + /* not linked to anywhere, add to tail */ + if (ctx->active_entry_head) { + entry->prev = ctx->active_entry_head->prev; + ctx->active_entry_head->prev->next = entry; + ctx->active_entry_head->prev = entry; + entry->next = ctx->active_entry_head; + } else { + ASSERT(ctx->active_entry_count == 0); + entry->prev = entry->next = entry; + ctx->active_entry_head 
= entry; + } + ctx->active_entry_count++; + } else { + DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__, + (int)(entry - &ctx->destination_entries.nodes[0]))); + } + } + } else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) { + /* When the entry is deleted, the packets that are queued in the entry must be + cleanup. The cleanup action should be before the occupied is set as 0. + */ + _dhd_wlfc_cleanup(ctx->dhdp, fn, arg); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid); + + entry->occupied = 0; + entry->state = WLFC_STATE_CLOSE; + memset(&entry->ea[0], 0, ETHER_ADDR_LEN); + + if (entry->next) { + /* not floating, remove from Q */ + if (ctx->active_entry_count <= 1) { + /* last item */ + ctx->active_entry_head = NULL; + ctx->active_entry_count = 0; + } else { + entry->prev->next = entry->next; + entry->next->prev = entry->prev; + if (entry == ctx->active_entry_head) { + ctx->active_entry_head = entry->next; + } + ctx->active_entry_count--; + } + entry->next = entry->prev = NULL; + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + } + } + return rc; +} /* _dhd_wlfc_mac_entry_update */ + +#ifdef LIMIT_BORROW + +/** LIMIT_BORROW specific function */ +static int +_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac, + bool bBorrowAll) +{ + int lender_ac, borrow_limit = 0; + int rc = -1; + + if (ctx == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return -1; + } + + /* Borrow from lowest priority available AC (including BC/MC credits) */ + for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) { + if (!bBorrowAll) { + borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO; + } else { + borrow_limit = 0; + } + + if (ctx->FIFO_credit[lender_ac] > borrow_limit) { + ctx->credits_borrowed[borrower_ac][lender_ac]++; + ctx->FIFO_credit[lender_ac]--; + rc = lender_ac; + break; + } + } + + return rc; +} + +/** LIMIT_BORROW specific function */ +static int 
_dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac) +{ + if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) || + (borrower_ac < 0) || (borrower_ac > AC_COUNT)) { + DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n", + __FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac)); + + return BCME_BADARG; + } + + ctx->credits_borrowed[borrower_ac][lender_ac]--; + ctx->FIFO_credit[lender_ac]++; + + return BCME_OK; +} + +#endif /* LIMIT_BORROW */ + +/** + * Called on an interface event (WLC_E_IF) indicated by firmware. + * @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ +static int +_dhd_wlfc_interface_entry_update(void* state, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + wlfc_mac_descriptor_t* entry; + + if (ifid >= WLFC_MAX_IFNUM) + return BCME_BADARG; + + entry = &ctx->destination_entries.interfaces[ifid]; + + return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea, + _dhd_wlfc_ifpkt_fn, &ifid); +} + +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ +static int +_dhd_wlfc_BCMCCredit_support_update(void* state) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + + ctx->bcmc_credit_supported = TRUE; + return BCME_OK; +} + +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ +static int +_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + int i; + + for (i = 0; i <= 4; i++) { + if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) { + DHD_ERROR(("%s: credit[i] is not returned, (%d %d)\n", + __FUNCTION__, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i])); + } + } + + /* update the AC FIFO credit map */ + ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]); + ctx->FIFO_credit[1] += 
(credits[1] - ctx->Init_FIFO_credit[1]); + ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]); + ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]); + ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]); + + ctx->Init_FIFO_credit[0] = credits[0]; + ctx->Init_FIFO_credit[1] = credits[1]; + ctx->Init_FIFO_credit[2] = credits[2]; + ctx->Init_FIFO_credit[3] = credits[3]; + ctx->Init_FIFO_credit[4] = credits[4]; + + /* credit for ATIM FIFO is not used yet. */ + ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0; + + return BCME_OK; +} + +/** + * Called during committing of a transmit packet from the OS DHD layer to the next layer towards + * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer. + * + * @param[in/out] ctx Driver specific flow control administration + * @param[in] ac Access Category (QoS) of called supplied packet + * @param[in] commit_info Contains eg the packet to send + * @param[in] fcommit Function pointer to transmit function of next software layer + * @param[in] commit_ctx Opaque context used when calling next layer + */ +static int +_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, + dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx) +{ + uint32 hslot; + int rc; + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + /* + if ac_fifo_credit_spent = 0 + + This packet will not count against the FIFO credit. + To ensure the txstatus corresponding to this packet + does not provide an implied credit (default behavior) + mark the packet accordingly. + + if ac_fifo_credit_spent = 1 + + This is a normal packet and it counts against the FIFO + credit count. 
+ */ + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent); + rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, &commit_info->p, + commit_info->needs_hdr, &hslot); + + if (rc == BCME_OK) { + rc = fcommit(commit_ctx, commit_info->p); + if (rc == BCME_OK) { + uint8 gen = WL_TXSTATUS_GET_GENERATION( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))); + ctx->stats.pkt2bus++; + if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) { + ctx->stats.send_pkts[ac]++; + WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); + } + + if (gen != commit_info->mac_entry->generation) { + /* will be suppressed back by design */ + if (!commit_info->mac_entry->suppressed) { + commit_info->mac_entry->suppressed = TRUE; + } + commit_info->mac_entry->suppr_transit_count++; + } + commit_info->mac_entry->transit_count++; + commit_info->mac_entry->onbus_pkts_count++; + } else if (commit_info->needs_hdr) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + void *pout = NULL; + /* pop hanger for delayed packet */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE); + ASSERT(commit_info->p == pout); + } + } + } else { + ctx->stats.generic_error++; + } + + if (rc != BCME_OK) { + /* + pretx pkt process or bus commit has failed, rollback. 
+ - remove wl-header for a delayed packet + - save wl-header header for suppressed packets + - reset credit check flag + */ + _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot); + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0); + } + + return rc; +} /* _dhd_wlfc_handle_packet_commit */ + +/** Returns remote MAC descriptor for caller supplied MAC address */ +static uint8 +_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea) +{ + wlfc_mac_descriptor_t* table = + ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes; + uint8 table_index; + + if (ea != NULL) { + for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) { + if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) && + table[table_index].occupied) + return table_index; + } + } + return WLFC_MAC_DESC_ID_INVALID; +} + +/** + * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the + * status of a frame that the dongle attempted to transmit over the wireless medium. 
+ */ +static int +dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt) +{ + athost_wl_status_info_t* ctx; + wlfc_mac_descriptor_t* entry = NULL; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!dhd) { + DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd)); + return BCME_BADARG; + } + ctx = (athost_wl_status_info_t*)dhd->wlfc_state; + if (!ctx) { + DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx)); + return BCME_ERROR; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->psq; + + ASSERT(((prec << 1) + 1) < pq->num_prec); + + q = &pq->q[((prec << 1) + 1)]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->n_pkts--; + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + + PKTSETLINK(p, NULL); + + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(ctx, p); + } else { + _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + } + + entry->transit_count++; + + return BCME_OK; +} + +static int +_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, 
uint8* pkt_info, uint8 len, void** p_mac) +{ + uint8 status_flag_ori, status_flag; + uint32 status; + int ret = BCME_OK; + int remove_from_hanger_ori, remove_from_hanger = 1; + void* pktbuf = NULL; + uint8 fifo_id = 0, gen = 0, count = 0, hcnt; + uint16 hslot; + wlfc_mac_descriptor_t* entry = NULL; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + uint16 seq = 0, seq_fromfw = 0, seq_num = 0; + + memcpy(&status, pkt_info, sizeof(uint32)); + status = ltoh32(status); + status_flag = WL_TXSTATUS_GET_FLAGS(status); + hcnt = WL_TXSTATUS_GET_FREERUNCTR(status); + hslot = WL_TXSTATUS_GET_HSLOT(status); + fifo_id = WL_TXSTATUS_GET_FIFO(status); + gen = WL_TXSTATUS_GET_GENERATION(status); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ); + seq = ltoh16(seq); + seq_fromfw = GET_WL_HAS_ASSIGNED_SEQ(seq); + seq_num = WL_SEQ_GET_NUM(seq); + } + + wlfc->stats.txstatus_in += len; + + if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) { + wlfc->stats.d11_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) { + wlfc->stats.wl_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) { + wlfc->stats.wlc_tossed_pkts += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_EXPIRED) { + wlfc->stats.pkt_exptime += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_DROPPED) { + wlfc->stats.pkt_dropped += len; + } + + if (dhd->proptxstatus_txstatus_ignore) { + if (!remove_from_hanger) { + DHD_ERROR(("suppress txstatus: %d\n", status_flag)); + } + return BCME_OK; + } + + status_flag_ori = status_flag; + remove_from_hanger_ori = 
remove_from_hanger; + + while (count < len) { + if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf); + } else { + status_flag = status_flag_ori; + remove_from_hanger = remove_from_hanger_ori; + ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE); + if (!pktbuf) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, -1); + goto cont; + } else { + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) { + status_flag = WLFC_CTL_PKTFLAG_DISCARD; + remove_from_hanger = 1; + } + } + } + + if ((ret != BCME_OK) || !pktbuf) { + goto cont; + } + + bcm_pkt_validate_chk(pktbuf); + + /* set fifo_id to correct value because not all FW does that */ + fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + + entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf); + + if (!remove_from_hanger) { + /* this packet was suppressed */ + if (!entry->suppressed || (entry->generation != gen)) { + if (!entry->suppressed) { + entry->suppr_transit_count = entry->transit_count; + if (p_mac) { + *p_mac = entry; + } + } else { + DHD_ERROR(("gen(%d), entry->generation(%d)\n", + gen, entry->generation)); + } + entry->suppressed = TRUE; + + } + entry->generation = gen; + } + +#ifdef PROP_TXSTATUS_DEBUG + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) + { + uint32 new_t = OSL_SYSUPTIME(); + uint32 old_t; + uint32 delta; + old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time; + + wlfc->stats.latency_sample_count++; + if (new_t > old_t) + delta = new_t - old_t; + else + delta = 0xffffffff + new_t - old_t; + wlfc->stats.total_status_latency += delta; + wlfc->stats.latency_most_recent = delta; + + wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta; + if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32)) + wlfc->stats.idx_delta = 0; + } 
+#endif /* PROP_TXSTATUS_DEBUG */ + + /* pick up the implicit credit from this packet */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) { + _dhd_wlfc_return_implied_credit(wlfc, pktbuf); + } else { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the destination entry (for pspoll etc.) + */ + if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) { + entry->requested_credit++; +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* DHD_WLFC_THREAD */ + } +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_acks++; +#endif // endif + } + + if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) || + (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) { + /* save generation bit inside packet */ + WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + WL_SEQ_SET_REUSE(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw); + WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num); + } + + ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf); + if (ret != BCME_OK) { + /* delay q is full, drop this packet */ + DHD_WLFC_QMON_COMPLETE(entry); + _dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE); + } else { + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + /* Mark suppressed to avoid a double free + during wlfc cleanup + */ + _dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen); + } + } + } else { + + DHD_WLFC_QMON_COMPLETE(entry); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE); + } else { + dhd_txcomplete(dhd, pktbuf, TRUE); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))] + [DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--; + wlfc->stats.pktout++; + /* free the packet */ + PKTFREE(wlfc->osh, pktbuf, TRUE); + } + } + /* pkt back from firmware side */ + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + 
(!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + +cont: + hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK; + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK; + } + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) { + seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK; + } + + count++; + } + + return BCME_OK; +} /* _dhd_wlfc_compressed_txstatus_update */ + +/** + * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle. + * @param[in] credits caller supplied credit that will be added to the host credit. + */ +static int +_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) +{ + int i; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.fifo_credits_back[i] += credits[i]; +#endif // endif + + /* update FIFO credits */ + if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT) + { + int lender; /* Note that borrower is i */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) { + if (wlfc->credits_borrowed[i][lender] > 0) { + if (credits[i] >= wlfc->credits_borrowed[i][lender]) { + credits[i] -= + (uint8)wlfc->credits_borrowed[i][lender]; + wlfc->FIFO_credit[lender] += + wlfc->credits_borrowed[i][lender]; + wlfc->credits_borrowed[i][lender] = 0; + } else { + wlfc->credits_borrowed[i][lender] -= credits[i]; + wlfc->FIFO_credit[lender] += credits[i]; + credits[i] = 0; + } + } + } + + /* If we have more credits left over, these must belong to the AC */ + if (credits[i] > 0) { + wlfc->FIFO_credit[i] += credits[i]; + } + + if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) { + wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i]; + } + } + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + + 
return BCME_OK; +} /* _dhd_wlfc_fifocreditback_indicate */ + +#ifndef BCMDBUS +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* entry; + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + uint8 results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ]; + uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0}; + uint32 htod = 0; + uint16 htodseq = 0; + bool bCreditUpdate = FALSE; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + if (!entry) { + PKTFREE(dhd->osh, pkt, TRUE); + continue; + } + if (entry->onbus_pkts_count > 0) { + entry->onbus_pkts_count--; + } + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) { + entry->suppressed = FALSE; + } + /* fake a suppression txstatus */ + htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt)); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS); + WL_TXSTATUS_SET_GENERATION(htod, entry->generation); + htod = htol32(htod); + memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS); + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt)); + if (IS_WL_TO_REUSE_SEQ(htodseq)) { + SET_WL_HAS_ASSIGNED_SEQ(htodseq); + RESET_WL_TO_REUSE_SEQ(htodseq); + } + htodseq = htol16(htodseq); + memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq, + WLFC_CTL_VALUE_LEN_SEQ); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, pkt); + } + _dhd_wlfc_compressed_txstatus_update(dhd, results, 
1, NULL); + + /* fake a fifo credit back */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++; + bCreditUpdate = TRUE; + } + } + + if (bCreditUpdate) { + _dhd_wlfc_fifocreditback_indicate(dhd, credits); + } +} /* _dhd_wlfc_suppress_txq */ +#endif /* !BCMDBUS */ + +static int +_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value) +{ + uint32 timestamp; + + (void)dhd; + + bcopy(&value[2], &timestamp, sizeof(uint32)); + timestamp = ltoh32(timestamp); + DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp)); + return BCME_OK; +} + +static int +_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi) +{ + (void)dhd; + (void)rssi; + return BCME_OK; +} + +static void +_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i == wlfc->requested_entry_count) { + /* no match entry found */ + ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1)); + wlfc->requested_entry[wlfc->requested_entry_count++] = entry; + } +} + +/** called on eg receiving 'mac open' event from the dongle. 
*/ +static void +_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i < wlfc->requested_entry_count) { + /* found */ + ASSERT(wlfc->requested_entry_count > 0); + wlfc->requested_entry_count--; + if (i != wlfc->requested_entry_count) { + wlfc->requested_entry[i] = + wlfc->requested_entry[wlfc->requested_entry_count]; + } + wlfc->requested_entry[wlfc->requested_entry_count] = NULL; + } +} + +/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */ +static int +_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + int rc; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 existing_index; + uint8 table_index; + uint8 ifid; + uint8* ea; + + WLFC_DBGMESG(("%s(), mac ["MACDBG"],%s,idx:%d,id:0x%02x\n", + __FUNCTION__, MAC2STRDBG(&value[2]), + ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"), + WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0])); + + table = wlfc->destination_entries.nodes; + table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]); + ifid = value[1]; + ea = &value[2]; + + _dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]); + if (type == WLFC_CTL_TYPE_MACDESC_ADD) { + existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]); + if ((existing_index != WLFC_MAC_DESC_ID_INVALID) && + (existing_index != table_index) && table[existing_index].occupied) { + /* + there is an existing different entry, free the old one + and move it to new index if necessary. 
+ */ + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index], + eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id, + table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn, + &table[existing_index]); + } + + if (!table[table_index].occupied) { + /* this new MAC entry does not exist, create one */ + table[table_index].mac_handle = value[0]; + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_ADD, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, NULL, NULL); + } else { + /* the space should have been empty, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + + if (type == WLFC_CTL_TYPE_MACDESC_DEL) { + if (table[table_index].occupied) { + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_DEL, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, _dhd_wlfc_entrypkt_fn, &table[table_index]); + } else { + /* the space should have been occupied, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + BCM_REFERENCE(rc); + return BCME_OK; +} /* _dhd_wlfc_mac_table_update */ + +/** Called on a 'mac open' or 'mac close' event indicated by the dongle */ +static int +_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; /* a table maps from mac handle to mac descriptor */ + uint8 mac_handle = value[0]; + int i; + + table = wlfc->destination_entries.nodes; + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + if (type == WLFC_CTL_TYPE_MAC_OPEN) { + desc->state = WLFC_STATE_OPEN; + desc->ac_bitmap = 0xff; + DHD_WLFC_CTRINC_MAC_OPEN(desc); + desc->requested_credit = 0; + desc->requested_packet = 0; + _dhd_wlfc_remove_requested_entry(wlfc, desc); + } else { + desc->state = WLFC_STATE_CLOSE; + DHD_WLFC_CTRINC_MAC_CLOSE(desc); + /* 
Indicate to firmware if there is any traffic pending. */ + for (i = 0; i < AC_COUNT; i++) { + _dhd_wlfc_traffic_pending_check(wlfc, desc, i); + } + } + } else { + wlfc->stats.psmode_update_failed++; + } + + return BCME_OK; +} /* _dhd_wlfc_psmode_update */ + +/** called upon receiving 'interface open' or 'interface close' event from the dongle */ +static int +_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 if_id = value[0]; + + if (if_id < WLFC_MAX_IFNUM) { + table = wlfc->destination_entries.interfaces; + if (table[if_id].occupied) { + if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) { + table[if_id].state = WLFC_STATE_OPEN; + /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */ + } else { + table[if_id].state = WLFC_STATE_CLOSE; + /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */ + } + return BCME_OK; + } + } + wlfc->stats.interface_update_failed++; + + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */ +static int +_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 credit; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + credit = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_credit = credit; + + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + } else { + wlfc->stats.credit_request_failed++; + } + + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */ +static int +_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + 
uint8 mac_handle; + uint8 packet_count; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + packet_count = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_packet = packet_count; + + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + } else { + wlfc->stats.packet_request_failed++; + } + + return BCME_OK; +} + +/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */ +static void +_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len) +{ + if (info_len) { + /* Check copy length to avoid buffer overrun. In case of length exceeding + * WLHOST_REORDERDATA_TOTLEN, return failure instead sending incomplete result + * of length WLHOST_REORDERDATA_TOTLEN + */ + if ((info_buf) && (len <= WLHOST_REORDERDATA_TOTLEN)) { + bcopy(val, info_buf, len); + *info_len = len; + } else { + *info_len = 0; + } + } +} + +/* + * public functions + */ + +bool dhd_wlfc_is_supported(dhd_pub_t *dhd) +{ + bool rc = TRUE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rc = FALSE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +int dhd_wlfc_enable(dhd_pub_t *dhd) +{ + int i, rc = BCME_OK; + athost_wl_status_info_t* wlfc; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_enabled || dhd->wlfc_state) { + rc = BCME_OK; + goto exit; + } + + /* allocate space to track txstatus propagated from firmware */ + dhd->wlfc_state = DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_INFO, + sizeof(athost_wl_status_info_t)); + if (dhd->wlfc_state == NULL) { + rc = BCME_NOMEM; + goto exit; + } + + /* initialize state space */ + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + memset(wlfc, 0, sizeof(athost_wl_status_info_t)); + + /* 
remember osh & dhdp */ + wlfc->osh = dhd->osh; + wlfc->dhdp = dhd; + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS); + if (wlfc->hanger == NULL) { + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + rc = BCME_NOMEM; + goto exit; + } + } + + dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT; + /* default to check rx pkt */ + dhd->wlfc_rxpkt_chk = TRUE; + if (dhd->op_mode & DHD_FLAG_IBSS_MODE) { + dhd->wlfc_rxpkt_chk = FALSE; + } + + /* initialize all interfaces to accept traffic */ + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + wlfc->hostif_flow_state[i] = OFF; + } + + _dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other, + eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL); + + wlfc->allow_credit_borrow = 0; + wlfc->single_ac = 0; + wlfc->single_ac_timestamp = 0; + +exit: + DHD_ERROR(("%s: ret=%d\n", __FUNCTION__, rc)); + dhd_os_wlfc_unblock(dhd); + + return rc; +} /* dhd_wlfc_enable */ + +#ifdef SUPPORT_P2P_GO_PS + +/** + * Called when the host platform enters a lower power mode, eg right before a system hibernate. + * SUPPORT_P2P_GO_PS specific function. + */ +int +dhd_wlfc_suspend(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0) + return 0; + tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +/** + * Called when the host platform resumes from a power management operation, eg resume after a + * system hibernate. SUPPORT_P2P_GO_PS specific function. 
+ */ +int +dhd_wlfc_resume(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == + (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) + return 0; + tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +#endif /* SUPPORT_P2P_GO_PS */ + +/** A flow control header was received from firmware, containing one or more TLVs */ +int +dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf, + uint *reorder_info_len) +{ + uint8 type, len; + uint8* value; + uint8* tmpbuf; + uint16 remainder = (uint16)tlv_hdr_len; + uint16 processed = 0; + athost_wl_status_info_t* wlfc = NULL; + void* entry; + + if ((dhd == NULL) || (pktbuf == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) { + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + } + + tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf); + + if (remainder) { + while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) { + type = tmpbuf[processed]; + if (type == WLFC_CTL_TYPE_FILLER) { + remainder -= 1; + processed += 1; + continue; + } + + len = tmpbuf[processed + 1]; + value = &tmpbuf[processed + 2]; + + if (remainder < (2 + len)) + break; + + remainder -= 2 + len; + processed += 2 + len; + entry = NULL; + + DHD_INFO(("%s():%d type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + + if (type == 
WLFC_CTL_TYPE_HOST_REORDER_RXPKTS) + _dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf, + reorder_info_len); + + if (wlfc == NULL) { + ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER); + + if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS && + type != WLFC_CTL_TYPE_TRANS_ID) + DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!" + " type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + continue; + } + + if (type == WLFC_CTL_TYPE_TXSTATUS) { + _dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry); + } else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) { + uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS; + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ; + } + _dhd_wlfc_compressed_txstatus_update(dhd, value, + value[compcnt_offset], &entry); + } else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) { + _dhd_wlfc_fifocreditback_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_RSSI) { + _dhd_wlfc_rssi_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) { + _dhd_wlfc_credit_request(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) { + _dhd_wlfc_packet_request(dhd, value); + } else if ((type == WLFC_CTL_TYPE_MAC_OPEN) || + (type == WLFC_CTL_TYPE_MAC_CLOSE)) { + _dhd_wlfc_psmode_update(dhd, value, type); + } else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) || + (type == WLFC_CTL_TYPE_MACDESC_DEL)) { + _dhd_wlfc_mac_table_update(dhd, value, type); + } else if (type == WLFC_CTL_TYPE_TRANS_ID) { + _dhd_wlfc_dbg_senum_check(dhd, value); + } else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) || + (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) { + _dhd_wlfc_interface_update(dhd, value, type); + } + +#ifndef BCMDBUS + if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) { + /* suppress all packets for this mac entry from bus->txq */ + _dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry); + } +#endif /* !BCMDBUS */ + } /* while */ + + if (remainder 
!= 0 && wlfc) { + /* trouble..., something is not right */ + wlfc->stats.tlv_parse_failed++; + } + } /* if */ + + if (wlfc) + wlfc->stats.dhd_hdrpulls++; + + dhd_os_wlfc_unblock(dhd); + return BCME_OK; +} + +KERNEL_THREAD_RETURN_TYPE +dhd_wlfc_transfer_packets(void *data) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)data; + int ac, single_ac = 0, rc = BCME_OK; + dhd_wlfc_commit_info_t commit_info; + athost_wl_status_info_t* ctx; + int bus_retry_count = 0; + int pkt_send = 0; + int pkt_send_per_ac = 0; + + uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */ + uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */ + uint8 packets_map = 0; /* packets in queue, Bitmask for 4 ACs + BC/MC */ + bool no_credit = FALSE; + + int lender; + +#if defined(DHD_WLFC_THREAD) + /* wait till someone wakeup me up, will change it at running time */ + int wait_msec = msecs_to_jiffies(0xFFFFFFFF); +#endif /* defined(DHD_WLFC_THREAD) */ + +#if defined(DHD_WLFC_THREAD) + while (1) { + bus_retry_count = 0; + pkt_send = 0; + tx_map = 0; + rx_map = 0; + packets_map = 0; + wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead, + dhdp->wlfc_thread_go, wait_msec); + if (kthread_should_stop()) { + break; + } + dhdp->wlfc_thread_go = FALSE; + + dhd_os_wlfc_block(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; +#if defined(DHD_WLFC_THREAD) + if (!ctx) + goto exit; +#endif /* defined(DHD_WLFC_THREAD) */ + + memset(&commit_info, 0, sizeof(commit_info)); + + /* + Commit packets for regular AC traffic. Higher priority first. + First, use up FIFO credits available to each AC. Based on distribution + and credits left, borrow from other ACs as applicable + + -NOTE: + If the bus between the host and firmware is overwhelmed by the + traffic from host, it is possible that higher priority traffic + starves the lower priority queue. 
If that occurs often, we may + have to employ weighted round-robin or ucode scheme to avoid + low priority packet starvation. + */ + + for (ac = AC_COUNT; ac >= 0; ac--) { + if (dhdp->wlfc_rxpkt_chk) { + /* check rx packet */ + uint32 curr_t = OSL_SYSUPTIME(), delta; + + delta = curr_t - ctx->rx_timestamp[ac]; + if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) { + rx_map |= (1 << ac); + } + } + + if (ctx->pkt_cnt_per_ac[ac] == 0) { + continue; + } + + tx_map |= (1 << ac); + single_ac = ac + 1; + pkt_send_per_ac = 0; + while ((FALSE == dhdp->proptxstatus_txoff) && + (pkt_send_per_ac < WLFC_PACKET_BOUND)) { + /* packets from delayQ with less priority are fresh and + * they'd need header and have no MAC entry + */ + no_credit = (ctx->FIFO_credit[ac] < 1); + if (dhdp->proptxstatus_credit_ignore || + ((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) { + no_credit = FALSE; + } + + lender = -1; +#ifdef LIMIT_BORROW + if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map) && + dhdp->wlfc_borrow_allowed) { + /* try borrow from lower priority */ + lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE); + if (lender != -1) { + no_credit = FALSE; + } + } +#endif // endif + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + no_credit); + commit_info.pkt_type = (commit_info.needs_hdr) ? 
eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + if (commit_info.p == NULL) { +#ifdef LIMIT_BORROW + if (lender != -1 && dhdp->wlfc_borrow_allowed) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif // endif + break; + } + + if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) { + ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent); + } + /* here we can ensure have credit or no credit needed */ + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + pkt_send_per_ac++; + if (commit_info.ac_fifo_credit_spent && (lender == -1)) { + ctx->FIFO_credit[ac]--; + } +#ifdef LIMIT_BORROW + else if (!commit_info.ac_fifo_credit_spent && (lender != -1) && + dhdp->wlfc_borrow_allowed) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif // endif + } else { +#ifdef LIMIT_BORROW + if (lender != -1 && dhdp->wlfc_borrow_allowed) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif // endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + if (ctx->pkt_cnt_per_ac[ac]) { + packets_map |= (1 << ac); + } + } + + if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) { + /* nothing send out or remain in queue */ + rc = BCME_OK; + goto exit; + } + + if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) { + /* only one tx ac exist and no higher rx ac */ + if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) { + ac = single_ac - 1; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (single_ac != ctx->single_ac) { + /* new single ac traffic (first single ac or different single ac) */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = curr_t; + ctx->single_ac = (uint8)single_ac; + rc = BCME_OK; + goto exit; + } + /* same ac traffic, check if it lasts enough time */ + 
delta = curr_t - ctx->single_ac_timestamp; + + if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) { + /* wait enough time, can borrow now */ + ctx->allow_credit_borrow = 1; + ac = single_ac - 1; + } else { + rc = BCME_OK; + goto exit; + } + } + } else { + /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = 0; + ctx->single_ac = 0; + rc = BCME_OK; + goto exit; + } + + if (packets_map == 0) { + /* nothing to send, skip borrow */ + rc = BCME_OK; + goto exit; + } + + /* At this point, borrow all credits only for ac */ + while (FALSE == dhdp->proptxstatus_txoff) { +#ifdef LIMIT_BORROW + if (dhdp->wlfc_borrow_allowed) { + if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) { + break; + } + } + else + break; +#endif // endif + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + FALSE); + if (commit_info.p == NULL) { + /* before borrow only one ac exists and now this only ac is empty */ +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif // endif + break; + } + + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + if (commit_info.ac_fifo_credit_spent) { +#ifndef LIMIT_BORROW + ctx->FIFO_credit[ac]--; +#endif // endif + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif // endif + } + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif // endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + BCM_REFERENCE(pkt_send); + +exit: +#if defined(DHD_WLFC_THREAD) + dhd_os_wlfc_unblock(dhdp); + if (ctx && ctx->pkt_cnt_in_psq && pkt_send) { + wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS); + } else { + wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS); + } + } + return 0; +#else + return rc; +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** + * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by + * eg dhd_sendpkt(). 
+ * @param[in] dhdp Pointer to public DHD structure + * @param[in] fcommit Pointer to transmit function of next layer + * @param[in] commit_ctx Opaque context used when calling next layer + * @param[in] pktbuf Packet to send + * @param[in] need_toggle_host_if If TRUE, resets flag ctx->toggle_host_if + */ +int +dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf, + bool need_toggle_host_if) +{ + int rc = BCME_OK; + athost_wl_status_info_t* ctx; + +#if defined(DHD_WLFC_THREAD) + if (!pktbuf) + return BCME_OK; +#endif /* defined(DHD_WLFC_THREAD) */ + + if ((dhdp == NULL) || (fcommit == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + if (pktbuf) { + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0); + } + rc = WLFC_UNSUPPORTED; + goto exit; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + +#ifdef BCMDBUS + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + if (pktbuf) { + PKTFREE(ctx->osh, pktbuf, TRUE); + rc = BCME_OK; + } + goto exit; + } +#endif /* BCMDBUS */ + + if (dhdp->proptxstatus_module_ignore) { + if (pktbuf) { + uint32 htod = 0; + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE); + if (fcommit(commit_ctx, pktbuf)) { + /* free it if failed, otherwise do it in tx complete cb */ + PKTFREE(ctx->osh, pktbuf, TRUE); + } + rc = BCME_OK; + } + goto exit; + } + + if (pktbuf) { + int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + ASSERT(ac <= AC_COUNT); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1); + /* en-queue the packets to respective queue. 
*/ + rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac); + if (rc) { + _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE); + } else { + ctx->stats.pktin++; + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++; + } + } + + if (!ctx->fcommit) { + ctx->fcommit = fcommit; + } else { + ASSERT(ctx->fcommit == fcommit); + } + if (!ctx->commit_ctx) { + ctx->commit_ctx = commit_ctx; + } else { + ASSERT(ctx->commit_ctx == commit_ctx); + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhdp); +#else + dhd_wlfc_transfer_packets(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + +exit: + dhd_os_wlfc_unblock(dhdp); + return rc; +} /* dhd_wlfc_commit_packets */ + +/** + * Called when the (lower) DBUS layer indicates completion (succesfull or not) of a transmit packet + */ +int +dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) +{ + athost_wl_status_info_t* wlfc; + wlfc_mac_descriptor_t *entry; + void* pout = NULL; + int rtn = BCME_OK; + if ((dhd == NULL) || (txp == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + bcm_pkt_validate_chk(txp); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rtn = WLFC_UNSUPPORTED; + goto EXIT; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.signal_only_pkts_freed++; +#endif // endif + /* is this a signal-only packet? 
*/ + _dhd_wlfc_pullheader(wlfc, txp); + PKTFREE(wlfc->osh, txp, TRUE); + goto EXIT; + } + + entry = _dhd_wlfc_find_table_entry(wlfc, txp); + ASSERT(entry); + + if (!success || dhd->proptxstatus_txstatus_ignore) { + WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n", + __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp)))); + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE); + ASSERT(txp == pout); + } + + /* indicate failure and free the packet */ + dhd_txcomplete(dhd, txp, success); + + /* return the credit, if necessary */ + _dhd_wlfc_return_implied_credit(wlfc, txp); + + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) + entry->suppr_transit_count--; + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--; + wlfc->stats.pktout++; + PKTFREE(wlfc->osh, txp, TRUE); + } else { + /* bus confirmed pkt went to firmware side */ + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, txp); + } else { + int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp))); + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_BUSRETURNED, -1); + } + } + + ASSERT(entry->onbus_pkts_count > 0); + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; +EXIT: + dhd_os_wlfc_unblock(dhd); + return rtn; +} /* dhd_wlfc_txcomplete */ + +int +dhd_wlfc_init(dhd_pub_t *dhd) +{ + /* enable all signals & indicate host proptxstatus logic is active */ + uint32 tlv, mode, fw_caps; + int ret = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (dhd->wlfc_enabled) { + DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + 
return BCME_OK; + } + dhd->wlfc_enabled = TRUE; + dhd_os_wlfc_unblock(dhd); + + tlv = WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE | + WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + /* + try to enable/disable signaling by sending "tlv" iovar. if that fails, + fallback to no flow control? Print a message for now. + */ + + /* enable proptxtstatus signaling by default */ + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. + */ + DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n", + dhd->wlfc_enabled?"enabled":"disabled", tlv)); + } + + mode = 0; + + /* query caps */ + ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0); + + if (!ret) { + DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps)); + + if (WLFC_IS_OLD_DEF(fw_caps)) { +#ifdef BCMDBUS + mode = WLFC_MODE_HANGER; +#else + /* enable proptxtstatus v2 by default */ + mode = WLFC_MODE_AFQ; +#endif /* BCMDBUS */ + } else { + WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps)); +#ifdef BCMDBUS + WLFC_SET_AFQ(mode, 0); +#endif /* BCMDBUS */ + WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps)); + WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps)); + } + ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0); + } + + dhd_os_wlfc_block(dhd); + + dhd->wlfc_mode = 0; + if (ret >= 0) { + if (WLFC_IS_OLD_DEF(mode)) { + WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ)); + } else { + dhd->wlfc_mode = mode; + } + } + + DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); +#ifdef LIMIT_BORROW + dhd->wlfc_borrow_allowed = TRUE; +#endif // endif + dhd_os_wlfc_unblock(dhd); + + if (dhd->plat_init) + dhd->plat_init((void *)dhd); + + return BCME_OK; +} /* dhd_wlfc_init */ + +/** AMPDU host reorder 
specific function */ +int +dhd_wlfc_hostreorder_init(dhd_pub_t *dhd) +{ + /* enable only ampdu hostreorder here */ + uint32 tlv; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__)); + + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + /* enable proptxtstatus signaling by default */ + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n", + __FUNCTION__)); + } else { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. + */ + DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n", + __FUNCTION__, tlv)); + } + + dhd_os_wlfc_block(dhd); + dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER; + dhd_os_wlfc_unblock(dhd); + /* terence 20161229: enable ampdu_hostreorder if tlv enable hostreorder */ + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE); + + return BCME_OK; +} + +int +dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + +#ifndef BCMDBUS + _dhd_wlfc_cleanup_txq(dhd, fn, arg); +#endif /* !BCMDBUS */ + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** release all packet resources */ +int +dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + _dhd_wlfc_cleanup(dhd, fn, arg); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; 
+} + +int +dhd_wlfc_deinit(dhd_pub_t *dhd) +{ + /* cleanup all psq related resources */ + athost_wl_status_info_t* wlfc; + uint32 tlv = 0; + uint32 hostreorder = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (!dhd->wlfc_enabled) { + DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + dhd->wlfc_enabled = FALSE; + dhd_os_wlfc_unblock(dhd); + + /* query ampdu hostreorder */ + (void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder", + &hostreorder, WLC_GET_VAR, FALSE, 0); + + if (hostreorder) { + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tvl\n", + __FUNCTION__, __LINE__)); + } + + /* Disable proptxtstatus signaling for deinit */ + (void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + _dhd_wlfc_cleanup(dhd, NULL, NULL); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + int i; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + for (i = 0; i < h->max_items; i++) { + if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) { + _dhd_wlfc_hanger_free_pkt(wlfc, i, + WLFC_HANGER_PKT_STATE_COMPLETE, TRUE); + } + } + + /* delete hanger */ + _dhd_wlfc_hanger_delete(dhd, h); + } + + /* free top structure */ + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + dhd->proptxstatus_mode = hostreorder ? 
+ WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE; + + DHD_ERROR(("%s: wlfc_mode=0x%x, tlv=%d\n", __FUNCTION__, dhd->wlfc_mode, tlv)); + + dhd_os_wlfc_unblock(dhd); + + if (dhd->plat_deinit) + dhd->plat_deinit((void *)dhd); + return BCME_OK; +} /* dhd_wlfc_init */ + +/** + * Called on an interface event (WLC_E_IF) indicated by firmware + * @param[in] dhdp Pointer to public DHD structure + * @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ +int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea); + + dhd_os_wlfc_unblock(dhdp); + return rc; +} + +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ +int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data); + + dhd_os_wlfc_unblock(dhdp); + + return rc; +} +#ifdef LIMIT_BORROW +int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data) +{ + if (dhdp == NULL || event_data == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + dhd_os_wlfc_block(dhdp); + dhdp->wlfc_borrow_allowed = (bool)(*(uint32 *)event_data); + dhd_os_wlfc_unblock(dhdp); + + return BCME_OK; +} +#endif /* LIMIT_BORROW */ + +/** + * Called eg on receiving a 
WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ +int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state); + + dhd_os_wlfc_unblock(dhdp); + return rc; +} + +/** debug specific function */ +int +dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + int i; + uint8* ea; + athost_wl_status_info_t* wlfc; + wlfc_hanger_t* h; + wlfc_mac_descriptor_t* mac_table; + wlfc_mac_descriptor_t* interfaces; + char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"}; + + if (!dhdp || !strbuf) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state; + + h = (wlfc_hanger_t*)wlfc->hanger; + if (h == NULL) { + bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n"); + } + + mac_table = wlfc->destination_entries.nodes; + interfaces = wlfc->destination_entries.interfaces; + bcm_bprintf(strbuf, "---- wlfc stats ----\n"); + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + h = (wlfc_hanger_t*)wlfc->hanger; + if (h == NULL) { + bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n"); + } else { + bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push," + "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n", + h->pushed, + h->popped, + h->failed_to_push, + h->failed_to_pop, + h->failed_slotfind, + (h->pushed - h->popped)); + } + } + + bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), " + "(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n", 
+ wlfc->stats.tlv_parse_failed, + wlfc->stats.credit_request_failed, + wlfc->stats.mac_update_failed, + wlfc->stats.psmode_update_failed, + wlfc->stats.delayq_full_error, + wlfc->stats.rollback_failed); + + bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) " + "(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d]," + "AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n", + wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0], + wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0], + wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1], + wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1], + wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2], + wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2], + wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3], + wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3], + wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4], + wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]); + + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (interfaces[i].occupied) { + char* iftype_desc; + + if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT) + iftype_desc = "hostif_flow_state[i] == OFF) + ? " OFF":" ON")); + + bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", + i, + interfaces[i].psq.n_pkts_tot, + ((interfaces[i].state == + WLFC_STATE_OPEN) ? 
"OPEN":"CLOSE"), + interfaces[i].requested_credit, + interfaces[i].transit_count, + interfaces[i].suppr_transit_count, + interfaces[i].onbus_pkts_count); + + bcm_bprintf(strbuf, "INTERFACE[%d].PSQ" + "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," + "(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d)," + "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n", + i, + interfaces[i].psq.q[0].n_pkts, + interfaces[i].psq.q[1].n_pkts, + interfaces[i].afq.q[0].n_pkts, + interfaces[i].psq.q[2].n_pkts, + interfaces[i].psq.q[3].n_pkts, + interfaces[i].afq.q[1].n_pkts, + interfaces[i].psq.q[4].n_pkts, + interfaces[i].psq.q[5].n_pkts, + interfaces[i].afq.q[2].n_pkts, + interfaces[i].psq.q[6].n_pkts, + interfaces[i].psq.q[7].n_pkts, + interfaces[i].afq.q[3].n_pkts, + interfaces[i].psq.q[8].n_pkts, + interfaces[i].psq.q[9].n_pkts, + interfaces[i].afq.q[4].n_pkts); + } + } + + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (mac_table[i].occupied) { + ea = mac_table[i].ea; + bcm_bprintf(strbuf, "MAC_table[%d].ea = " + "["MACDBG"], if:%d \n", i, + MAC2STRDBG(ea), mac_table[i].interface_id); + + bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", + i, + mac_table[i].psq.n_pkts_tot, + ((mac_table[i].state == + WLFC_STATE_OPEN) ? 
" OPEN":"CLOSE"), + mac_table[i].requested_credit, + mac_table[i].transit_count, + mac_table[i].suppr_transit_count, + mac_table[i].onbus_pkts_count); +#ifdef PROP_TXSTATUS_DEBUG + bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n", + i, mac_table[i].opened_ct, mac_table[i].closed_ct); +#endif // endif + bcm_bprintf(strbuf, "MAC_table[%d].PSQ" + "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," + "(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d)," + "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n", + i, + mac_table[i].psq.q[0].n_pkts, + mac_table[i].psq.q[1].n_pkts, + mac_table[i].afq.q[0].n_pkts, + mac_table[i].psq.q[2].n_pkts, + mac_table[i].psq.q[3].n_pkts, + mac_table[i].afq.q[1].n_pkts, + mac_table[i].psq.q[4].n_pkts, + mac_table[i].psq.q[5].n_pkts, + mac_table[i].afq.q[2].n_pkts, + mac_table[i].psq.q[6].n_pkts, + mac_table[i].psq.q[7].n_pkts, + mac_table[i].afq.q[3].n_pkts, + mac_table[i].psq.q[8].n_pkts, + mac_table[i].psq.q[9].n_pkts, + mac_table[i].afq.q[4].n_pkts); + + } + } + +#ifdef PROP_TXSTATUS_DEBUG + { + int avg; + int moving_avg = 0; + int moving_samples; + + if (wlfc->stats.latency_sample_count) { + moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32); + + for (i = 0; i < moving_samples; i++) + moving_avg += wlfc->stats.deltas[i]; + moving_avg /= moving_samples; + + avg = (100 * wlfc->stats.total_status_latency) / + wlfc->stats.latency_sample_count; + bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = " + "(%d.%d, %03d, %03d)\n", + moving_samples, avg/100, (avg - (avg/100)*100), + wlfc->stats.latency_most_recent, + moving_avg); + } + } + + bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), " + "back = (%d,%d,%d,%d,%d,%d)\n", + wlfc->stats.fifo_credits_sent[0], + wlfc->stats.fifo_credits_sent[1], + wlfc->stats.fifo_credits_sent[2], + wlfc->stats.fifo_credits_sent[3], + wlfc->stats.fifo_credits_sent[4], + wlfc->stats.fifo_credits_sent[5], + + 
wlfc->stats.fifo_credits_back[0], + wlfc->stats.fifo_credits_back[1], + wlfc->stats.fifo_credits_back[2], + wlfc->stats.fifo_credits_back[3], + wlfc->stats.fifo_credits_back[4], + wlfc->stats.fifo_credits_back[5]); + { + uint32 fifo_cr_sent = 0; + uint32 fifo_cr_acked = 0; + uint32 request_cr_sent = 0; + uint32 request_cr_ack = 0; + uint32 bc_mc_cr_ack = 0; + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) { + fifo_cr_sent += wlfc->stats.fifo_credits_sent[i]; + } + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) { + fifo_cr_acked += wlfc->stats.fifo_credits_back[i]; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_sent += + wlfc->destination_entries.nodes[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_sent += + wlfc->destination_entries.interfaces[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_ack += + wlfc->destination_entries.nodes[i].dstncredit_acks; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_ack += + wlfc->destination_entries.interfaces[i].dstncredit_acks; + } + } + bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d)," + "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)", + fifo_cr_sent, fifo_cr_acked, + request_cr_sent, request_cr_ack, + wlfc->destination_entries.other.dstncredit_acks, + bc_mc_cr_ack, + wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed); + } +#endif /* PROP_TXSTATUS_DEBUG */ + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out)," + "(dropped,hdr_only,wlc_tossed,wlc_dropped,wlc_exptime)" + "(freed,free_err,rollback)) = " + 
"((%d,%d,%d,%d,%d),(%d,%d,%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.pktin, + wlfc->stats.pkt2bus, + wlfc->stats.txstatus_in, + wlfc->stats.dhd_hdrpulls, + wlfc->stats.pktout, + + wlfc->stats.pktdropped, + wlfc->stats.wlfc_header_only_pkt, + wlfc->stats.wlc_tossed_pkts, + wlfc->stats.pkt_dropped, + wlfc->stats.pkt_exptime, + + wlfc->stats.pkt_freed, + wlfc->stats.pkt_free_err, wlfc->stats.rollback); + + bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = " + "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.d11_suppress, + wlfc->stats.wl_suppress, + wlfc->stats.bad_suppress, + + wlfc->stats.psq_d11sup_enq, + wlfc->stats.psq_wlsup_enq, + wlfc->stats.psq_hostq_enq, + wlfc->stats.mac_handle_notfound, + + wlfc->stats.psq_d11sup_retx, + wlfc->stats.psq_wlsup_retx, + wlfc->stats.psq_hostq_retx); + + bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n", + wlfc->stats.cleanup_txq_cnt, + wlfc->stats.cleanup_psq_cnt, + wlfc->stats.cleanup_fw_cnt); + + bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error); + + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i, + wlfc->pkt_cnt_in_q[i][0], + wlfc->pkt_cnt_in_q[i][1], + wlfc->pkt_cnt_in_q[i][2], + wlfc->pkt_cnt_in_q[i][3], + wlfc->pkt_cnt_in_q[i][4]); + } + bcm_bprintf(strbuf, "\n"); + + dhd_os_wlfc_unblock(dhdp); + return BCME_OK; +} /* dhd_wlfc_dump */ + +int dhd_wlfc_clear_counts(dhd_pub_t *dhd) +{ + athost_wl_status_info_t* wlfc; + wlfc_hanger_t* hanger; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t)); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + 
hanger = (wlfc_hanger_t*)wlfc->hanger; + + hanger->pushed = 0; + hanger->popped = 0; + hanger->failed_slotfind = 0; + hanger->failed_to_pop = 0; + hanger->failed_to_push = 0; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** returns TRUE if flow control is enabled */ +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_enabled; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_state ? dhd->proptxstatus_mode : 0; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->wlfc_state) { + dhd->proptxstatus_mode = val & 0xff; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called when rx frame is received from the dongle */ +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf) +{ + athost_wl_status_info_t* wlfc; + bool rc = FALSE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return FALSE; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + if (PKTLEN(wlfc->osh, pktbuf) == 0) { + wlfc->stats.wlfc_header_only_pkt++; + rc = TRUE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock) +{ + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, 
__LINE__)); + return BCME_BADARG; + } + + if (bAcquireLock) { + dhd_os_wlfc_block(dhdp); + } + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) || + dhdp->proptxstatus_module_ignore) { + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + return WLFC_UNSUPPORTED; + } + + if (state != dhdp->proptxstatus_txoff) { + dhdp->proptxstatus_txoff = state; + } + + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + + return BCME_OK; +} + +/** Called when eg an rx frame is received from the dongle */ +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio) +{ + athost_wl_status_info_t* wlfc; + int rx_path_ac = -1; + + if ((dhd == NULL) || (prio >= NUMPRIO)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_rxpkt_chk) { + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + rx_path_ac = prio2fifo[prio]; + wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME(); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_module_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) +{ + uint32 tlv = 0; + bool bChanged = FALSE; + + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if ((bool)val != dhd->proptxstatus_module_ignore) { + dhd->proptxstatus_module_ignore = (val != 0); + /* force txstatus_ignore sync with proptxstatus_module_ignore */ + 
dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore; + if (FALSE == dhd->proptxstatus_module_ignore) { + tlv = WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE; + } + /* always enable host reorder */ + tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + bChanged = TRUE; + } + + dhd_os_wlfc_unblock(dhd); + + if (bChanged) { + /* select enable proptxtstatus signaling */ + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n", + __FUNCTION__, tlv)); + } else { + DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n", + __FUNCTION__, tlv)); + } + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_credit_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->proptxstatus_credit_ignore = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_txstatus_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + 
return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->proptxstatus_txstatus_ignore = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_rxpkt_chk; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->wlfc_rxpkt_chk = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +#endif /* PROP_TXSTATUS */ diff --git a/bcmdhd.100.10.315.x/dhd_wlfc.h b/bcmdhd.100.10.315.x/dhd_wlfc.h new file mode 100644 index 0000000..918de38 --- /dev/null +++ b/bcmdhd.100.10.315.x/dhd_wlfc.h @@ -0,0 +1,562 @@ +/* + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_wlfc.h 690477 2017-03-16 10:17:17Z $ + * + */ +#ifndef __wlfc_host_driver_definitions_h__ +#define __wlfc_host_driver_definitions_h__ + +/* #define OOO_DEBUG */ + +#define KERNEL_THREAD_RETURN_TYPE int + +typedef int (*f_commitpkt_t)(void* ctx, void* p); +typedef bool (*f_processpkt_t)(void* p, void* arg); + +#define WLFC_UNSUPPORTED -9999 + +#define WLFC_NO_TRAFFIC -1 +#define WLFC_MULTI_TRAFFIC 0 + +#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */ + +/** 16 bits will provide an absolute max of 65536 slots */ +#define WLFC_HANGER_MAXITEMS 3072 + +#define WLFC_HANGER_ITEM_STATE_FREE 1 +#define WLFC_HANGER_ITEM_STATE_INUSE 2 +#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3 +#define WLFC_HANGER_ITEM_STATE_FLUSHED 4 + +#define WLFC_HANGER_PKT_STATE_TXSTATUS 1 +#define WLFC_HANGER_PKT_STATE_BUSRETURNED 2 +#define WLFC_HANGER_PKT_STATE_COMPLETE \ + (WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED) + +typedef enum { + Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */ + Q_TYPE_AFQ /**< At Firmware Queue */ +} q_type_t; + +typedef enum ewlfc_packet_state { + eWLFC_PKTTYPE_NEW, /**< unused in the code (Jan 2015) */ + eWLFC_PKTTYPE_DELAYED, /**< packet did not enter wlfc yet */ + eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */ + eWLFC_PKTTYPE_MAX +} ewlfc_packet_state_t; + +typedef enum ewlfc_mac_entry_action { + eWLFC_MAC_ENTRY_ACTION_ADD, + eWLFC_MAC_ENTRY_ACTION_DEL, + eWLFC_MAC_ENTRY_ACTION_UPDATE, + eWLFC_MAC_ENTRY_ACTION_MAX +} ewlfc_mac_entry_action_t; + +typedef struct wlfc_hanger_item { + uint8 state; + uint8 gen; + uint8 pkt_state; /**< bitmask containing eg 
WLFC_HANGER_PKT_STATE_TXCOMPLETE */ + uint8 pkt_txstatus; + uint32 identifier; + void* pkt; +#ifdef PROP_TXSTATUS_DEBUG + uint32 push_time; +#endif // endif + struct wlfc_hanger_item *next; +} wlfc_hanger_item_t; + +/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */ +typedef struct wlfc_hanger { + int max_items; + uint32 pushed; + uint32 popped; + uint32 failed_to_push; + uint32 failed_to_pop; + uint32 failed_slotfind; + uint32 slot_pos; + wlfc_hanger_item_t items[1]; +} wlfc_hanger_t; + +#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \ + sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t))) + +#define WLFC_STATE_OPEN 1 /**< remote MAC is able to receive packets */ +#define WLFC_STATE_CLOSE 2 /**< remote MAC is in power save mode */ + +#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */ +#define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1) + +#define WLFC_PSQ_LEN (4096 * 8) + +#ifdef BCMDBUS +#define WLFC_FLOWCONTROL_HIWATER 512 +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4) +#else +#define WLFC_FLOWCONTROL_HIWATER ((4096 * 8) - 256) +#define WLFC_FLOWCONTROL_LOWATER 256 +#endif + +#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256)) +#undef WLFC_FLOWCONTROL_HIWATER +#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256) +#undef WLFC_FLOWCONTROL_LOWATER +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4) +#endif // endif + +#define WLFC_LOG_BUF_SIZE (1024*1024) + +/** Properties related to a remote MAC entity */ +typedef struct wlfc_mac_descriptor { + uint8 occupied; /**< if 0, this descriptor is unused and thus can be (re)used */ + uint8 interface_id; + uint8 iftype; /**< eg WLC_E_IF_ROLE_STA */ + uint8 state; /**< eg WLFC_STATE_OPEN */ + uint8 ac_bitmap; /**< automatic power save delivery (APSD) */ + uint8 requested_credit; + uint8 requested_packet; /**< unit: [number of packets] */ + uint8 ea[ETHER_ADDR_LEN]; + + /** maintain (MAC,AC) 
based seq count for packets going to the device. As well as bc/mc. */ + uint8 seq[AC_COUNT + 1]; + uint8 generation; /**< toggles between 0 and 1 */ + struct pktq psq; /**< contains both 'delayed' and 'suppressed' packets */ + /** packets at firmware queue */ + struct pktq afq; + /** The AC pending bitmap that was reported to the fw at last change */ + uint8 traffic_lastreported_bmp; + /** The new AC pending bitmap */ + uint8 traffic_pending_bmp; + /** 1= send on next opportunity */ + uint8 send_tim_signal; + uint8 mac_handle; /**< mac handles are assigned by the dongle */ + /** Number of packets at dongle for this entry. */ + int transit_count; + /** Number of suppression to wait before evict from delayQ */ + int suppr_transit_count; + /** pkt sent to bus but no bus TX complete yet */ + int onbus_pkts_count; + /** flag. TRUE when remote MAC is in suppressed state */ + uint8 suppressed; + +#ifdef PROP_TXSTATUS_DEBUG + uint32 dstncredit_sent_packets; + uint32 dstncredit_acks; + uint32 opened_ct; + uint32 closed_ct; +#endif // endif + struct wlfc_mac_descriptor* prev; + struct wlfc_mac_descriptor* next; +} wlfc_mac_descriptor_t; + +/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */ +typedef struct dhd_wlfc_commit_info { + uint8 needs_hdr; + uint8 ac_fifo_credit_spent; + ewlfc_packet_state_t pkt_type; + wlfc_mac_descriptor_t* mac_entry; + void* p; +} dhd_wlfc_commit_info_t; + +#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\ + entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0) + +#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++ +#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)] + +typedef struct athost_wl_stat_counters { + uint32 pktin; + uint32 pktout; + uint32 pkt2bus; + uint32 pktdropped; + uint32 tlv_parse_failed; + uint32 rollback; + uint32 rollback_failed; + uint32 delayq_full_error; + uint32 credit_request_failed; + uint32 packet_request_failed; + uint32 
mac_update_failed; + uint32 psmode_update_failed; + uint32 interface_update_failed; + uint32 wlfc_header_only_pkt; + uint32 txstatus_in; + uint32 d11_suppress; + uint32 wl_suppress; + uint32 bad_suppress; + uint32 pkt_dropped; + uint32 pkt_exptime; + uint32 pkt_freed; + uint32 pkt_free_err; + uint32 psq_wlsup_retx; + uint32 psq_wlsup_enq; + uint32 psq_d11sup_retx; + uint32 psq_d11sup_enq; + uint32 psq_hostq_retx; + uint32 psq_hostq_enq; + uint32 mac_handle_notfound; + uint32 wlc_tossed_pkts; + uint32 dhd_hdrpulls; + uint32 generic_error; + /* an extra one for bc/mc traffic */ + uint32 send_pkts[AC_COUNT + 1]; + uint32 drop_pkts[WLFC_PSQ_PREC_COUNT]; + uint32 ooo_pkts[AC_COUNT + 1]; +#ifdef PROP_TXSTATUS_DEBUG + /** all pkt2bus -> txstatus latency accumulated */ + uint32 latency_sample_count; + uint32 total_status_latency; + uint32 latency_most_recent; + int idx_delta; + uint32 deltas[10]; + uint32 fifo_credits_sent[6]; + uint32 fifo_credits_back[6]; + uint32 dropped_qfull[6]; + uint32 signal_only_pkts_sent; + uint32 signal_only_pkts_freed; +#endif // endif + uint32 cleanup_txq_cnt; + uint32 cleanup_psq_cnt; + uint32 cleanup_fw_cnt; +} athost_wl_stat_counters_t; + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0) +#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_back[(ac)]++;} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \ + (ctx)->stats.dropped_qfull[(ac)]++;} while (0) +#else +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0) +#endif // endif +#define WLFC_PACKET_BOUND 10 +#define WLFC_FCMODE_NONE 0 +#define WLFC_FCMODE_IMPLIED_CREDIT 1 +#define WLFC_FCMODE_EXPLICIT_CREDIT 2 +#define WLFC_ONLY_AMPDU_HOSTREORDER 3 + +/** Reserved credits ratio when borrowed by hihger 
priority */ +#define WLFC_BORROW_LIMIT_RATIO 4 + +/** How long to defer borrowing in milliseconds */ +#define WLFC_BORROW_DEFER_PERIOD_MS 100 + +/** How long to defer flow control in milliseconds */ +#define WLFC_FC_DEFER_PERIOD_MS 200 + +/** How long to detect occurance per AC in miliseconds */ +#define WLFC_RX_DETECTION_THRESHOLD_MS 100 + +/** Mask to represent available ACs (note: BC/MC is ignored) */ +#define WLFC_AC_MASK 0xF + +/** flow control specific information, only 1 instance during driver lifetime */ +typedef struct athost_wl_status_info { + uint8 last_seqid_to_wlc; + + /** OSL handle */ + osl_t *osh; + /** dhd public struct pointer */ + void *dhdp; + + f_commitpkt_t fcommit; + void* commit_ctx; + + /** statistics */ + athost_wl_stat_counters_t stats; + + /** incremented on eg receiving a credit map event from the dongle */ + int Init_FIFO_credit[AC_COUNT + 2]; + /** the additional ones are for bc/mc and ATIM FIFO */ + int FIFO_credit[AC_COUNT + 2]; + /** Credit borrow counts for each FIFO from each of the other FIFOs */ + int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2]; + + /** packet hanger and MAC->handle lookup table */ + void *hanger; + + struct { + /** table for individual nodes */ + wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE]; + /** table for interfaces */ + wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM]; + /* OS may send packets to unknown (unassociated) destinations */ + /** A place holder for bc/mc and packets to unknown destinations */ + wlfc_mac_descriptor_t other; + } destination_entries; + + wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */ + int active_entry_count; + + wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE]; + int requested_entry_count; + + /* pkt counts for each interface and ac */ + int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_per_ac[AC_COUNT+1]; + int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_in_psq; + uint8 allow_fc; /**< Boolean */ + 
uint32 fc_defer_timestamp; + uint32 rx_timestamp[AC_COUNT+1]; + + /** ON/OFF state for flow control to the host network interface */ + uint8 hostif_flow_state[WLFC_MAX_IFNUM]; + uint8 host_ifidx; + + /** to flow control an OS interface */ + uint8 toggle_host_if; + + /** To borrow credits */ + uint8 allow_credit_borrow; + + /** ac number for the first single ac traffic */ + uint8 single_ac; + + /** Timestamp for the first single ac traffic */ + uint32 single_ac_timestamp; + + bool bcmc_credit_supported; + +} athost_wl_status_info_t; + +/** Please be mindful that total pkttag space is 32 octets only */ +typedef struct dhd_pkttag { + +#ifdef BCM_OBJECT_TRACE + /* if use this field, keep it at the first 4 bytes */ + uint32 sn; +#endif /* BCM_OBJECT_TRACE */ + + /** + b[15] - 1 = wlfc packet + b[14:13] - encryption exemption + b[12 ] - 1 = event channel + b[11 ] - 1 = this packet was sent in response to one time packet request, + do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET]. + b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on] + b[9 ] - 1 = packet is host->firmware (transmit direction) + - 0 = packet received from firmware (firmware->host) + b[8 ] - 1 = packet was sent due to credit_request (pspoll), + packet does not count against FIFO credit. + - 0 = normal transaction, packet counts against FIFO credit + b[7 ] - 1 = AP, 0 = STA + b[6:4] - AC FIFO number + b[3:0] - interface index + */ + uint16 if_flags; + + /** + * destination MAC address for this packet so that not every module needs to open the packet + * to find this + */ + uint8 dstn_ether[ETHER_ADDR_LEN]; + + /** This 32-bit goes from host to device for every packet. */ + uint32 htod_tag; + + /** This 16-bit is original d11seq number for every suppressed packet. */ + uint16 htod_seq; + + /** This address is mac entry for every packet. 
*/ + void *entry; + + /** bus specific stuff */ + union { + struct { + void *stuff; + uint32 thing1; + uint32 thing2; + } sd; + + struct { + void *bus; + void *urb; + } usb; + } bus_specific; +} dhd_pkttag_t; + +#define DHD_PKTTAG_WLFCPKT_MASK 0x1 +#define DHD_PKTTAG_WLFCPKT_SHIFT 15 +#define DHD_PKTTAG_WLFCPKT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \ + (((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT) +#define DHD_PKTTAG_WLFCPKT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK) + +#define DHD_PKTTAG_EXEMPT_MASK 0x3 +#define DHD_PKTTAG_EXEMPT_SHIFT 13 +#define DHD_PKTTAG_EXEMPT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \ + (((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT) +#define DHD_PKTTAG_EXEMPT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK) + +#define DHD_PKTTAG_EVENT_MASK 0x1 +#define DHD_PKTTAG_EVENT_SHIFT 12 +#define DHD_PKTTAG_SETEVENT(tag, event) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \ + (((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT) +#define DHD_PKTTAG_EVENT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK) + +#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1 +#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11 +#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \ + (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) +#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & 
DHD_PKTTAG_ONETIMEPKTRQST_MASK) + +#define DHD_PKTTAG_SIGNALONLY_MASK 0x1 +#define DHD_PKTTAG_SIGNALONLY_SHIFT 10 +#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \ + (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT) +#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK) + +#define DHD_PKTTAG_PKTDIR_MASK 0x1 +#define DHD_PKTTAG_PKTDIR_SHIFT 9 +#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \ + (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT) +#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK) + +#define DHD_PKTTAG_CREDITCHECK_MASK 0x1 +#define DHD_PKTTAG_CREDITCHECK_SHIFT 8 +#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \ + (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT) +#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK) + +#define DHD_PKTTAG_IFTYPE_MASK 0x1 +#define DHD_PKTTAG_IFTYPE_SHIFT 7 +#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \ + (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT) +#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK) + +#define DHD_PKTTAG_FIFO_MASK 0x7 +#define DHD_PKTTAG_FIFO_SHIFT 4 +#define 
DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \ + (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT) +#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK) + +#define DHD_PKTTAG_IF_MASK 0xf +#define DHD_PKTTAG_IF_SHIFT 0 +#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \ + (((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT) +#define DHD_PKTTAG_IF(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK) + +#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \ + (dstn_MAC_ea), ETHER_ADDR_LEN) +#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether + +#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue) +#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag) + +#define DHD_PKTTAG_SET_H2DSEQ(tag, seq) ((dhd_pkttag_t*)(tag))->htod_seq = (seq) +#define DHD_PKTTAG_H2DSEQ(tag) (((dhd_pkttag_t*)(tag))->htod_seq) + +#define DHD_PKTTAG_SET_ENTRY(tag, entry) ((dhd_pkttag_t*)(tag))->entry = (entry) +#define DHD_PKTTAG_ENTRY(tag) (((dhd_pkttag_t*)(tag))->entry) + +#define PSQ_SUP_IDX(x) (x * 2 + 1) +#define PSQ_DLY_IDX(x) (x * 2) + +#ifdef PROP_TXSTATUS_DEBUG +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0) +#else +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0) +#endif // endif + +#ifdef BCM_OBJECT_TRACE +#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val) +#define DHD_PKTTAG_SN(tag) (((dhd_pkttag_t*)(tag))->sn) +#endif /* BCM_OBJECT_TRACE */ + +/* public functions */ 
+int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, + uchar *reorder_info_buf, uint *reorder_info_len); +KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data); +int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, + void* commit_ctx, void *pktbuf, bool need_toggle_host_if); +int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success); +int dhd_wlfc_init(dhd_pub_t *dhd); +#ifdef SUPPORT_P2P_GO_PS +int dhd_wlfc_suspend(dhd_pub_t *dhd); +int dhd_wlfc_resume(dhd_pub_t *dhd); +#endif /* SUPPORT_P2P_GO_PS */ +int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd); +int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg); +int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg); +int dhd_wlfc_deinit(dhd_pub_t *dhd); +int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea); +int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data); +#ifdef LIMIT_BORROW +int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data); +#endif /* LIMIT_BORROW */ +int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp); +int dhd_wlfc_enable(dhd_pub_t *dhdp); +int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +int dhd_wlfc_clear_counts(dhd_pub_t *dhd); +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val); +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val); +bool dhd_wlfc_is_supported(dhd_pub_t *dhd); +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf); +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock); +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio); + +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val); +int 
dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val); + +int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val); + +#endif /* __wlfc_host_driver_definitions_h__ */ diff --git a/bcmdhd.100.10.315.x/dngl_stats.h b/bcmdhd.100.10.315.x/dngl_stats.h new file mode 100644 index 0000000..6bbe9fd --- /dev/null +++ b/bcmdhd.100.10.315.x/dngl_stats.h @@ -0,0 +1,386 @@ +/* + * Common stats definitions for clients of dongle + * ports + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dngl_stats.h 716269 2017-08-17 09:22:46Z $ + */ + +#ifndef _dngl_stats_h_ +#define _dngl_stats_h_ + +#include +#include <802.11.h> + +typedef struct { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long multicast; /* multicast packets received */ +} dngl_stats_t; + +typedef int32 wifi_radio; +typedef int32 wifi_channel; +typedef int32 wifi_rssi; +typedef struct { uint16 version; uint16 length; } ver_len; + +typedef enum wifi_channel_width { + WIFI_CHAN_WIDTH_20 = 0, + WIFI_CHAN_WIDTH_40 = 1, + WIFI_CHAN_WIDTH_80 = 2, + WIFI_CHAN_WIDTH_160 = 3, + WIFI_CHAN_WIDTH_80P80 = 4, + WIFI_CHAN_WIDTH_5 = 5, + WIFI_CHAN_WIDTH_10 = 6, + WIFI_CHAN_WIDTH_INVALID = -1 +} wifi_channel_width_t; + +typedef enum { + WIFI_DISCONNECTED = 0, + WIFI_AUTHENTICATING = 1, + WIFI_ASSOCIATING = 2, + WIFI_ASSOCIATED = 3, + WIFI_EAPOL_STARTED = 4, /* if done by firmware/driver */ + WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */ +} wifi_connection_state; + +typedef enum { + WIFI_ROAMING_IDLE = 0, + WIFI_ROAMING_ACTIVE = 1 +} wifi_roam_state; + +typedef enum { + WIFI_INTERFACE_STA = 0, + WIFI_INTERFACE_SOFTAP = 1, + WIFI_INTERFACE_IBSS = 2, + WIFI_INTERFACE_P2P_CLIENT = 3, + WIFI_INTERFACE_P2P_GO = 4, + WIFI_INTERFACE_NAN = 5, + WIFI_INTERFACE_MESH = 6 +} wifi_interface_mode; + +#define WIFI_CAPABILITY_QOS 0x00000001 /* set for QOS association */ +#define WIFI_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11 + * beacon frame control protected bit set) + */ +#define WIFI_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 
Extended Capabilities + * element interworking bit is set + */ +#define WIFI_CAPABILITY_HS20 0x00000008 /* set for HS20 association */ +#define WIFI_CAPABILITY_SSID_UTF8 0x00000010 /* set is 802.11 Extended Capabilities + * element UTF-8 SSID bit is set + */ +#define WIFI_CAPABILITY_COUNTRY 0x00000020 /* set is 802.11 Country Element is present */ +#define PACK_ATTRIBUTE __attribute__ ((packed)) +typedef struct { + wifi_interface_mode mode; /* interface mode */ + uint8 mac_addr[6]; /* interface mac address (self) */ + uint8 PAD[2]; + wifi_connection_state state; /* connection state (valid for STA, CLI only) */ + wifi_roam_state roaming; /* roaming state */ + uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */ + uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */ + uint8 bssid[ETHER_ADDR_LEN]; /* bssid */ + uint8 PAD[1]; + uint8 ap_country_str[3]; /* country string advertised by AP */ + uint8 country_str[3]; /* country string for this association */ + uint8 PAD[2]; +} wifi_interface_info; + +typedef wifi_interface_info *wifi_interface_handle; + +/* channel information */ +typedef struct { + wifi_channel_width_t width; /* channel width (20, 40, 80, 80+80, 160) */ + wifi_channel center_freq; /* primary 20 MHz channel */ + wifi_channel center_freq0; /* center frequency (MHz) first segment */ + wifi_channel center_freq1; /* center frequency (MHz) second segment */ +} wifi_channel_info; + +/* wifi rate */ +typedef struct { + uint32 preamble; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */ + uint32 nss; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */ + uint32 bw; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */ + uint32 rateMcsIdx; /* OFDM/CCK rate code would be as per ieee std + * in the units of 0.5mbps + */ + /* HT/VHT it would be mcs index */ + uint32 reserved; /* reserved */ + uint32 bitrate; /* units of 100 Kbps */ +} wifi_rate; + +typedef struct { + uint32 preamble :3; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */ + uint32 nss :2; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */ + uint32 bw 
:3; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */ + uint32 rateMcsIdx :8; /* OFDM/CCK rate code would be as per ieee std + * in the units of 0.5mbps HT/VHT it would be + * mcs index + */ + uint32 reserved :16; /* reserved */ + uint32 bitrate; /* units of 100 Kbps */ +} wifi_rate_v1; + +/* channel statistics */ +typedef struct { + wifi_channel_info channel; /* channel */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number + * accruing over time) + */ +} wifi_channel_stat; + +/* radio statistics */ +typedef struct { + struct { + uint16 version; + uint16 length; + }; + wifi_radio radio; /* wifi radio (if multiple radio supported) */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 tx_time; /* msecs the radio is transmitting (32 bits + * number accruing over time) + */ + uint32 rx_time; /* msecs the radio is in active receive (32 bits + * number accruing over time) + */ + uint32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits + * number accruing over time) + */ + uint32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits + * number accruing over time) + */ + uint32 on_time_gscan; /* msecs the radio is awake due to G?scan (32 bits + * number accruing over time) + */ + uint32 on_time_roam_scan; /* msecs the radio is awake due to roam?scan (32 bits + * number accruing over time) + */ + uint32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits + * number accruing over time) + */ + uint32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and + * GAS exchange (32 bits number accruing over time) + */ + uint32 num_channels; /* number of channels */ + wifi_channel_stat channels[1]; /* channel statistics */ +} wifi_radio_stat; + +typedef struct { + wifi_radio radio; + uint32 on_time; + uint32 tx_time; + uint32 rx_time; + uint32 on_time_scan; + uint32 on_time_nbd; 
+ uint32 on_time_gscan; + uint32 on_time_roam_scan; + uint32 on_time_pno_scan; + uint32 on_time_hs20; + uint32 num_channels; +} wifi_radio_stat_h; + +/* per rate statistics */ +typedef struct { + wifi_rate_v1 rate; /* rate information */ + uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */ + uint32 rx_mpdu; /* number of received data pkts */ + uint32 mpdu_lost; /* number of data packet losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ +} wifi_rate_stat_v1; + +typedef struct { + uint16 version; + uint16 length; + uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */ + uint32 rx_mpdu; /* number of received data pkts */ + uint32 mpdu_lost; /* number of data packet losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + wifi_rate rate; +} wifi_rate_stat; + +/* access categories */ +typedef enum { + WIFI_AC_VO = 0, + WIFI_AC_VI = 1, + WIFI_AC_BE = 2, + WIFI_AC_BK = 3, + WIFI_AC_MAX = 4 +} wifi_traffic_ac; + +/* wifi peer type */ +typedef enum +{ + WIFI_PEER_STA, + WIFI_PEER_AP, + WIFI_PEER_P2P_GO, + WIFI_PEER_P2P_CLIENT, + WIFI_PEER_NAN, + WIFI_PEER_TDLS, + WIFI_PEER_INVALID +} wifi_peer_type; + +/* per peer statistics */ +typedef struct { + wifi_peer_type type; /* peer type (AP, TDLS, GO etc.) 
*/ + uint8 peer_mac_address[6]; /* mac address */ + uint32 capabilities; /* peer WIFI_CAPABILITY_XXX */ + uint32 num_rate; /* number of rates */ + wifi_rate_stat rate_stats[1]; /* per rate statistics, number of entries = num_rate */ +} wifi_peer_info; + +/* per access category statistics */ +typedef struct { + wifi_traffic_ac ac; /* access category (VI, VO, BE, BK) */ + uint32 tx_mpdu; /* number of successfully transmitted unicast data pkts + * (ACK rcvd) + */ + uint32 rx_mpdu; /* number of received unicast mpdus */ + uint32 tx_mcast; /* number of succesfully transmitted multicast + * data packets + */ + /* STA case: implies ACK received from AP for the + * unicast packet in which mcast pkt was sent + */ + uint32 rx_mcast; /* number of received multicast data packets */ + uint32 rx_ampdu; /* number of received unicast a-mpdus */ + uint32 tx_ampdu; /* number of transmitted unicast a-mpdus */ + uint32 mpdu_lost; /* number of data pkt losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + uint32 contention_time_min; /* data pkt min contention time (usecs) */ + uint32 contention_time_max; /* data pkt max contention time (usecs) */ + uint32 contention_time_avg; /* data pkt avg contention time (usecs) */ + uint32 contention_num_samples; /* num of data pkts used for contention statistics */ +} wifi_wmm_ac_stat; + +/* interface statistics */ +typedef struct { + wifi_interface_handle iface; /* wifi interface */ + wifi_interface_info info; /* current state of the interface */ + uint32 beacon_rx; /* access point beacon received count from + * connected AP + */ + uint64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT) + * The average_tsf_offset field is used so as to calculate + * the typical beacon contention time on the channel as well + * may be used to debug beacon synchronization and related + * 
power consumption issue + */ + uint32 leaky_ap_detected; /* indicate that this AP + * typically leaks packets beyond + * the driver guard time. + */ + uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after + * frame with PM bit set was ACK'ed by AP + */ + uint32 leaky_ap_guard_time; /* guard time currently in force + * (when implementing IEEE power management + * based on frame control PM bit), How long + * driver waits before shutting down the radio and after + * receiving an ACK for a data frame with PM bit set) + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} wifi_iface_stat; + +#ifdef CONFIG_COMPAT +/* interface statistics */ +typedef struct { + compat_uptr_t iface; /* wifi interface */ + wifi_interface_info info; /* current state of the interface */ + uint32 beacon_rx; /* access point beacon received count from + * connected AP + */ + uint64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT) + * The average_tsf_offset field is used so as to calculate + * the typical beacon contention time on the channel as well + * may be used to debug beacon synchronization and related + * power consumption issue + */ + uint32 leaky_ap_detected; /* indicate that this AP + * typically leaks packets beyond + * the driver guard time. 
+ */ + uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after + * frame with PM bit set was ACK'ed by AP + */ + uint32 leaky_ap_guard_time; /* guard time currently in force + * (when implementing IEEE power management + * based on frame control PM bit), How long + * driver waits before shutting down the radio and after + * receiving an ACK for a data frame with PM bit set) + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} compat_wifi_iface_stat; +#endif /* CONFIG_COMPAT */ + +#endif /* _dngl_stats_h_ */ diff --git a/bcmdhd.100.10.315.x/dngl_wlhdr.h b/bcmdhd.100.10.315.x/dngl_wlhdr.h new file mode 100644 index 0000000..6c297c8 --- /dev/null +++ b/bcmdhd.100.10.315.x/dngl_wlhdr.h @@ -0,0 +1,43 @@ +/* + * Dongle WL Header definitions + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dngl_wlhdr.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _dngl_wlhdr_h_ +#define _dngl_wlhdr_h_ + +typedef struct wl_header { + uint8 type; /* Header type */ + uint8 version; /* Header version */ + int8 rssi; /* RSSI */ + uint8 pad; /* Unused */ +} wl_header_t; + +#define WL_HEADER_LEN sizeof(wl_header_t) +#define WL_HEADER_TYPE 0 +#define WL_HEADER_VER 1 +#endif /* _dngl_wlhdr_h_ */ diff --git a/bcmdhd.100.10.315.x/frag.c b/bcmdhd.100.10.315.x/frag.c new file mode 100644 index 0000000..c2fde6b --- /dev/null +++ b/bcmdhd.100.10.315.x/frag.c @@ -0,0 +1,112 @@ +/* + * IE/TLV fragmentation/defragmentation support for + * Broadcom 802.11bang Networking Device Driver + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id$ + * + * <> + */ + +#include +#include +#include <802.11.h> + +/* defrag a fragmented dot11 ie/tlv. if space does not permit, return the needed + * ie length to contain all the fragments with status BCME_BUFTOOSHORT. + * out_len is in/out parameter, max length on input, used/required length on output + */ +int +bcm_tlv_dot11_defrag(const void *buf, uint buf_len, uint8 id, bool id_ext, + uint8 *out, uint *out_len) +{ + int err = BCME_OK; + const bcm_tlv_t *ie; + uint tot_len = 0; + uint out_left; + + /* find the ie; includes validation */ + ie = bcm_parse_tlvs_dot11(buf, buf_len, id, id_ext); + if (!ie) { + err = BCME_IE_NOTFOUND; + goto done; + } + + out_left = (out && out_len) ? *out_len : 0; + + /* first fragment */ + tot_len = id_ext ? 
ie->len - 1 : ie->len; + + /* copy out if output space permits */ + if (out_left < tot_len) { + err = BCME_BUFTOOSHORT; + out_left = 0; /* prevent further copy */ + } else { + memcpy(out, &ie->data[id_ext ? 1 : 0], tot_len); + out += tot_len; + out_left -= tot_len; + } + + /* if not fragmened or not fragmentable per 802.11 table 9-77 11md0.1 bail + * we can introduce the latter check later + */ + if (ie->len != BCM_TLV_MAX_DATA_SIZE) { + goto done; + } + + /* adjust buf_len to length after ie including it */ + buf_len -= ((const uint8 *)ie - (const uint8 *)buf); + + /* update length from fragments, okay if no next ie */ + while ((ie = bcm_next_tlv(ie, &buf_len)) && + (ie->id == DOT11_MNG_FRAGMENT_ID)) { + /* note: buf_len starts at next ie and last frag may be partial */ + if (out_left < ie->len) { + err = BCME_BUFTOOSHORT; + out_left = 0; + } else { + memcpy(out, &ie->data[0], ie->len); + out += ie->len; + out_left -= ie->len; + } + + tot_len += ie->len + BCM_TLV_HDR_SIZE; + + /* all but last should be of max size */ + if (ie->len < BCM_TLV_MAX_DATA_SIZE) { + break; + } + } + +done: + if (out_len) { + *out_len = tot_len; + } + + return err; +} + +int +bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len, + uint8 id, bool id_ext, uint *ie_len) +{ + return bcm_tlv_dot11_defrag(buf, buf_len, id, id_ext, NULL, ie_len); +} diff --git a/bcmdhd.100.10.315.x/frag.h b/bcmdhd.100.10.315.x/frag.h new file mode 100644 index 0000000..5ab2092 --- /dev/null +++ b/bcmdhd.100.10.315.x/frag.h @@ -0,0 +1,38 @@ +/* + * IE/TLV (de)fragmentation declarations/definitions for + * Broadcom 802.11abgn Networking Device Driver + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id$ + * + */ + +#ifndef __FRAG_H__ +#define __FRAG_H__ + +int bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len, + uint8 id, bool id_ext, uint *ie_len); + +#endif /* __FRAG_H__ */ diff --git a/bcmdhd.100.10.315.x/hnd_pktpool.c b/bcmdhd.100.10.315.x/hnd_pktpool.c new file mode 100644 index 0000000..93ad389 --- /dev/null +++ b/bcmdhd.100.10.315.x/hnd_pktpool.c @@ -0,0 +1,1427 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_pktpool.c 677681 2017-01-04 09:10:30Z $ + */ + +#include +#include +#include +#include +#include +#ifdef BCMRESVFRAGPOOL +#include +#endif /* BCMRESVFRAGPOOL */ +#ifdef BCMFRWDPOOLREORG +#include +#endif /* BCMFRWDPOOLREORG */ + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif // endif + +/* Registry size is one larger than max pools, as slot #0 is reserved */ +#define PKTPOOLREG_RSVD_ID (0U) +#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead)) +#define PKTPOOLREG_FREE_PTR (POOLPTR(NULL)) + +#define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp))) +#define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp))) + +/* Tag a registry entry as free for use */ +#define PKTPOOL_REGISTRY_CLR(id) \ + PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR) +#define PKTPOOL_REGISTRY_ISCLR(id) \ + (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR)) + +/* Tag registry entry 0 as reserved */ +#define PKTPOOL_REGISTRY_RSV() \ + PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR) +#define PKTPOOL_REGISTRY_ISRSVD() \ + (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)) + +/* Walk all un-reserved entries in registry */ +#define PKTPOOL_REGISTRY_FOREACH(id) \ + for ((id) = 1U; (id) <= pktpools_max; (id)++) + +enum pktpool_empty_cb_state { + EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */ + EMPTYCB_DISABLED, /* Disable 
callback when new packets are added to pool */ + EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */ +}; + +uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */ +pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */ + +/* Register/Deregister a pktpool with registry during pktpool_init/deinit */ +static int pktpool_register(pktpool_t * poolptr); +static int pktpool_deregister(pktpool_t * poolptr); + +/** add declaration */ +static void pktpool_avail_notify(pktpool_t *pktp); + +/** accessor functions required when ROMming this file, forced into RAM */ + +pktpool_t * +BCMRAMFN(get_pktpools_registry)(int id) +{ + return pktpools_registry[id]; +} + +static void +BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp) +{ + pktpools_registry[id] = pp; +} + +static bool +BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp) +{ + return pktpools_registry[id] == pp; +} + +/** Constructs a pool registry to serve a maximum of total_pools */ +int +pktpool_attach(osl_t *osh, uint32 total_pools) +{ + uint32 poolid; + BCM_REFERENCE(osh); + + if (pktpools_max != 0U) { + return BCME_ERROR; + } + + ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID); + + /* Initialize registry: reserve slot#0 and tag others as free */ + PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */ + + PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */ + PKTPOOL_REGISTRY_CLR(poolid); + } + + pktpools_max = total_pools; + + return (int)pktpools_max; +} + +/** Destructs the pool registry. 
Ascertain all pools were first de-inited */ +int +pktpool_dettach(osl_t *osh) +{ + uint32 poolid; + BCM_REFERENCE(osh); + + if (pktpools_max == 0U) { + return BCME_OK; + } + + /* Ascertain that no pools are still registered */ + ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */ + + PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */ + ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid)); + } + + pktpools_max = 0U; /* restore boot state */ + + return BCME_OK; +} + +/** Registers a pool in a free slot; returns the registry slot index */ +static int +pktpool_register(pktpool_t * poolptr) +{ + uint32 poolid; + + if (pktpools_max == 0U) { + return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */ + } + + ASSERT(pktpools_max != 0U); + + /* find an empty slot in pktpools_registry */ + PKTPOOL_REGISTRY_FOREACH(poolid) { + if (PKTPOOL_REGISTRY_ISCLR(poolid)) { + PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */ + return (int)poolid; /* return pool ID */ + } + } /* FOREACH */ + + return PKTPOOL_INVALID_ID; /* error: registry is full */ +} + +/** Deregisters a pktpool, given the pool pointer; tag slot as free */ +static int +pktpool_deregister(pktpool_t * poolptr) +{ + uint32 poolid; + + ASSERT(POOLPTR(poolptr) != POOLPTR(NULL)); + + poolid = POOLID(poolptr); + ASSERT(poolid <= pktpools_max); + + /* Asertain that a previously registered poolptr is being de-registered */ + if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) { + PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */ + } else { + ASSERT(0); + return BCME_ERROR; /* mismatch in registry */ + } + + return BCME_OK; +} + +/** + * pktpool_init: + * User provides a pktpool_t structure and specifies the number of packets to + * be pre-filled into the pool (n_pkts). + * pktpool_init first attempts to register the pool and fetch a unique poolid. 
+ * If registration fails, it is considered an BCME_ERR, caused by either the + * registry was not pre-created (pktpool_attach) or the registry is full. + * If registration succeeds, then the requested number of packets will be filled + * into the pool as part of initialization. In the event that there is no + * available memory to service the request, then BCME_NOMEM will be returned + * along with the count of how many packets were successfully allocated. + * In dongle builds, prior to memory reclaimation, one should limit the number + * of packets to be allocated during pktpool_init and fill the pool up after + * reclaim stage. + * + * @param n_pkts Number of packets to be pre-filled into the pool + * @param max_pkt_bytes The size of all packets in a pool must be the same. E.g. PKTBUFSZ. + * @param type e.g. 'lbuf_frag' + */ +int +pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx, + uint8 type) +{ + int i, err = BCME_OK; + int pktplen; + uint8 pktp_id; + + ASSERT(pktp != NULL); + ASSERT(osh != NULL); + ASSERT(n_pkts != NULL); + + pktplen = *n_pkts; + + bzero(pktp, sizeof(pktpool_t)); + + /* assign a unique pktpool id */ + if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) { + return BCME_ERROR; + } + POOLSETID(pktp, pktp_id); + + pktp->inited = TRUE; + pktp->istx = istx ? 
TRUE : FALSE; + pktp->max_pkt_bytes = (uint16)max_pkt_bytes; + pktp->type = type; + + if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) { + return BCME_ERROR; + } + + pktp->maxlen = PKTPOOL_LEN_MAX; + pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen); + + for (i = 0; i < pktplen; i++) { + void *p; + p = PKTGET(osh, max_pkt_bytes, TRUE); + + if (p == NULL) { + /* Not able to allocate all requested pkts + * so just return what was actually allocated + * We can add to the pool later + */ + if (pktp->freelist == NULL) /* pktpool free list is empty */ + err = BCME_NOMEM; + + goto exit; + } + + PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */ + + PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */ + pktp->freelist = p; + + pktp->avail++; + +#ifdef BCMDBG_POOL + pktp->dbg_q[pktp->dbg_qlen++].p = p; +#endif // endif + } + +exit: + pktp->n_pkts = pktp->avail; + + *n_pkts = pktp->n_pkts; /* number of packets managed by pool */ + return err; +} /* pktpool_init */ + +/** + * pktpool_deinit: + * Prior to freeing a pktpool, all packets must be first freed into the pktpool. + * Upon pktpool_deinit, all packets in the free pool will be freed to the heap. + * An assert is in place to ensure that there are no packets still lingering + * around. Packets freed to a pool after the deinit will cause a memory + * corruption as the pktpool_t structure no longer exists. 
+ */ +int +pktpool_deinit(osl_t *osh, pktpool_t *pktp) +{ + uint16 freed = 0; + + ASSERT(osh != NULL); + ASSERT(pktp != NULL); + +#ifdef BCMDBG_POOL + { + int i; + for (i = 0; i <= pktp->n_pkts; i++) { + pktp->dbg_q[i].p = NULL; + } + } +#endif // endif + + while (pktp->freelist != NULL) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + freed++; + ASSERT(freed <= pktp->n_pkts); + } + + pktp->avail -= freed; + ASSERT(pktp->avail == 0); + + pktp->n_pkts -= freed; + + pktpool_deregister(pktp); /* release previously acquired unique pool id */ + POOLSETID(pktp, PKTPOOL_INVALID_ID); + + if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + pktp->inited = FALSE; + + /* Are there still pending pkts? */ + ASSERT(pktp->n_pkts == 0); + + return 0; +} + +int +pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal) +{ + void *p; + int err = 0; + int n_pkts, psize, maxlen; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(pktp->max_pkt_bytes != 0); + + maxlen = pktp->maxlen; + psize = minimal ? 
(maxlen >> 2) : maxlen; + for (n_pkts = (int)pktp->n_pkts; n_pkts < psize; n_pkts++) { + + p = PKTGET(osh, pktp->n_pkts, TRUE); + + if (p == NULL) { + err = BCME_NOMEM; + break; + } + + if (pktpool_add(pktp, p) != BCME_OK) { + PKTFREE(osh, p, FALSE); + err = BCME_ERROR; + break; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (pktp->cbcnt) { + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } + + return err; +} + +#ifdef BCMPOOLRECLAIM +/* New API to decrease the pkts from pool, but not deinit +*/ +uint16 +pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt) +{ + uint16 freed = 0; + + pktpool_cb_extn_t cb = NULL; + void *arg = NULL; + + ASSERT(osh != NULL); + ASSERT(pktp != NULL); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) { + return freed; + } + + if (pktp->avail < free_cnt) { + free_cnt = pktp->avail; + } + + if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) { + /* If pool is shared rx frag pool, use call back fn to reclaim host address + * and Rx cpl ID associated with the pkt. + */ + ASSERT(pktp->cbext.cb != NULL); + + cb = pktp->cbext.cb; + arg = pktp->cbext.arg; + + } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) { + /* If pool is shared rx pool, use call back fn to freeup Rx cpl ID + * associated with the pkt. 
+ */ + cb = pktp->rxcplidfn.cb; + arg = pktp->rxcplidfn.arg; + } + + while ((pktp->freelist != NULL) && (free_cnt)) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + if (cb != NULL) { + if (cb(pktp, arg, p, REMOVE_RXCPLID)) { + PKTSETFREELIST(p, pktp->freelist); + pktp->freelist = p; + break; + } + } + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + freed++; + free_cnt--; + } + + pktp->avail -= freed; + + pktp->n_pkts -= freed; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) { + return freed; + } + + return freed; +} +#endif /* #ifdef BCMPOOLRECLAIM */ + +/* New API to empty the pkts from pool, but not deinit +* NOTE: caller is responsible to ensure, +* all pkts are available in pool for free; else LEAK ! +*/ +int +pktpool_empty(osl_t *osh, pktpool_t *pktp) +{ + uint16 freed = 0; + + ASSERT(osh != NULL); + ASSERT(pktp != NULL); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + +#ifdef BCMDBG_POOL + { + int i; + for (i = 0; i <= pktp->n_pkts; i++) { + pktp->dbg_q[i].p = NULL; + } + } +#endif // endif + + while (pktp->freelist != NULL) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + freed++; + ASSERT(freed <= pktp->n_pkts); + } + + pktp->avail -= freed; + ASSERT(pktp->avail == 0); + + pktp->n_pkts -= freed; + + ASSERT(pktp->n_pkts == 0); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +static void * +pktpool_deq(pktpool_t *pktp) +{ + void *p = NULL; + + if 
(pktp->avail == 0) + return NULL; + + ASSERT(pktp->freelist != NULL); + + p = pktp->freelist; /* dequeue packet from head of pktpool free list */ + pktp->freelist = PKTFREELIST(p); /* free list points to next packet */ + + PKTSETFREELIST(p, NULL); + + pktp->avail--; + + return p; +} + +static void +pktpool_enq(pktpool_t *pktp, void *p) +{ + ASSERT(p != NULL); + + PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */ + pktp->freelist = p; /* free list points to newly inserted packet */ + + pktp->avail++; + ASSERT(pktp->avail <= pktp->n_pkts); +} + +/** utility for registering host addr fill function called from pciedev */ +int +/* BCMATTACHFN */ +(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + ASSERT(pktp->cbext.cb == NULL); + pktp->cbext.cb = cb; + pktp->cbext.arg = arg; + return 0; +} + +int +pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + if (pktp == NULL) + return BCME_ERROR; + ASSERT(pktp->rxcplidfn.cb == NULL); + pktp->rxcplidfn.cb = cb; + pktp->rxcplidfn.arg = arg; + return 0; +} + +/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */ +void +pktpool_invoke_dmarxfill(pktpool_t *pktp) +{ + ASSERT(pktp->dmarxfill.cb); + ASSERT(pktp->dmarxfill.arg); + + if (pktp->dmarxfill.cb) + pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg); +} + +/** Registers callback functions for split rx mode */ +int +pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + pktp->dmarxfill.cb = cb; + pktp->dmarxfill.arg = arg; + + return 0; +} + +/** + * Registers callback functions. 
+ * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function + */ +int +pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + for (i = 0; i < pktp->cbcnt; i++) { + ASSERT(pktp->cbs[i].cb != NULL); + if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) { + pktp->cbs[i].refcnt++; + goto done; + } + } + + i = pktp->cbcnt; + if (i == PKTPOOL_CB_MAX_AVL) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->cbs[i].cb == NULL); + pktp->cbs[i].cb = cb; + pktp->cbs[i].arg = arg; + pktp->cbs[i].refcnt++; + pktp->cbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/* No BCMATTACHFN as it is used in a non-attach function */ +int +pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i, k; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) { + return BCME_ERROR; + } + + ASSERT(cb != NULL); + + for (i = 0; i < pktp->cbcnt; i++) { + ASSERT(pktp->cbs[i].cb != NULL); + if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) { + pktp->cbs[i].refcnt--; + if (pktp->cbs[i].refcnt) { + /* Still there are references to this callback */ + goto done; + } + /* Moving any more callbacks to fill the hole */ + for (k = i+1; k < pktp->cbcnt; i++, k++) { + pktp->cbs[i].cb = pktp->cbs[k].cb; + pktp->cbs[i].arg = pktp->cbs[k].arg; + pktp->cbs[i].refcnt = pktp->cbs[k].refcnt; + } + + /* reset the last callback */ + pktp->cbs[i].cb = NULL; + pktp->cbs[i].arg = NULL; + pktp->cbs[i].refcnt = 0; + + pktp->cbcnt--; + goto done; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) { + return 
BCME_ERROR; + } + + return err; +} + +/** Registers callback functions */ +int +pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + i = pktp->ecbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->ecbs[i].cb == NULL); + pktp->ecbs[i].cb = cb; + pktp->ecbs[i].arg = arg; + pktp->ecbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/** Calls registered callback functions */ +static int +pktpool_empty_notify(pktpool_t *pktp) +{ + int i; + + pktp->empty = TRUE; + for (i = 0; i < pktp->ecbcnt; i++) { + ASSERT(pktp->ecbs[i].cb != NULL); + pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg); + } + pktp->empty = FALSE; + + return 0; +} + +#ifdef BCMDBG_POOL +int +pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb); + + i = pktp->dbg_cbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->dbg_cbs[i].cb == NULL); + pktp->dbg_cbs[i].cb = cb; + pktp->dbg_cbs[i].arg = arg; + pktp->dbg_cbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +int pktpool_dbg_notify(pktpool_t *pktp); + +int +pktpool_dbg_notify(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + for (i = 0; i < pktp->dbg_cbcnt; i++) { + ASSERT(pktp->dbg_cbs[i].cb); + pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg); + } + + /* 
protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_dbg_dump(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p); + printf("%d, p: 0x%x dur:%lu us state:%d\n", i, + pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p)); + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats) +{ + int i; + int state; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + bzero(stats, sizeof(pktpool_stats_t)); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + state = PKTPOOLSTATE(pktp->dbg_q[i].p); + switch (state) { + case POOL_TXENQ: + stats->enq++; break; + case POOL_TXDH: + stats->txdh++; break; + case POOL_TXD11: + stats->txd11++; break; + case POOL_RXDH: + stats->rxdh++; break; + case POOL_RXD11: + stats->rxd11++; break; + case POOL_RXFILL: + stats->rxfill++; break; + case POOL_IDLE: + stats->idle++; break; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_start_trigger(pktpool_t *pktp, void *p) +{ + uint32 cycles, i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (!PKTPOOL(OSH_NULL, p)) + goto done; + + OSL_GETCYCLES(cycles); + + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + if (pktp->dbg_q[i].p 
== p) { + pktp->dbg_q[i].cycles = cycles; + break; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int pktpool_stop_trigger(pktpool_t *pktp, void *p); + +int +pktpool_stop_trigger(pktpool_t *pktp, void *p) +{ + uint32 cycles, i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (!PKTPOOL(OSH_NULL, p)) + goto done; + + OSL_GETCYCLES(cycles); + + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + if (pktp->dbg_q[i].p == p) { + if (pktp->dbg_q[i].cycles == 0) + break; + + if (cycles >= pktp->dbg_q[i].cycles) + pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles; + else + pktp->dbg_q[i].dur = + (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1; + + pktp->dbg_q[i].cycles = 0; + break; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} +#endif /* BCMDBG_POOL */ + +int +pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp) +{ + BCM_REFERENCE(osh); + ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + pktp->availcb_excl = NULL; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) +{ + int i; + int err; + BCM_REFERENCE(osh); + + ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(pktp->availcb_excl == NULL); + for (i = 0; i < pktp->cbcnt; i++) { + if (cb == pktp->cbs[i].cb) { + pktp->availcb_excl = &pktp->cbs[i]; + break; + } + } + + if 
(pktp->availcb_excl == NULL) + err = BCME_ERROR; + else + err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +static void +pktpool_avail_notify(pktpool_t *pktp) +{ + int i, k, idx; + int avail; + + ASSERT(pktp); + if (pktp->availcb_excl != NULL) { + pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg); + return; + } + + k = pktp->cbcnt - 1; + for (i = 0; i < pktp->cbcnt; i++) { + avail = pktp->avail; + + if (avail) { + if (pktp->cbtoggle) + idx = i; + else + idx = k--; + + ASSERT(pktp->cbs[idx].cb != NULL); + pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg); + } + } + + /* Alternate between filling from head or tail + */ + pktp->cbtoggle ^= 1; + + return; +} + +/** Gets an empty packet from the caller provided pool */ +void * +pktpool_get(pktpool_t *pktp) +{ + void *p; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + p = pktpool_deq(pktp); + + if (p == NULL) { + /* Notify and try to reclaim tx pkts */ + if (pktp->ecbcnt) + pktpool_empty_notify(pktp); + + p = pktpool_deq(pktp); + if (p == NULL) + goto done; + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktpool_free(pktpool_t *pktp, void *p) +{ + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + ASSERT(p != NULL); +#ifdef BCMDBG_POOL + /* pktpool_stop_trigger(pktp, p); */ +#endif // endif + + pktpool_enq(pktp, p); + + /** + * Feed critical DMA with freshly freed packets, to avoid DMA starvation. + * If any avail callback functions are registered, send a notification + * that a new packet is available in the pool. + */ + if (pktp->cbcnt) { + /* To more efficiently use the cpu cycles, callbacks can be temporarily disabled. 
+ * This allows to feed on burst basis as opposed to inefficient per-packet basis. + */ + if (pktp->emptycb_disable == EMPTYCB_ENABLED) { + /** + * If the call originated from pktpool_empty_notify, the just freed packet + * is needed in pktpool_get. + * Therefore don't call pktpool_avail_notify. + */ + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } else { + /** + * The callback is temporarily disabled, log that a packet has been freed. + */ + pktp->emptycb_disable = EMPTYCB_SKIPPED; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return; +} + +/** Adds a caller provided (empty) packet to the caller provided pool */ +int +pktpool_add(pktpool_t *pktp, void *p) +{ + int err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(p != NULL); + + if (pktp->n_pkts == pktp->maxlen) { + err = BCME_RANGE; + goto done; + } + + /* pkts in pool have same length */ + ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p)); + PKTSETPOOL(OSH_NULL, p, TRUE, pktp); + + pktp->n_pkts++; + pktpool_enq(pktp, p); + +#ifdef BCMDBG_POOL + pktp->dbg_q[pktp->dbg_qlen++].p = p; +#endif // endif + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/** + * Force pktpool_setmaxlen () into RAM as it uses a constant + * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips. 
+ */ +int +BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen) +{ + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (maxlen > PKTPOOL_LEN_MAX) + maxlen = PKTPOOL_LEN_MAX; + + /* if pool is already beyond maxlen, then just cap it + * since we currently do not reduce the pool len + * already allocated + */ + pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return pktp->maxlen; +} + +void +pktpool_emptycb_disable(pktpool_t *pktp, bool disable) +{ + ASSERT(pktp); + + /** + * To more efficiently use the cpu cycles, callbacks can be temporarily disabled. + * If callback is going to be re-enabled, check if any packet got + * freed and added back to the pool while callback was disabled. + * When this is the case do the callback now, provided that callback functions + * are registered and this call did not originate from pktpool_empty_notify. + */ + if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) && + (pktp->emptycb_disable == EMPTYCB_SKIPPED)) { + pktpool_avail_notify(pktp); + } + + /* Enable or temporarily disable callback when packet becomes available. */ + pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED; +} + +bool +pktpool_emptycb_disabled(pktpool_t *pktp) +{ + ASSERT(pktp); + return pktp->emptycb_disable != EMPTYCB_ENABLED; +} + +#ifdef BCMPKTPOOL +#include + +pktpool_t *pktpool_shared = NULL; + +#ifdef BCMFRAGPOOL +pktpool_t *pktpool_shared_lfrag = NULL; +#ifdef BCMRESVFRAGPOOL +pktpool_t *pktpool_resv_lfrag = NULL; +struct resv_info *resv_pool_info = NULL; +#endif /* BCMRESVFRAGPOOL */ +#endif /* BCMFRAGPOOL */ + +pktpool_t *pktpool_shared_rxlfrag = NULL; + +static osl_t *pktpool_osh = NULL; + +/** + * Initializes several packet pools and allocates packets within those pools. 
+ */ +int +hnd_pktpool_init(osl_t *osh) +{ + int err = BCME_OK; + int n; + + /* Construct a packet pool registry before initializing packet pools */ + n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID); + if (n != PKTPOOL_MAXIMUM_ID) { + ASSERT(0); + err = BCME_ERROR; + goto error0; + } + + pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared == NULL) { + ASSERT(0); + err = BCME_NOMEM; + goto error1; + } + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_lfrag == NULL) { + ASSERT(0); + err = BCME_NOMEM; + goto error2; + } +#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED) + resv_pool_info = hnd_resv_pool_alloc(osh); + if (resv_pool_info == NULL) { + ASSERT(0); + goto error2; + } + pktpool_resv_lfrag = resv_pool_info->pktp; + if (pktpool_resv_lfrag == NULL) { + ASSERT(0); + goto error2; + } +#endif /* RESVFRAGPOOL */ +#endif /* FRAGPOOL */ + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_rxlfrag == NULL) { + ASSERT(0); + err = BCME_NOMEM; + goto error3; + } +#endif // endif + + /* + * At this early stage, there's not enough memory to allocate all + * requested pkts in the shared pool. Need to add to the pool + * after reclaim + * + * n = NRXBUFPOST + SDPCMD_RXBUFS; + * + * Initialization of packet pools may fail (BCME_ERROR), if the packet pool + * registry is not initialized or the registry is depleted. + * + * A BCME_NOMEM error only indicates that the requested number of packets + * were not filled into the pool. 
+ */ + n = 1; + MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */ + if ((err = pktpool_init(osh, pktpool_shared, + &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) { + ASSERT(0); + goto error4; + } + pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN); + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + n = 1; + if ((err = pktpool_init(osh, pktpool_shared_lfrag, + &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) { + ASSERT(0); + goto error5; + } + pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN); +#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED) + n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */ + if (pktpool_init(osh, pktpool_resv_lfrag, + &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) { + ASSERT(0); + goto error5; + } + pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN); +#endif /* RESVFRAGPOOL */ +#endif /* BCMFRAGPOOL */ +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + n = 1; + if ((err = pktpool_init(osh, pktpool_shared_rxlfrag, + &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) { + ASSERT(0); + goto error6; + } + pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN); +#endif // endif + +#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) + /* Attach poolreorg module */ + if ((frwd_poolreorg_info = poolreorg_attach(osh, +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_shared_lfrag, +#else + NULL, +#endif // endif +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + pktpool_shared_rxlfrag, +#else + NULL, +#endif // endif + pktpool_shared)) == NULL) { + ASSERT(0); + goto error7; + } +#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */ + + pktpool_osh = osh; + MALLOC_CLEAR_NOPERSIST(osh); + + return BCME_OK; + +#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) + /* detach poolreorg module */ + poolreorg_detach(frwd_poolreorg_info); +error7: +#endif /* 
defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */ + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + pktpool_deinit(osh, pktpool_shared_rxlfrag); +error6: +#endif // endif + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_deinit(osh, pktpool_shared_lfrag); +error5: +#endif // endif + +#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \ + (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)) + pktpool_deinit(osh, pktpool_shared); +#endif // endif + +error4: +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_rxlfrag); + pktpool_shared_rxlfrag = (pktpool_t *)NULL; +error3: +#endif /* BCMRXFRAGPOOL */ + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_lfrag); + pktpool_shared_lfrag = (pktpool_t *)NULL; +error2: +#endif /* BCMFRAGPOOL */ + + hnd_free(pktpool_shared); + pktpool_shared = (pktpool_t *)NULL; + +error1: + pktpool_dettach(osh); +error0: + MALLOC_CLEAR_NOPERSIST(osh); + return err; +} /* hnd_pktpool_init */ + +/** is called at each 'wl up' */ +int +hnd_pktpool_fill(pktpool_t *pktpool, bool minimal) +{ + return (pktpool_fill(pktpool_osh, pktpool, minimal)); +} + +/** refills pktpools after reclaim, is called once */ +void +hnd_pktpool_refill(bool minimal) +{ + if (POOL_ENAB(pktpool_shared)) { +#if defined(SRMEM) + if (SRMEM_ENAB()) { + int maxlen = pktpool_max_pkts(pktpool_shared); + int n_pkts = pktpool_tot_pkts(pktpool_shared); + + for (; n_pkts < maxlen; n_pkts++) { + void *p; + if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL) + break; + pktpool_add(pktpool_shared, p); + } + } +#endif /* SRMEM */ + pktpool_fill(pktpool_osh, pktpool_shared, minimal); + } +/* fragpool reclaim */ +#ifdef BCMFRAGPOOL + if (POOL_ENAB(pktpool_shared_lfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal); + } +#endif /* BCMFRAGPOOL */ +/* rx fragpool reclaim */ +#ifdef BCMRXFRAGPOOL + if 
(POOL_ENAB(pktpool_shared_rxlfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal); + } +#endif // endif +#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL) + if (POOL_ENAB(pktpool_resv_lfrag)) { + int resv_size = (PKTFRAGSZ + LBUFFRAGSZ)*RESV_FRAG_POOL_LEN; + hnd_resv_pool_init(resv_pool_info, resv_size); + hnd_resv_pool_enable(resv_pool_info); + } +#endif /* BCMRESVFRAGPOOL */ +} +#endif /* BCMPKTPOOL */ diff --git a/bcmdhd.100.10.315.x/hnd_pktq.c b/bcmdhd.100.10.315.x/hnd_pktq.c new file mode 100644 index 0000000..dabeff0 --- /dev/null +++ b/bcmdhd.100.10.315.x/hnd_pktq.c @@ -0,0 +1,1428 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_pktq.c 698847 2017-05-11 00:10:48Z $ + */ + +#include +#include +#include +#include +#include + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTQ_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTQ_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTQ_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif /* HND_PKTQ_THREAD_SAFE */ + +/* status during txfifo sync */ +#if defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS) +#define TXQ_PKT_DEL 0x01 +#define HEAD_PKT_FLUSHED 0xFF +#endif /* defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS) */ +/* + * osl multiple-precedence packet queue + * hi_prec is always >= the number of the highest non-empty precedence + */ +void * BCMFASTPATH +pktq_penq(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->n_pkts++; + + pq->n_pkts_tot++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +/* + * osl simple, non-priority packet queue + */ +void * BCMFASTPATH +spktq_enq(struct spktq *spq, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if 
(HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(!spktq_full(spq)); + + PKTSETLINK(p, NULL); + + q = &spq->q; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->n_pkts++; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_penq_head(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->n_pkts++; + + pq->n_pkts_tot++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +spktq_enq_head(struct spktq *spq, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(!spktq_full(spq)); + + PKTSETLINK(p, NULL); + + q = &spq->q; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->n_pkts++; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if 
((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +spktq_deq(struct spktq *spq) +{ + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + q = &spq->q; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_tail(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p, *prev; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +spktq_deq_tail(struct spktq *spq) +{ + struct pktq_prec *q; + void *p, *prev; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + q = &spq->q; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + 
PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->n_pkts--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek_tail(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].tail; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +/* + * Append spktq 'list' to the tail of pktq 'pq' + */ +void BCMFASTPATH +pktq_append(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, list_q->head); + else + q->head = list_q->head; + + q->tail = list_q->tail; + q->n_pkts += list_q->n_pkts; + pq->n_pkts_tot += list_q->n_pkts; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif // endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Append spktq 'list' to the tail of spktq 'spq' + */ 
+void BCMFASTPATH +spktq_append(struct spktq *spq, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!spktq_full(spq)); + + q = &spq->q; + + if (q->head) + PKTSETLINK(q->tail, list_q->head); + else + q->head = list_q->head; + + q->tail = list_q->tail; + q->n_pkts += list_q->n_pkts; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif // endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Prepend spktq 'list' to the head of pktq 'pq' + */ +void BCMFASTPATH +pktq_prepend(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktqprec_full(pq, prec)); + + q = &pq->q[prec]; + + /* set the tail packet of list to point at the former pq head */ + PKTSETLINK(list_q->tail, q->head); + /* the new q head is the head of list */ + q->head = list_q->head; + + /* If the q tail was non-null, then it stays as is. 
+ * If the q tail was null, it is now the tail of list + */ + if (q->tail == NULL) { + q->tail = list_q->tail; + } + + q->n_pkts += list_q->n_pkts; + pq->n_pkts_tot += list_q->n_pkts; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif // endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Prepend spktq 'list' to the head of spktq 'spq' + */ +void BCMFASTPATH +spktq_prepend(struct spktq *spq, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!spktq_full(spq)); + + q = &spq->q; + + /* set the tail packet of list to point at the former pq head */ + PKTSETLINK(list_q->tail, q->head); + /* the new q head is the head of list */ + q->head = list_q->head; + + /* If the q tail was non-null, then it stays as is. 
+ * If the q tail was null, it is now the tail of list + */ + if (q->tail == NULL) { + q->tail = list_q->tail; + } + + q->n_pkts += list_q->n_pkts; + +#ifdef WL_TXQ_STALL + list_q->dequeue_count += list_q->n_pkts; +#endif // endif + + list_q->head = NULL; + list_q->tail = NULL; + list_q->n_pkts = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * BCMFASTPATH +pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p) +{ + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if (prev_p == NULL) + goto done; + + if ((p = PKTLINK(prev_p)) == NULL) + goto done; + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + PKTSETLINK(prev_p, PKTLINK(p)); + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + goto done; + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if 
(HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +bool BCMFASTPATH +pktq_pdel(struct pktq *pq, void *pktbuf, int prec) +{ + bool ret = FALSE; + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + /* Should this just assert pktbuf? */ + if (!pktbuf) + goto done; + + q = &pq->q[prec]; + + if (q->head == pktbuf) { + if ((q->head = PKTLINK(pktbuf)) == NULL) + q->tail = NULL; + } else { + for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) + ; + if (p == NULL) + goto done; + + PKTSETLINK(p, PKTLINK(pktbuf)); + if (q->tail == pktbuf) + q->tail = p; + } + + q->n_pkts--; + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + PKTSETLINK(pktbuf, NULL); + ret = TRUE; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +static void +_pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx) +{ + struct pktq_prec wq; + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* move the prec queue aside to a work queue */ + q = &pq->q[prec]; + + wq = *q; + + q->head = NULL; + q->tail = NULL; + q->n_pkts = 0; + +#ifdef WL_TXQ_STALL + q->dequeue_count += wq.n_pkts; +#endif // endif + + pq->n_pkts_tot -= wq.n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + /* start with the head of the work queue */ + while ((p = wq.head) != NULL) { + /* unlink the current packet from the list */ + wq.head = PKTLINK(p); + PKTSETLINK(p, NULL); + wq.n_pkts--; + +#ifdef WL_TXQ_STALL + wq.dequeue_count++; +#endif // endif + + /* call the 
filter function on current packet */ + ASSERT(fltr != NULL); + switch ((*fltr)(fltr_ctx, p)) { + case PKT_FILTER_NOACTION: + /* put this packet back */ + pktq_penq(pq, prec, p); + break; + + case PKT_FILTER_DELETE: + /* delete this packet */ + ASSERT(defer != NULL); + (*defer)(defer_ctx, p); + break; + + case PKT_FILTER_REMOVE: + /* pkt already removed from list */ + break; + + default: + ASSERT(0); + break; + } + } + + ASSERT(wq.n_pkts == 0); +} + +void +pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx); + + ASSERT(flush != NULL); + (*flush)(flush_ctx); +} + +void +pktq_filter(struct pktq *pq, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + bool filter = FALSE; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* Optimize if pktq n_pkts = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->n_pkts_tot > 0) { + filter = TRUE; + } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + if (filter) { + int prec; + + PKTQ_PREC_ITER(pq, prec) { + _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx); + } + + ASSERT(flush != NULL); + (*flush)(flush_ctx); + } +} + +void +spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + struct pktq_prec wq; + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + q = &spq->q; + + /* Optimize if pktq_prec n_pkts = 0, just return. 
*/ + if (q->n_pkts == 0) { + (void)HND_PKTQ_MUTEX_RELEASE(&spq->mutex); + return; + } + + wq = *q; + + q->head = NULL; + q->tail = NULL; + q->n_pkts = 0; + +#ifdef WL_TXQ_STALL + q->dequeue_count += wq.n_pkts; +#endif // endif + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return; + + /* start with the head of the work queue */ + + while ((p = wq.head) != NULL) { + /* unlink the current packet from the list */ + wq.head = PKTLINK(p); + PKTSETLINK(p, NULL); + wq.n_pkts--; + +#ifdef WL_TXQ_STALL + wq.dequeue_count++; +#endif // endif + + /* call the filter function on current packet */ + ASSERT(fltr != NULL); + switch ((*fltr)(fltr_ctx, p)) { + case PKT_FILTER_NOACTION: + /* put this packet back */ + spktq_enq(spq, p); + break; + + case PKT_FILTER_DELETE: + /* delete this packet */ + ASSERT(defer != NULL); + (*defer)(defer_ctx, p); + break; + + case PKT_FILTER_REMOVE: + /* pkt already removed from list */ + break; + + default: + ASSERT(0); + break; + } + } + + ASSERT(wq.n_pkts == 0); + + ASSERT(flush != NULL); + (*flush)(flush_ctx); +} + +bool +pktq_init(struct pktq *pq, int num_prec, int max_pkts) +{ + int prec; + + ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); + + /* pq is variable size; only zero out what's requested */ + bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); + + if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + pq->num_prec = (uint16)num_prec; + + pq->max_pkts = (uint16)max_pkts; + + for (prec = 0; prec < num_prec; prec++) + pq->q[prec].max_pkts = pq->max_pkts; + + return TRUE; +} + +bool +spktq_init(struct spktq *spq, int max_pkts) +{ + bzero(spq, sizeof(struct spktq)); + + if (HND_PKTQ_MUTEX_CREATE("spktq", &spq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + spq->q.max_pkts = (uint16)max_pkts; + + return TRUE; +} + +bool +pktq_deinit(struct pktq *pq) +{ + BCM_REFERENCE(pq); + if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != 
OSL_EXT_SUCCESS) + return FALSE; + + return TRUE; +} + +bool +spktq_deinit(struct spktq *spq) +{ + BCM_REFERENCE(spq); + if (HND_PKTQ_MUTEX_DELETE(&spq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return TRUE; +} + +void +pktq_set_max_plen(struct pktq *pq, int prec, int max_pkts) +{ + ASSERT(prec >= 0 && prec < pq->num_prec); + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + if (prec < pq->num_prec) + pq->q[prec].max_pkts = (uint16)max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * BCMFASTPATH +pktq_deq(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_deq_tail(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL, *prev; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else 
+ q->head = NULL; + + q->tail = prev; + q->n_pkts--; + + pq->n_pkts_tot--; + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].head; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +spktq_peek(struct spktq *spq) +{ + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (spq->q.n_pkts == 0) + goto done; + + p = spq->q.head; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir) +{ + void *p; + + /* no need for a mutex protection! */ + + /* start with the head of the list */ + while ((p = pktq_pdeq(pq, prec)) != NULL) { + + /* delete this packet */ + PKTFREE(osh, p, dir); + } +} + +void +spktq_flush(osl_t *osh, struct spktq *spq, bool dir) +{ + void *p; + + /* no need for a mutex protection! 
*/ + + /* start with the head of the list */ + while ((p = spktq_deq(spq)) != NULL) { + + /* delete this packet */ + PKTFREE(osh, p, dir); + } +} + +void +pktq_flush(osl_t *osh, struct pktq *pq, bool dir) +{ + bool flush = FALSE; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* Optimize flush, if pktq n_pkts_tot = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->n_pkts_tot > 0) { + flush = TRUE; + } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + if (flush) { + int prec; + + PKTQ_PREC_ITER(pq, prec) { + pktq_pflush(osh, pq, prec, dir); + } + } +} + +/* Return sum of lengths of a specific set of precedences */ +int +pktq_mlen(struct pktq *pq, uint prec_bmp) +{ + int prec, len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + len = 0; + + for (prec = 0; prec <= pq->hi_prec; prec++) + if (prec_bmp & (1 << prec)) + len += pq->q[prec].n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return len; +} + +/* Priority peek from a specific set of precedences */ +void * BCMFASTPATH +pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if (prec_out) + *prec_out = prec; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) 
+ return NULL; + + return p; +} +/* Priority dequeue from a specific set of precedences */ +void * BCMFASTPATH +pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->n_pkts_tot == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0)) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->n_pkts--; + + // terence 20150308: fix for non-null pointer of skb->prev sent from ndo_start_xmit + if (q->n_pkts == 0) { + q->head = NULL; + q->tail = NULL; + } + +#ifdef WL_TXQ_STALL + q->dequeue_count++; +#endif // endif + + if (prec_out) + *prec_out = prec; + + pq->n_pkts_tot--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +#ifdef HND_PKTQ_THREAD_SAFE +int +pktqprec_avail_pkts(struct pktq *pq, int prec) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].max_pkts - pq->q[prec].n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktqprec_full(struct pktq *pq, int prec) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].n_pkts >= pq->q[prec].max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != 
OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +int +pktq_avail(struct pktq *pq) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ret = pq->max_pkts - pq->n_pkts_tot; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +int +spktq_avail(struct spktq *spq) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ret = spq->q.max_pkts - spq->q.n_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_full(struct pktq *pq) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ret = pq->n_pkts_tot >= pq->max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +bool +spktq_full(struct spktq *spq) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ret = spq->q.n_pkts >= spq->q.max_pkts; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +#endif /* HND_PKTQ_THREAD_SAFE */ diff --git a/bcmdhd.100.10.315.x/hndlhl.c b/bcmdhd.100.10.315.x/hndlhl.c new file mode 100644 index 0000000..73973cb --- /dev/null +++ b/bcmdhd.100.10.315.x/hndlhl.c @@ -0,0 +1,537 @@ +/* + * Misc utility routines for accessing lhl specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndpmu.c 547757 2015-04-13 10:18:04Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef BCMULP +#include +#endif // endif + +#define SI_LHL_EXT_WAKE_REQ_MASK_MAGIC 0x7FBBF7FF /* magic number for LHL EXT */ + +/* PmuRev1 has a 24-bit PMU RsrcReq timer. However it pushes all other bits + * upward. To make the code to run for all revs we use a variable to tell how + * many bits we need to shift. 
+ */ +#define FLAGS_SHIFT 14 +#define LHL_ERROR(args) printf args + +void +si_lhl_setup(si_t *sih, osl_t *osh) +{ + if (CHIPID(sih->chip) == BCM43012_CHIP_ID) { + /* Enable PMU sleep mode0 */ + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_2); + /* Modify as per the + BCM43012/LHL#LHL-RecommendedsettingforvariousPMUSleepModes: + */ + LHL_REG(sih, lhl_top_pwrup_ctl_adr, LHL_PWRUP_CTL_MASK, LHL_PWRUP_CTL); + LHL_REG(sih, lhl_top_pwrup2_ctl_adr, LHL_PWRUP2_CTL_MASK, LHL_PWRUP2_CTL); + LHL_REG(sih, lhl_top_pwrdn_ctl_adr, LHL_PWRDN_CTL_MASK, LHL_PWRDN_SLEEP_CNT); + LHL_REG(sih, lhl_top_pwrdn2_ctl_adr, LHL_PWRDN2_CTL_MASK, LHL_PWRDN2_CTL); + } else if (BCM4347_CHIP(sih->chip)) { + if (LHL_IS_PSMODE_1(sih)) { + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_1); + } else { + LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_0); + } + + LHL_REG(sih, lhl_top_pwrup_ctl_adr, LHL_PWRUP_CTL_MASK, LHL_PWRUP_CTL_4347); + LHL_REG(sih, lhl_top_pwrup2_ctl_adr, LHL_PWRUP2_CTL_MASK, LHL_PWRUP2_CTL); + LHL_REG(sih, lhl_top_pwrdn_ctl_adr, + LHL_PWRDN_CTL_MASK, LHL_PWRDN_SLEEP_CNT); + LHL_REG(sih, lhl_top_pwrdn2_ctl_adr, LHL_PWRDN2_CTL_MASK, LHL_PWRDN2_CTL); + + /* + * Enable wakeup on GPIO1, PCIE clkreq and perst signal, + * GPIO[0] is mapped to GPIO1 + * GPIO[1] is mapped to PCIE perst + * GPIO[2] is mapped to PCIE clkreq + */ + + /* GPIO1 */ + /* Clear any old interrupt status */ + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN); + /* active high level trigger */ + LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_GPIO1_GPIO_PIN], ~0, + 1 << GCI_GPIO_STS_WL_DIN_SELECT); + LHL_REG(sih, gpio_int_en_port_adr[0], + 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN); + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN); +#if !defined(_CFEZ_) + si_gci_set_functionsel(sih, 1, CC4347_FNSEL_SAMEASPIN); +#endif // endif + + /* PCIE perst */ + LHL_REG(sih, 
gpio_int_st_port_adr[0], + 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN); + LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_PERST_GPIO_PIN], ~0, + (1 << GCI_GPIO_STS_EDGE_TRIG_BIT | + 1 << GCI_GPIO_STS_WL_DIN_SELECT)); + LHL_REG(sih, gpio_int_en_port_adr[0], + 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN); + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN); + + /* PCIE clkreq */ + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN); + LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_CLKREQ_GPIO_PIN], ~0, + (1 << GCI_GPIO_STS_EDGE_TRIG_BIT | + 1 << GCI_GPIO_STS_NEG_EDGE_TRIG_BIT | + 1 << GCI_GPIO_STS_WL_DIN_SELECT)); + LHL_REG(sih, gpio_int_en_port_adr[0], + 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN); + LHL_REG(sih, gpio_int_st_port_adr[0], + 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN); + } +} + +/* To skip this function, specify a invalid "lpo_select" value in nvram */ +int +si_lhl_set_lpoclk(si_t *sih, osl_t *osh, uint32 lpo_force) +{ + gciregs_t *gciregs; + uint clk_det_cnt, status; + int lhl_wlclk_sel; + uint32 lpo = 0; + int timeout = 0; + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + + ASSERT(gciregs != NULL); + + /* Apply nvram override to lpo */ + if ((lpo_force == LHL_LPO_AUTO) && ((lpo = (uint32)getintvar(NULL, "lpo_select")) == 0)) { + lpo = LHL_OSC_32k_ENAB; + } else { + lpo = lpo_force; + } + + /* Power up the desired LPO */ + switch (lpo) { + case LHL_EXT_LPO_ENAB: + LHL_REG(sih, lhl_main_ctl_adr, EXTLPO_BUF_PD, 0); + lhl_wlclk_sel = LHL_EXT_SEL; + break; + + case LHL_LPO1_ENAB: + LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_EN, 0); + lhl_wlclk_sel = LHL_LPO1_SEL; + break; + + case LHL_LPO2_ENAB: + LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_EN, 0); + lhl_wlclk_sel = LHL_LPO2_SEL; + break; + + case LHL_OSC_32k_ENAB: + LHL_REG(sih, lhl_main_ctl_adr, OSC_32k_PD, 0); + lhl_wlclk_sel = LHL_32k_SEL; + break; + + default: + goto done; + } + + LHL_REG(sih, 
lhl_clk_det_ctl_adr,
+		LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL, lhl_wlclk_sel);
+
+	/* Detect the desired LPO */
+
+	LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN, 0);
+	LHL_REG(sih, lhl_clk_det_ctl_adr,
+		LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR);
+	timeout = 0;
+	clk_det_cnt =
+		((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+		LHL_CLK_DET_CNT_SHIFT);
+	while (clk_det_cnt != 0 && timeout <= LPO_SEL_TIMEOUT) {
+		OSL_DELAY(10);
+		clk_det_cnt =
+			((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+			LHL_CLK_DET_CNT_SHIFT);
+		timeout++;
+	}
+
+	if (clk_det_cnt != 0) {
+		LHL_ERROR(("Clock not present as clear did not work timeout = %d\n", timeout));
+		goto error;
+	}
+	LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, 0);
+	LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN,
+		LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN);
+	clk_det_cnt =
+		((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+		LHL_CLK_DET_CNT_SHIFT);
+	timeout = 0;
+
+	while (clk_det_cnt <= CLK_DET_CNT_THRESH && timeout <= LPO_SEL_TIMEOUT) {
+		OSL_DELAY(10);
+		clk_det_cnt =
+			((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
+			LHL_CLK_DET_CNT_SHIFT);
+		timeout++;
+	}
+
+	if (timeout >= LPO_SEL_TIMEOUT) {
+		/* Fix: the ", timeout" argument separator had been captured inside the
+		 * format string, printing a literal ", timeout" and passing no argument
+		 * for the %u specifier.
+		 */
+		LHL_ERROR(("LPO is not available timeout = %u\n", timeout));
+		goto error;
+	}
+
+	/* Select the desired LPO */
+
+	LHL_REG(sih, lhl_main_ctl_adr,
+		LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL, (lhl_wlclk_sel) << LPO_SEL_SHIFT);
+
+	status = ((R_REG(osh, &gciregs->lhl_clk_status_adr) & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL) ==
+		(unsigned)(((1 << lhl_wlclk_sel) << LPO_FINAL_SEL_SHIFT))) ? 1 : 0;
+	timeout = 0;
+	while (!status && timeout <= LPO_SEL_TIMEOUT) {
+		OSL_DELAY(10);
+		status =
+		((R_REG(osh, &gciregs->lhl_clk_status_adr) & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL) ==
+		(unsigned)(((1 << lhl_wlclk_sel) << LPO_FINAL_SEL_SHIFT))) ?
1 : 0; + timeout++; + } + + if (timeout >= LPO_SEL_TIMEOUT) { + LHL_ERROR(("LPO is not available timeout = %u\n, timeout", timeout)); + goto error; + } + /* Power down the rest of the LPOs */ + + if (lpo != LHL_EXT_LPO_ENAB) { + LHL_REG(sih, lhl_main_ctl_adr, EXTLPO_BUF_PD, EXTLPO_BUF_PD); + } + + if (lpo != LHL_LPO1_ENAB) { + LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_EN, LPO1_PD_EN); + LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_SEL, LPO1_PD_SEL_VAL); + } + if (lpo != LHL_LPO2_ENAB) { + LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_EN, LPO2_PD_EN); + LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_SEL, LPO2_PD_SEL_VAL); + } + if (lpo != LHL_OSC_32k_ENAB) { + LHL_REG(sih, lhl_main_ctl_adr, OSC_32k_PD, OSC_32k_PD); + } + if (lpo != RADIO_LPO_ENAB) { + si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, LPO_SEL, 0); + } +done: + return BCME_OK; +error: + ROMMABLE_ASSERT(0); + return BCME_ERROR; +} + +void +si_lhl_timer_config(si_t *sih, osl_t *osh, int timer_type) +{ + uint origidx; + pmuregs_t *pmu = NULL; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + + ASSERT(pmu != NULL); + + switch (timer_type) { + case LHL_MAC_TIMER: + /* Enable MAC Timer interrupt */ + LHL_REG(sih, lhl_wl_mactim0_intrp_adr, + (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER), + (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER)); + + /* Programs bits for MACPHY_CLK_AVAIL and all its dependent bits in + * MacResourceReqMask0. 
+ */ + PMU_REG(sih, mac_res_req_mask, ~0, si_pmu_rsrc_macphy_clk_deps(sih, osh, 0)); + + /* One time init of mac_res_req_timer to enable interrupt and clock request */ + HND_PMU_SYNC_WR(sih, pmu, pmu, osh, + PMUREGADDR(sih, pmu, pmu, mac_res_req_timer), + ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT)); + + if (si_numd11coreunits(sih) > 1) { + LHL_REG(sih, lhl_wl_mactim1_intrp_adr, + (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER), + (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER)); + + PMU_REG(sih, mac_res_req_mask1, ~0, + si_pmu_rsrc_macphy_clk_deps(sih, osh, 1)); + + HND_PMU_SYNC_WR(sih, pmu, pmu, osh, + PMUREGADDR(sih, pmu, pmu, mac_res_req_timer1), + ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT)); + } + + break; + + case LHL_ARM_TIMER: + /* Enable ARM Timer interrupt */ + LHL_REG(sih, lhl_wl_armtim0_intrp_adr, + (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER), + (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER)); + + /* Programs bits for HT_AVAIL and all its dependent bits in ResourceReqMask0 */ + PMU_REG(sih, res_req_mask, ~0, si_pmu_rsrc_ht_avail_clk_deps(sih, osh)); + + /* One time init of res_req_timer to enable interrupt and clock request + * For low power request only ALP (HT_AVAIL is anyway requested by res_req_mask) + */ + HND_PMU_SYNC_WR(sih, pmu, pmu, osh, + PMUREGADDR(sih, pmu, pmu, res_req_timer), + ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT)); + break; + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_lhl_timer_enable(si_t *sih) +{ + /* Enable clks for pmu int propagation */ + PMU_REG(sih, pmuintctrl0, PMU_INTC_ALP_REQ, PMU_INTC_ALP_REQ); + + PMU_REG(sih, pmuintmask0, RSRC_INTR_MASK_TIMER_INT_0, RSRC_INTR_MASK_TIMER_INT_0); + LHL_REG(sih, lhl_main_ctl_adr, LHL_FAST_WRITE_EN, LHL_FAST_WRITE_EN); + PMU_REG(sih, pmucontrol_ext, PCTL_EXT_USE_LHL_TIMER, PCTL_EXT_USE_LHL_TIMER); +} + +void +si_lhl_ilp_config(si_t *sih, osl_t 
*osh, uint32 ilp_period) +{ + gciregs_t *gciregs; + if (CHIPID(sih->chip) == BCM43012_CHIP_ID) { + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + ASSERT(gciregs != NULL); + W_REG(osh, &gciregs->lhl_wl_ilp_val_adr, ilp_period); + } +} + +#ifdef BCMULP +void +si_lhl_disable_sdio_wakeup(si_t *sih) +{ + /* Disable the interrupt */ + LHL_REG(sih, gpio_int_en_port_adr[0], (1 << ULP_SDIO_CMD_PIN), 0); + + /* Clear the pending interrupt status */ + LHL_REG(sih, gpio_int_st_port_adr[0], (1 << ULP_SDIO_CMD_PIN), (1 << ULP_SDIO_CMD_PIN)); +} + +void +si_lhl_enable_sdio_wakeup(si_t *sih, osl_t *osh) +{ + + gciregs_t *gciregs; + pmuregs_t *pmu; + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + ASSERT(gciregs != NULL); + if (CHIPID(sih->chip) == BCM43012_CHIP_ID) { + /* For SDIO_CMD configure P8 for wake on negedge + * LHL 0 -> edge trigger intr mode, + * 1 -> neg edge trigger intr mode , + * 6 -> din from wl side enable + */ + OR_REG(osh, &gciregs->gpio_ctrl_iocfg_p_adr[ULP_SDIO_CMD_PIN], + (1 << GCI_GPIO_STS_EDGE_TRIG_BIT | + 1 << GCI_GPIO_STS_NEG_EDGE_TRIG_BIT | + 1 << GCI_GPIO_STS_WL_DIN_SELECT)); + /* Clear any old interrupt status */ + OR_REG(osh, &gciregs->gpio_int_st_port_adr[0], 1 << ULP_SDIO_CMD_PIN); + + /* LHL GPIO[8] intr en , GPIO[8] is mapped to SDIO_CMD */ + /* Enable P8 to generate interrupt */ + OR_REG(osh, &gciregs->gpio_int_en_port_adr[0], 1 << ULP_SDIO_CMD_PIN); + + /* Clear LHL GPIO status to trigger GCI Interrupt */ + OR_REG(osh, &gciregs->gci_intstat, GCI_INTSTATUS_LHLWLWAKE); + /* Enable LHL GPIO Interrupt to trigger GCI Interrupt */ + OR_REG(osh, &gciregs->gci_intmask, GCI_INTMASK_LHLWLWAKE); + OR_REG(osh, &gciregs->gci_wakemask, GCI_WAKEMASK_LHLWLWAKE); + /* Note ->Enable GCI interrupt to trigger Chipcommon interrupt + * Set EciGciIntEn in IntMask and will be done from FCBS saved tuple + */ + /* Enable LHL to trigger extWake upto HT_AVAIL */ + /* LHL GPIO Interrupt is mapped to extWake[7] */ + pmu = si_setcore(sih, PMU_CORE_ID, 0); + ASSERT(pmu != NULL); + 
/* Set bit 4 and 7 in ExtWakeMask */ + W_REG(osh, &pmu->extwakemask[0], CI_ECI | CI_WECI); + /* Program bits for MACPHY_CLK_AVAIL rsrc in ExtWakeReqMaskN */ + W_REG(osh, &pmu->extwakereqmask[0], SI_LHL_EXT_WAKE_REQ_MASK_MAGIC); + /* Program 0 (no need to request explicitly for any backplane clk) */ + W_REG(osh, &pmu->extwakectrl, 0x0); + /* Note: Configure MAC/Ucode to receive interrupt + * it will be done from saved tuple using FCBS code + */ + } +} +#endif /* BCMULP */ + +lhl_reg_set_t lv_sleep_mode_4369_lhl_reg_set[] = +{ + /* set wl_sleep_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)}, + + /* set top_pwrsw_en, top_slb_en, top_iso_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)}, + + /* set VMUX_asr_sel_en */ + {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)}, + + /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */ + {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF}, + + /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E8F97}, + + /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */ + {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE}, + + /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_DWN_CNT << 16) | + (LHL4369_CSR_MODE_DWN_CNT << 8) | (LHL4369_CSR_ADJ_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_UP_CNT << 16) | + (LHL4369_CSR_MODE_UP_CNT << 8) | (LHL4369_CSR_ADJ_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_DWN_CNT << 24) | + (LHL4369_ASR_ADJ_DWN_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_DWN_CNT << 0))}, + 
+ /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */ + {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_UP_CNT << 24) | + (LHL4369_ASR_ADJ_UP_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl4_adr, set down count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_DWN_CNT << 24) | + (LHL4369_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4369_ASR_LPPFM_MODE_DWN_CNT << 8) | + (LHL4369_ASR_CLK4M_DIS_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl4_adr, set up count for ASR fields - + * clk4m_dis, lppfm_mode, mode_sel, manual_mode + */ + {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_UP_CNT << 24) | + (LHL4369_ASR_MODE_SEL_UP_CNT << 16)| (LHL4369_ASR_LPPFM_MODE_UP_CNT << 8) | + (LHL4369_ASR_CLK4M_DIS_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_DWN_CNT << 24) | + (LHL4369_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4369_SRBG_REF_SEL_DWN_CNT << 8) | + (LHL4369_HPBG_PU_EN_DWN_CNT << 0))}, + + /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis, + * CSR_pfm_pwr_slice_en + */ + {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_UP_CNT << 24) | + (LHL4369_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4369_SRBG_REF_SEL_UP_CNT << 8) | + (LHL4369_HPBG_PU_EN_UP_CNT << 0))}, + + /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_DWN_CNT << 16)}, + + /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_UP_CNT << 16)}, + + /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4369_ASR_TRIM_ADJ_DWN_CNT << 0)}, + + /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */ + {LHL_REG_OFF(lhl_lp_up_ctl5_adr), ~0, 
(LHL4369_ASR_TRIM_ADJ_UP_CNT << 0)}, + + /* Change the default down count values for the resources */ + /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4369_PWRSW_EN_DWN_CNT << 24) | + (LHL4369_SLB_EN_DWN_CNT << 16) | (LHL4369_ISO_EN_DWN_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4369_VMUX_ASR_SEL_DWN_CNT << 16)}, + + /* Change the default up count values for the resources */ + /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */ + {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4369_PWRSW_EN_UP_CNT << 24) | + (LHL4369_SLB_EN_UP_CNT << 16) | (LHL4369_ISO_EN_UP_CNT << 8))}, + + /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */ + {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4369_VMUX_ASR_SEL_UP_CNT << 16))}, + + /* Enable lhl interrupt */ + {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)}, + + /* Enable LHL Wake up */ + {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)}, + + /* Making forceOTPpwrOn 0 */ + {LHL_REG_OFF(otpcontrol), (1 << 16), 0} +}; + +/* LV sleep mode summary: + * LV mode is where both ABUCK and CBUCK are programmed to low voltages during + * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to power off. 
+ * With ASR ON, LPLDO OFF + */ +void +si_set_lv_sleep_mode_lhl_config_4369(si_t *sih) +{ + uint i; + uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0); + lhl_reg_set_t *regs = lv_sleep_mode_4369_lhl_reg_set; + + /* Enable LHL LV mode: + * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en + */ + for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4369_lhl_reg_set); i++) { + si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val); + } +} diff --git a/bcmdhd.100.10.315.x/hndmem.c b/bcmdhd.100.10.315.x/hndmem.c new file mode 100644 index 0000000..6c8a513 --- /dev/null +++ b/bcmdhd.100.10.315.x/hndmem.c @@ -0,0 +1,429 @@ +/* + * Utility routines for configuring different memories in Broadcom chips. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IS_MEMTYPE_VALID(mem) ((mem >= MEM_SOCRAM) && (mem < MEM_MAX)) +#define IS_MEMCONFIG_VALID(cfg) ((cfg >= PDA_CONFIG_CLEAR) && (cfg < PDA_CONFIG_MAX)) + +/* Returns the number of banks in a given memory */ +int +hndmem_num_banks(si_t *sih, int mem) +{ + uint32 savecore, mem_info; + int num_banks = 0; + gciregs_t *gciregs; + osl_t *osh = si_osh(sih); + + if (!IS_MEMTYPE_VALID(mem)) { + goto exit; + } + + savecore = si_coreidx(sih); + + /* TODO: Check whether SOCRAM core is present or not. If not, bail out */ + /* In future we need to add code for TCM based chips as well */ + if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) { + goto exit; + } + + if (sih->gcirev >= 9) { + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + + mem_info = R_REG(osh, &gciregs->wlan_mem_info); + + switch (mem) { + case MEM_SOCRAM: + num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK) >> + WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT; + break; + case MEM_BM: + num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACBM_MASK) >> + WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT; + break; + case MEM_UCM: + num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK) >> + WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT; + break; + case MEM_SHM: + num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK) >> + WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT; + break; + default: + ASSERT(0); + break; + } + } else { + /* TODO: Figure out bank information using SOCRAM registers */ + } + + si_setcoreidx(sih, savecore); +exit: + return num_banks; +} + +/* Returns the size of a give bank in a given memory */ +int +hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num) +{ + uint32 savecore, bank_info, reg_data; + int bank_sz = 0; + gciregs_t *gciregs; + osl_t *osh = si_osh(sih); + + if (!IS_MEMTYPE_VALID(mem)) { + goto exit; + } + + savecore = si_coreidx(sih); + + /* TODO: Check whether SOCRAM core is 
present or not. If not, bail out */ + /* In future we need to add code for TCM based chips as well */ + if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) { + goto exit; + } + + if (sih->gcirev >= 9) { + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + + reg_data = ((mem & + GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) << + GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) | + ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK) + << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT); + W_REG(osh, &gciregs->gci_indirect_addr, reg_data); + + bank_info = R_REG(osh, &gciregs->wlan_bankxinfo); + bank_sz = (bank_info & WLAN_BANKXINFO_BANK_SIZE_MASK) >> + WLAN_BANKXINFO_BANK_SIZE_SHIFT; + } else { + /* TODO: Figure out bank size using SOCRAM registers */ + } + + si_setcoreidx(sih, savecore); +exit: + return bank_sz; +} + +/* Returns the start address of given memory */ +uint32 +hndmem_mem_base(si_t *sih, hndmem_type_t mem) +{ + uint32 savecore, base_addr = 0; + + /* Currently only support of SOCRAM is available in hardware */ + if (mem != MEM_SOCRAM) { + goto exit; + } + + savecore = si_coreidx(sih); + + if (si_setcore(sih, SOCRAM_CORE_ID, 0)) + { + base_addr = si_get_slaveport_addr(sih, CORE_SLAVE_PORT_1, + CORE_BASE_ADDR_0, SOCRAM_CORE_ID, 0); + } else { + /* TODO: Add code to get the base address of TCM */ + base_addr = 0; + } + + si_setcoreidx(sih, savecore); + +exit: + return base_addr; +} + +#ifdef BCMDEBUG +char *hndmem_type_str[] = + { + "SOCRAM", /* 0 */ + "BM", /* 1 */ + "UCM", /* 2 */ + "SHM", /* 3 */ + }; + +/* Dumps the complete memory information */ +void +hndmem_dump_meminfo_all(si_t *sih) +{ + int mem, bank, bank_cnt, bank_sz; + + for (mem = MEM_SOCRAM; mem < MEM_MAX; mem++) { + bank_cnt = hndmem_num_banks(sih, mem); + + printf("\nMemtype: %s\n", hndmem_type_str[mem]); + for (bank = 0; bank < bank_cnt; bank++) { + bank_sz = hndmem_bank_size(sih, mem, bank); + printf("Bank-%d: %d KB\n", bank, bank_sz); + } + } +} +#endif /* BCMDEBUG */ + +/* Configures the Sleep PDA for a particular bank for 
a given memory type */
+int
+hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem, int bank_num,
+	hndmem_config_t config, uint32 pda)
+{
+	uint32 savecore, reg_data;
+	gciregs_t *gciregs;
+	int err = BCME_OK;
+	osl_t *osh = si_osh(sih);
+
+	/* Fix: capture the caller's core index BEFORE si_setcore() switches to
+	 * SOCRAM (matching hndmem_activepda_bank_config); previously savecore was
+	 * read after the switch, so the restore below re-selected the SOCRAM core
+	 * instead of the caller's original core.
+	 */
+	savecore = si_coreidx(sih);
+
+	/* TODO: Check whether SOCRAM core is present or not. If not, bail out */
+	/* In future we need to add code for TCM based chips as well */
+	if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	/* Sleep PDA is supported only by GCI rev >= 9 */
+	if (sih->gcirev < 9) {
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!IS_MEMTYPE_VALID(mem)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	if (!IS_MEMCONFIG_VALID(config)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	gciregs = si_setcore(sih, GCI_CORE_ID, 0);
+
+	reg_data = ((mem &
+		GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
+		GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
+		((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
+		<< GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
+
+	W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
+
+	if (config == PDA_CONFIG_SET_PARTIAL) {
+		W_REG(osh, &gciregs->wlan_bankxsleeppda, pda);
+		W_REG(osh, &gciregs->wlan_bankxkill, 0);
+	}
+	else if (config == PDA_CONFIG_SET_FULL) {
+		W_REG(osh, &gciregs->wlan_bankxsleeppda, WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK);
+		W_REG(osh, &gciregs->wlan_bankxkill, WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK);
+	} else {
+		W_REG(osh, &gciregs->wlan_bankxsleeppda, 0);
+		W_REG(osh, &gciregs->wlan_bankxkill, 0);
+	}
+
+	si_setcoreidx(sih, savecore);
+
+exit:
+	return err;
+}
+
+/* Configures the Active PDA for a particular bank for a given memory type */
+int
+hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem,
+	int bank_num, hndmem_config_t config, uint32 pda)
+{
+	uint32 savecore, reg_data;
+	gciregs_t *gciregs;
+	int err = BCME_OK;
+	osl_t *osh = si_osh(sih);
+
+	if (!IS_MEMTYPE_VALID(mem)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+
+	if
(!IS_MEMCONFIG_VALID(config)) { + err = BCME_BADOPTION; + goto exit; + } + + savecore = si_coreidx(sih); + + /* TODO: Check whether SOCRAM core is present or not. If not, bail out */ + /* In future we need to add code for TCM based chips as well */ + if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) { + err = BCME_UNSUPPORTED; + goto exit; + } + + if (sih->gcirev >= 9) { + gciregs = si_setcore(sih, GCI_CORE_ID, 0); + + reg_data = ((mem & + GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) << + GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) | + ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK) + << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT); + + W_REG(osh, &gciregs->gci_indirect_addr, reg_data); + + if (config == PDA_CONFIG_SET_PARTIAL) { + W_REG(osh, &gciregs->wlan_bankxactivepda, pda); + } + else if (config == PDA_CONFIG_SET_FULL) { + W_REG(osh, &gciregs->wlan_bankxactivepda, + WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK); + } else { + W_REG(osh, &gciregs->wlan_bankxactivepda, 0); + } + } else { + /* TODO: Configure SOCRAM PDA using SOCRAM registers */ + err = BCME_UNSUPPORTED; + } + + si_setcoreidx(sih, savecore); + +exit: + return err; +} + +/* Configures the Sleep PDA for all the banks for a given memory type */ +int +hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config) +{ + int bank; + int num_banks = hndmem_num_banks(sih, mem); + int err = BCME_OK; + + /* Sleep PDA is supported only by GCI rev >= 9 */ + if (sih->gcirev < 9) { + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!IS_MEMTYPE_VALID(mem)) { + err = BCME_BADOPTION; + goto exit; + } + + if (!IS_MEMCONFIG_VALID(config)) { + err = BCME_BADOPTION; + goto exit; + } + + for (bank = 0; bank < num_banks; bank++) + { + err = hndmem_sleeppda_bank_config(sih, mem, bank, config, 0); + } + +exit: + return err; +} + +/* Configures the Active PDA for all the banks for a given memory type */ +int +hndmem_activepda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config) +{ + int bank; + int num_banks = 
hndmem_num_banks(sih, mem); + int err = BCME_OK; + + if (!IS_MEMTYPE_VALID(mem)) { + err = BCME_BADOPTION; + goto exit; + } + + if (!IS_MEMCONFIG_VALID(config)) { + err = BCME_BADOPTION; + goto exit; + } + + for (bank = 0; bank < num_banks; bank++) + { + err = hndmem_activepda_bank_config(sih, mem, bank, config, 0); + } + +exit: + return err; +} + +/* Turn off/on all the possible banks in a given memory range. + * Currently this works only for SOCRAM as this is restricted by HW. + */ +int +hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem, uint32 mem_start, + uint32 size, hndmem_config_t config) +{ + int bank, bank_sz, num_banks; + int mem_end; + int bank_start_addr, bank_end_addr; + int err = BCME_OK; + + /* We can get bank size for only SOCRAM/TCM only. Support is not avilable + * for other memories (BM, UCM and SHM) + */ + if (mem != MEM_SOCRAM) { + err = BCME_UNSUPPORTED; + goto exit; + } + + num_banks = hndmem_num_banks(sih, mem); + bank_start_addr = hndmem_mem_base(sih, mem); + mem_end = mem_start + size - 1; + + for (bank = 0; bank < num_banks; bank++) + { + /* Bank size is spcified in bankXinfo register in terms on KBs */ + bank_sz = 1024 * hndmem_bank_size(sih, mem, bank); + + bank_end_addr = bank_start_addr + bank_sz - 1; + + if (config == PDA_CONFIG_SET_FULL) { + /* Check if the bank is completely overlapping with the given mem range */ + if ((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) { + err = hndmem_activepda_bank_config(sih, mem, bank, config, 0); + } + } else { + /* Check if the bank is completely overlaped with the given mem range */ + if (((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) || + /* Check if the bank is partially overlaped with the given range */ + ((mem_start <= bank_end_addr) && (mem_end >= bank_start_addr))) { + err = hndmem_activepda_bank_config(sih, mem, bank, config, 0); + } + } + + bank_start_addr += bank_sz; + } + +exit: + return err; +} diff --git a/bcmdhd.100.10.315.x/hndpmu.c 
b/bcmdhd.100.10.315.x/hndpmu.c new file mode 100644 index 0000000..3a6da91 --- /dev/null +++ b/bcmdhd.100.10.315.x/hndpmu.c @@ -0,0 +1,788 @@ +/* + * Misc utility routines for accessing PMU corerev specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndpmu.c 714395 2017-08-04 08:22:31Z $ + */ + +/** + * @file + * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs. + * However, in the context of this file the baseband ('BB') PLL/FLL is referred to. + * + * Throughout this code, the prefixes 'pmu1_' and 'pmu2_' are used. + * They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012) + * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports + * fractional frequency generation. 
pmu2_ does not support fractional frequency generation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(BCMULP) +#include +#endif /* defined(BCMULP) */ +#include +#ifdef EVENT_LOG_COMPILE +#include +#endif // endif +#include +#include + +#define PMU_ERROR(args) + +#define PMU_MSG(args) + +/* To check in verbose debugging messages not intended + * to be on except on private builds. + */ +#define PMU_NONE(args) +#define flags_shift 14 + +/** contains resource bit positions for a specific chip */ +struct rsc_per_chip_s { + uint8 ht_avail; + uint8 macphy_clkavail; + uint8 ht_start; + uint8 otp_pu; + uint8 macphy_aux_clkavail; +}; + +typedef struct rsc_per_chip_s rsc_per_chip_t; + +#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED) +bool _pmustatsenab = TRUE; +#else +bool _pmustatsenab = FALSE; +#endif /* BCMPMU_STATS */ + +/** + * Balance between stable SDIO operation and power consumption is achieved using this function. + * Note that each drive strength table is for a specific VDDIO of the SDIO pads, ideally this + * function should read the VDDIO itself to select the correct table. For now it has been solved + * with the 'BCM_SDIO_VDDIO' preprocessor constant. + * + * 'drivestrength': desired pad drive strength in mA. Drive strength of 0 requests tri-state (if + * hardware supports this), if no hw support drive strength is not programmed. + */ +void +si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) +{ + /* + * Note: + * This function used to set the SDIO drive strength via PMU_CHIPCTL1 for the + * 43143, 4330, 4334, 4336, 43362 chips. These chips are now no longer supported, so + * the code has been deleted. + * Newer chips have the SDIO drive strength setting via a GCI Chip Control register, + * but the bit definitions are chip-specific. 
We are keeping this function available + * (accessed via DHD 'sdiod_drive' IOVar) in case these newer chips need to provide access. + */ + UNUSED_PARAMETER(sih); + UNUSED_PARAMETER(osh); + UNUSED_PARAMETER(drivestrength); +} + +void +si_switch_pmu_dependency(si_t *sih, uint mode) +{ +#ifdef DUAL_PMU_SEQUENCE + osl_t *osh = si_osh(sih); + uint32 current_res_state; + uint32 min_mask, max_mask; + const pmu_res_depend_t *pmu_res_depend_table = NULL; + uint pmu_res_depend_table_sz = 0; + uint origidx; + pmuregs_t *pmu; + chipcregs_t *cc; + BCM_REFERENCE(cc); + + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + cc = si_setcore(sih, CC_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + cc = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + current_res_state = R_REG(osh, &pmu->res_state); + min_mask = R_REG(osh, &pmu->min_res_mask); + max_mask = R_REG(osh, &pmu->max_res_mask); + W_REG(osh, &pmu->min_res_mask, (min_mask | current_res_state)); + switch (mode) { + case PMU_4364_1x1_MODE: + { + if (CHIPID(sih->chip) == BCM4364_CHIP_ID) { + pmu_res_depend_table = bcm4364a0_res_depend_1x1; + pmu_res_depend_table_sz = + ARRAYSIZE(bcm4364a0_res_depend_1x1); + max_mask = PMU_4364_MAX_MASK_1x1; + W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE); + W_REG(osh, &pmu->res_updn_timer, PMU_4364_SAVE_RESTORE_UPDNTIME_1x1); +#if defined(SAVERESTORE) + if (SR_ENAB()) { + /* Disable 3x3 SR engine */ + W_REG(osh, &cc->sr1_control0, + CC_SR0_4364_SR_ENG_CLK_EN | + CC_SR0_4364_SR_RSRC_TRIGGER | + CC_SR0_4364_SR_WD_MEM_MIN_DIV | + CC_SR0_4364_SR_INVERT_CLK | + CC_SR0_4364_SR_ENABLE_HT | + CC_SR0_4364_SR_ALLOW_PIC | + CC_SR0_4364_SR_PMU_MEM_DISABLE); + } +#endif /* SAVERESTORE */ + } + break; + } + case PMU_4364_3x3_MODE: + { + if (CHIPID(sih->chip) == BCM4364_CHIP_ID) { + W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE); + W_REG(osh, &pmu->res_updn_timer, + PMU_4364_SAVE_RESTORE_UPDNTIME_3x3); + /* Change the 
dependency table only if required */ + if ((max_mask != PMU_4364_MAX_MASK_3x3) || + (max_mask != PMU_4364_MAX_MASK_RSDB)) { + pmu_res_depend_table = bcm4364a0_res_depend_rsdb; + pmu_res_depend_table_sz = + ARRAYSIZE(bcm4364a0_res_depend_rsdb); + max_mask = PMU_4364_MAX_MASK_3x3; + } +#if defined(SAVERESTORE) + if (SR_ENAB()) { + /* Enable 3x3 SR engine */ + W_REG(osh, &cc->sr1_control0, + CC_SR0_4364_SR_ENG_CLK_EN | + CC_SR0_4364_SR_RSRC_TRIGGER | + CC_SR0_4364_SR_WD_MEM_MIN_DIV | + CC_SR0_4364_SR_INVERT_CLK | + CC_SR0_4364_SR_ENABLE_HT | + CC_SR0_4364_SR_ALLOW_PIC | + CC_SR0_4364_SR_PMU_MEM_DISABLE | + CC_SR0_4364_SR_ENG_EN_MASK); + } +#endif /* SAVERESTORE */ + } + break; + } + case PMU_4364_RSDB_MODE: + default: + { + if (CHIPID(sih->chip) == BCM4364_CHIP_ID) { + W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE); + W_REG(osh, &pmu->res_updn_timer, + PMU_4364_SAVE_RESTORE_UPDNTIME_3x3); + /* Change the dependency table only if required */ + if ((max_mask != PMU_4364_MAX_MASK_3x3) || + (max_mask != PMU_4364_MAX_MASK_RSDB)) { + pmu_res_depend_table = + bcm4364a0_res_depend_rsdb; + pmu_res_depend_table_sz = + ARRAYSIZE(bcm4364a0_res_depend_rsdb); + max_mask = PMU_4364_MAX_MASK_RSDB; + } +#if defined(SAVERESTORE) + if (SR_ENAB()) { + /* Enable 3x3 SR engine */ + W_REG(osh, &cc->sr1_control0, + CC_SR0_4364_SR_ENG_CLK_EN | + CC_SR0_4364_SR_RSRC_TRIGGER | + CC_SR0_4364_SR_WD_MEM_MIN_DIV | + CC_SR0_4364_SR_INVERT_CLK | + CC_SR0_4364_SR_ENABLE_HT | + CC_SR0_4364_SR_ALLOW_PIC | + CC_SR0_4364_SR_PMU_MEM_DISABLE | + CC_SR0_4364_SR_ENG_EN_MASK); + } +#endif /* SAVERESTORE */ + } + break; + } + } + si_pmu_resdeptbl_upd(sih, osh, pmu, pmu_res_depend_table, pmu_res_depend_table_sz); + W_REG(osh, &pmu->max_res_mask, max_mask); + W_REG(osh, &pmu->min_res_mask, min_mask); + si_pmu_wait_for_steady_state(sih, osh, pmu); + /* Add some delay; allow resources to come up and settle. 
*/ + OSL_DELAY(200); + si_setcoreidx(sih, origidx); +#endif /* DUAL_PMU_SEQUENCE */ +} + +#if defined(BCMULP) + +int +si_pmu_ulp_register(si_t *sih) +{ + return ulp_p1_module_register(ULP_MODULE_ID_PMU, &ulp_pmu_ctx, (void *)sih); +} + +static uint +si_pmu_ulp_get_retention_size_cb(void *handle, ulp_ext_info_t *einfo) +{ + ULP_DBG(("%s: sz: %d\n", __FUNCTION__, sizeof(si_pmu_ulp_cr_dat_t))); + return sizeof(si_pmu_ulp_cr_dat_t); +} + +static int +si_pmu_ulp_enter_cb(void *handle, ulp_ext_info_t *einfo, uint8 *cache_data) +{ + si_pmu_ulp_cr_dat_t crinfo = {0}; + crinfo.ilpcycles_per_sec = ilpcycles_per_sec; + ULP_DBG(("%s: ilpcycles_per_sec: %x\n", __FUNCTION__, ilpcycles_per_sec)); + memcpy(cache_data, (void*)&crinfo, sizeof(crinfo)); + return BCME_OK; +} + +static int +si_pmu_ulp_exit_cb(void *handle, uint8 *cache_data, + uint8 *p2_cache_data) +{ + si_pmu_ulp_cr_dat_t *crinfo = (si_pmu_ulp_cr_dat_t *)cache_data; + + ilpcycles_per_sec = crinfo->ilpcycles_per_sec; + ULP_DBG(("%s: ilpcycles_per_sec: %x, cache_data: %p\n", __FUNCTION__, + ilpcycles_per_sec, cache_data)); + return BCME_OK; +} + +void +si_pmu_ulp_chipconfig(si_t *sih, osl_t *osh) +{ + uint32 reg_val; + + BCM_REFERENCE(reg_val); + + if (CHIPID(sih->chip) == BCM43012_CHIP_ID) { + /* DS1 reset and clk enable init value config */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL14, ~0x0, + (PMUCCTL14_43012_ARMCM3_RESET_INITVAL | + PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL | + PMUCCTL14_43012_SDIOD_RESET_INIVAL | + PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL | + PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL | + PMUCCTL14_43012_M2MDMA_RESET_INITVAL | + PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL | + PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL)); + + /* Clear SFlash clock request and enable High Quality clock */ + CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ | CCS_HQCLKREQ, CCS_HQCLKREQ); + + reg_val = PMU_REG(sih, min_res_mask, ~0x0, ULP_MIN_RES_MASK); + ULP_DBG(("si_pmu_ulp_chipconfig: min_res_mask: 0x%08x\n", reg_val)); + + /* 
Force power switch off */ + si_pmu_chipcontrol(sih, PMU_CHIPCTL2, + (PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON | + PMUCCTL02_43012_PHY_PWRSW_FORCE_ON), 0); + + } +} + +void +si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period) +{ + pmuregs_t *pmu; + pmu = si_setcoreidx(sih, si_findcoreidx(sih, PMU_CORE_ID, 0)); + W_REG(osh, &pmu->ILPPeriod, ilp_period); + si_lhl_ilp_config(sih, osh, ilp_period); +} + +/** Initialize DS1 PMU hardware resources */ +void +si_pmu_ds1_res_init(si_t *sih, osl_t *osh) +{ + pmuregs_t *pmu; + uint origidx; + const pmu_res_updown_t *pmu_res_updown_table = NULL; + uint pmu_res_updown_table_sz = 0; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (CHIPID(sih->chip)) { + case BCM43012_CHIP_ID: + pmu_res_updown_table = bcm43012a0_res_updown_ds1; + pmu_res_updown_table_sz = ARRAYSIZE(bcm43012a0_res_updown_ds1); + break; + + default: + break; + } + + /* Program up/down timers */ + while (pmu_res_updown_table_sz--) { + ASSERT(pmu_res_updown_table != NULL); + PMU_MSG(("DS1: Changing rsrc %d res_updn_timer to 0x%x\n", + pmu_res_updown_table[pmu_res_updown_table_sz].resnum, + pmu_res_updown_table[pmu_res_updown_table_sz].updown)); + W_REG(osh, &pmu->res_table_sel, + pmu_res_updown_table[pmu_res_updown_table_sz].resnum); + W_REG(osh, &pmu->res_updn_timer, + pmu_res_updown_table[pmu_res_updown_table_sz].updown); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +#endif /* defined(BCMULP) */ + +uint32 +si_pmu_wake_bit_offset(si_t *sih) +{ + uint32 wakebit; + + switch (CHIPID(sih->chip)) { + case BCM4347_CHIP_GRPID: + wakebit = CC2_4347_GCI2WAKE_MASK; + break; + default: + wakebit = 0; + ASSERT(0); + break; + } + + return wakebit; +} + +void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask) +{ + pmuregs_t *pmu; + uint 
origidx; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } + else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + W_REG(osh, &pmu->min_res_mask, min_res_mask); + OSL_DELAY(100); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +bool +si_pmu_cap_fast_lpo(si_t *sih) +{ + return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ? TRUE : FALSE; +} + +int +si_pmu_fast_lpo_disable(si_t *sih) +{ + if (!si_pmu_cap_fast_lpo(sih)) { + PMU_ERROR(("%s: No Fast LPO capability\n", __FUNCTION__)); + return BCME_ERROR; + } + + PMU_REG(sih, pmucontrol_ext, + PCTL_EXT_FASTLPO_ENAB | + PCTL_EXT_FASTLPO_SWENAB | + PCTL_EXT_FASTLPO_PCIE_SWENAB, + 0); + OSL_DELAY(1000); + return BCME_OK; +} + +#ifdef BCMPMU_STATS +/* + * 8 pmu statistics timer default map + * + * for CORE_RDY_AUX measure, set as below for timer 6 and 7 instead of CORE_RDY_MAIN. 
+ * //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX) + * { SRC_CORE_RDY_AUX, FALSE, TRUE, LEVEL_HIGH}, + * //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX) + * { SRC_CORE_RDY_AUX, FALSE, TRUE, EDGE_RISE} + */ +static pmu_stats_timer_t pmustatstimer[] = { + { SRC_LINK_IN_L12, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //link_in_l12 + { SRC_LINK_IN_L23, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //link_in_l23 + { SRC_PM_ST_IN_D0, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //pm_st_in_d0 + { SRC_PM_ST_IN_D3, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //pm_st_in_d3 + //deep-sleep duration : pmu_rsrc_state(XTAL_PU) + { SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_LEVEL_LOW}, + //deep-sleep entry count : pmu_rsrc_state(XTAL_PU) + { SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_EDGE_FALL}, + //core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN) + { SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, + //core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN) + { SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_EDGE_RISE} +}; + +static void +si_pmustatstimer_update(osl_t *osh, pmuregs_t *pmu, uint8 timerid) +{ + uint32 stats_timer_ctrl; + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + stats_timer_ctrl = + ((pmustatstimer[timerid].src_num << PMU_ST_SRC_SHIFT) & + PMU_ST_SRC_MASK) | + ((pmustatstimer[timerid].cnt_mode << PMU_ST_CNT_MODE_SHIFT) & + PMU_ST_CNT_MODE_MASK) | + ((pmustatstimer[timerid].enable << PMU_ST_EN_SHIFT) & PMU_ST_EN_MASK) | + ((pmustatstimer[timerid].int_enable << PMU_ST_INT_EN_SHIFT) & PMU_ST_INT_EN_MASK); + W_REG(osh, &pmu->pmu_statstimer_ctrl, stats_timer_ctrl); + W_REG(osh, &pmu->pmu_statstimer_N, 0); +} + +void +si_pmustatstimer_int_enable(si_t *sih) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + OR_REG(osh, &pmu->pmuintmask0, 
PMU_INT_STAT_TIMER_INT_MASK); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_int_disable(si_t *sih) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + AND_REG(osh, &pmu->pmuintmask0, ~PMU_INT_STAT_TIMER_INT_MASK); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_init(si_t *sih) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + uint32 core_cap_ext; + uint8 max_stats_timer_num; + int8 i; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + core_cap_ext = R_REG(osh, &pmu->core_cap_ext); + + max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1; + + for (i = 0; i < max_stats_timer_num; i++) { + si_pmustatstimer_update(osh, pmu, i); + } + + OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_dump(si_t *sih) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + uint32 core_cap_ext, pmucapabilities, AlpPeriod, ILPPeriod, pmuintmask0, pmuintstatus; + uint8 max_stats_timer_num, max_stats_timer_src_num; + uint32 stat_timer_ctrl, stat_timer_N; + uint8 i; + uint32 current_time_ms = OSL_SYSUPTIME(); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmucapabilities = R_REG(osh, &pmu->pmucapabilities); + core_cap_ext = 
R_REG(osh, &pmu->core_cap_ext); + AlpPeriod = R_REG(osh, &pmu->slowclkperiod); + ILPPeriod = R_REG(osh, &pmu->ILPPeriod); + + max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> + PCAP_EXT_ST_NUM_SHIFT) + 1; + max_stats_timer_src_num = ((core_cap_ext & PCAP_EXT_ST_SRC_NUM_MASK) >> + PCAP_EXT_ST_SRC_NUM_SHIFT) + 1; + + pmuintstatus = R_REG(osh, &pmu->pmuintstatus); + pmuintmask0 = R_REG(osh, &pmu->pmuintmask0); + + PMU_ERROR(("%s : TIME %d\n", __FUNCTION__, current_time_ms)); + + PMU_ERROR(("\tMAX Timer Num %d, MAX Source Num %d\n", + max_stats_timer_num, max_stats_timer_src_num)); + PMU_ERROR(("\tpmucapabilities 0x%8x, core_cap_ext 0x%8x, AlpPeriod 0x%8x, ILPPeriod 0x%8x, " + "pmuintmask0 0x%8x, pmuintstatus 0x%8x, pmurev %d\n", + pmucapabilities, core_cap_ext, AlpPeriod, ILPPeriod, + pmuintmask0, pmuintstatus, PMUREV(sih->pmurev))); + + for (i = 0; i < max_stats_timer_num; i++) { + W_REG(osh, &pmu->pmu_statstimer_addr, i); + stat_timer_ctrl = R_REG(osh, &pmu->pmu_statstimer_ctrl); + stat_timer_N = R_REG(osh, &pmu->pmu_statstimer_N); + PMU_ERROR(("\t Timer %d : control 0x%8x, %d\n", + i, stat_timer_ctrl, stat_timer_N)); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_start(si_t *sih, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].enable = TRUE; + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + OR_REG(osh, &pmu->pmu_statstimer_ctrl, PMU_ST_ENAB << PMU_ST_EN_SHIFT); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_stop(si_t *sih, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + 
origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].enable = FALSE; + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + AND_REG(osh, &pmu->pmu_statstimer_ctrl, ~(PMU_ST_ENAB << PMU_ST_EN_SHIFT)); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_clear(si_t *sih, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + W_REG(osh, &pmu->pmu_statstimer_N, 0); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_clear_overflow(si_t *sih) +{ + uint8 i; + uint32 core_cap_ext; + uint8 max_stats_timer_num; + uint32 timerN; + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + core_cap_ext = R_REG(osh, &pmu->core_cap_ext); + max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1; + + for (i = 0; i < max_stats_timer_num; i++) { + W_REG(osh, &pmu->pmu_statstimer_addr, i); + timerN = R_REG(osh, &pmu->pmu_statstimer_N); + if (timerN == 0xFFFFFFFF) { + PMU_ERROR(("pmustatstimer overflow clear - timerid : %d\n", i)); + si_pmustatstimer_clear(sih, i); + } + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +uint32 +si_pmustatstimer_read(si_t *sih, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + uint32 stats_timer_N; + + /* 
Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + W_REG(osh, &pmu->pmu_statstimer_addr, timerid); + stats_timer_N = R_REG(osh, &pmu->pmu_statstimer_N); + + /* Return to original core */ + si_setcoreidx(sih, origidx); + + return stats_timer_N; +} + +void +si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].src_num = src_num; + si_pmustatstimer_update(osh, pmu, timerid); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +void +si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid) +{ + pmuregs_t *pmu; + uint origidx; + osl_t *osh = si_osh(sih); + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + pmustatstimer[timerid].cnt_mode = cnt_mode; + si_pmustatstimer_update(osh, pmu, timerid); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} +#endif /* BCMPMU_STATS */ diff --git a/bcmdhd.100.10.315.x/include/802.11.h b/bcmdhd.100.10.315.x/include/802.11.h new file mode 100644 index 0000000..77c51d2 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/802.11.h @@ -0,0 +1,5259 @@ +/* + * Fundamental types and constants relating to 802.11 + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11.h 765782 2018-06-05 10:47:00Z $ + */ + +#ifndef _802_11_H_ +#define _802_11_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +#ifndef _NET_ETHERNET_H_ +#include +#endif // endif + +#include + +/* This marks the start of a packed structure section. 
*/ +#include + +#define DOT11_TU_TO_US 1024 /* 802.11 Time Unit is 1024 microseconds */ + +/* Generic 802.11 frame constants */ +#define DOT11_A3_HDR_LEN 24 /* d11 header length with A3 */ +#define DOT11_A4_HDR_LEN 30 /* d11 header length with A4 */ +#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN /* MAC header length */ +#define DOT11_FCS_LEN 4u /* d11 FCS length */ +#define DOT11_ICV_LEN 4 /* d11 ICV length */ +#define DOT11_ICV_AES_LEN 8 /* d11 ICV/AES length */ +#define DOT11_MAX_ICV_AES_LEN 16 /* d11 MAX ICV/AES length */ +#define DOT11_QOS_LEN 2 /* d11 QoS length */ +#define DOT11_HTC_LEN 4 /* d11 HT Control field length */ + +#define DOT11_KEY_INDEX_SHIFT 6 /* d11 key index shift */ +#define DOT11_IV_LEN 4 /* d11 IV length */ +#define DOT11_IV_TKIP_LEN 8 /* d11 IV TKIP length */ +#define DOT11_IV_AES_OCB_LEN 4 /* d11 IV/AES/OCB length */ +#define DOT11_IV_AES_CCM_LEN 8 /* d11 IV/AES/CCM length */ +#define DOT11_IV_MAX_LEN 8 /* maximum iv len for any encryption */ + +/* Includes MIC */ +#define DOT11_MAX_MPDU_BODY_LEN 2304 /* max MPDU body length */ +/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */ +#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \ + DOT11_QOS_LEN + \ + DOT11_IV_AES_CCM_LEN + \ + DOT11_MAX_MPDU_BODY_LEN + \ + DOT11_ICV_LEN + \ + DOT11_FCS_LEN) /* d11 max MPDU length */ + +#define DOT11_MAX_SSID_LEN 32 /* d11 max ssid length */ + +/* dot11RTSThreshold */ +#define DOT11_DEFAULT_RTS_LEN 2347 /* d11 default RTS length */ +#define DOT11_MAX_RTS_LEN 2347 /* d11 max RTS length */ + +/* dot11FragmentationThreshold */ +#define DOT11_MIN_FRAG_LEN 256 /* d11 min fragmentation length */ +#define DOT11_MAX_FRAG_LEN 2346 /* Max frag is also limited by aMPDUMaxLength + * of the attached PHY + */ +#define DOT11_DEFAULT_FRAG_LEN 2346 /* d11 default fragmentation length */ + +/* dot11BeaconPeriod */ +#define DOT11_MIN_BEACON_PERIOD 1 /* d11 min beacon period */ +#define DOT11_MAX_BEACON_PERIOD 0xFFFF /* d11 max beacon period */ + +/* dot11DTIMPeriod */ +#define 
DOT11_MIN_DTIM_PERIOD 1 /* d11 min DTIM period */ +#define DOT11_MAX_DTIM_PERIOD 0xFF /* d11 max DTIM period */ + +/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */ +#define DOT11_LLC_SNAP_HDR_LEN 8 /* d11 LLC/SNAP header length */ +/* minimum LLC header length; DSAP, SSAP, 8 bit Control (unnumbered) */ +#define DOT11_LLC_HDR_LEN_MIN 3 +#define DOT11_OUI_LEN 3 /* d11 OUI length */ +BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header { + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[DOT11_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 type; /* ethertype */ +} BWL_POST_PACKED_STRUCT; + +/* RFC1042 header used by 802.11 per 802.1H */ +#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN) /* RCF1042 header length */ + +/* Generic 802.11 MAC header */ +/** + * N.B.: This struct reflects the full 4 address 802.11 MAC header. + * The fields are defined such that the shorter 1, 2, and 3 + * address headers just use the first k fields. 
+ */ +BWL_PRE_PACKED_STRUCT struct dot11_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr a1; /* address 1 */ + struct ether_addr a2; /* address 2 */ + struct ether_addr a3; /* address 3 */ + uint16 seq; /* sequence control */ + struct ether_addr a4; /* address 4 */ +} BWL_POST_PACKED_STRUCT; + +/* Control frames */ + +BWL_PRE_PACKED_STRUCT struct dot11_rts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_RTS_LEN 16 /* d11 RTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTS_LEN 10u /* d11 CTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ack_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACK_LEN 10 /* d11 ACK frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame { + uint16 fc; /* frame control */ + uint16 durid; /* AID */ + struct ether_addr bssid; /* receiver address, STA in AP */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_PS_POLL_LEN 16 /* d11 PS poll frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr bssid; /* transmitter address, STA in AP */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CS_END_LEN 16 /* d11 CF-END frame length */ + +/** + * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling + * category+OUI+vendor specific content ( this can be variable) + */ +BWL_PRE_PACKED_STRUCT struct 
dot11_action_wifi_vendor_specific { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1040]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t; + +/** generic vendor specific action frame with variable length */ +BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t; + +#define DOT11_ACTION_VS_HDR_LEN 6 + +#define BCM_ACTION_OUI_BYTE0 0x00 +#define BCM_ACTION_OUI_BYTE1 0x90 +#define BCM_ACTION_OUI_BYTE2 0x4c + +/* BA/BAR Control parameters */ +#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 /* normal ack */ +#define DOT11_BA_CTL_POLICY_NOACK 0x0001 /* no ack */ +#define DOT11_BA_CTL_POLICY_MASK 0x0001 /* ack policy mask */ + +#define DOT11_BA_CTL_MTID 0x0002 /* multi tid BA */ +#define DOT11_BA_CTL_COMPRESSED 0x0004 /* compressed bitmap */ + +#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 /* num msdu in bitmap mask */ +#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 /* num msdu in bitmap shift */ + +#define DOT11_BA_CTL_TID_MASK 0xF000 /* tid mask */ +#define DOT11_BA_CTL_TID_SHIFT 12 /* tid shift */ + +/** control frame header (BA/BAR) */ +BWL_PRE_PACKED_STRUCT struct dot11_ctl_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTL_HDR_LEN 16 /* control frame hdr len */ + +/** BAR frame payload */ +BWL_PRE_PACKED_STRUCT struct dot11_bar { + uint16 bar_control; /* BAR Control */ + uint16 seqnum; /* Starting Sequence control */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BAR_LEN 4 /* BAR frame payload length */ + +#define DOT11_BA_BITMAP_LEN 128 /* bitmap length */ +#define DOT11_BA_CMP_BITMAP_LEN 8 /* compressed bitmap length */ +/** BA frame payload */ 
+BWL_PRE_PACKED_STRUCT struct dot11_ba { + uint16 ba_control; /* BA Control */ + uint16 seqnum; /* Starting Sequence control */ + uint8 bitmap[DOT11_BA_BITMAP_LEN]; /* Block Ack Bitmap */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BA_LEN 4 /* BA frame payload len (wo bitmap) */ + +/** Management frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_management_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr da; /* receiver address */ + struct ether_addr sa; /* transmitter address */ + struct ether_addr bssid; /* BSS ID */ + uint16 seq; /* sequence control */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_management_header dot11_management_header_t; +#define DOT11_MGMT_HDR_LEN 24 /* d11 management header length */ + +/* Management frame payloads */ + +BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { + uint32 timestamp[2]; + uint16 beacon_interval; + uint16 capability; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */ +#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_auth { + uint16 alg; /* algorithm */ + uint16 seq; /* sequence control */ + uint16 status; /* status code */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */ +#define DOT11_AUTH_SEQ_STATUS_LEN 4 /* length of auth frame without challenge IE and + * without algorithm + */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_REQ_FIXED_LEN 4 /* length of assoc frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ + struct ether_addr ap; /* Current AP address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_REASSOC_REQ_FIXED_LEN 10 /* length of assoc 
frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp { + uint16 capability; /* capability information */ + uint16 status; /* status code */ + uint16 aid; /* association ID */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_RESP_FIXED_LEN 6 /* length of assoc resp frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_measure { + uint8 category; + uint8 action; + uint8 token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACTION_MEASURE_LEN 3 /* d11 action measurement header length */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width { + uint8 category; + uint8 action; + uint8 ch_width; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops { + uint8 category; + uint8 action; + uint8 control; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query { + uint8 category; + uint8 action; + uint16 id; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode { + uint8 category; + uint8 action; + uint8 mode; +} BWL_POST_PACKED_STRUCT; + +/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */ +#define DOT11_ACTION_GID_MEMBERSHIP_LEN 8 /* bytes */ +#define DOT11_ACTION_GID_USER_POS_LEN 16 /* bytes */ +BWL_PRE_PACKED_STRUCT struct dot11_action_group_id { + uint8 category; + uint8 action; + uint8 membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN]; + uint8 user_position[DOT11_ACTION_GID_USER_POS_LEN]; +} BWL_POST_PACKED_STRUCT; + +#define SM_PWRSAVE_ENABLE 1 +#define SM_PWRSAVE_MODE 2 + +/* ************* 802.11h related definitions. 
************* */ +BWL_PRE_PACKED_STRUCT struct dot11_power_cnst { + uint8 id; + uint8 len; + uint8 power; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cnst dot11_power_cnst_t; + +BWL_PRE_PACKED_STRUCT struct dot11_power_cap { + int8 min; + int8 max; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cap dot11_power_cap_t; + +BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep { + uint8 id; + uint8 len; + uint8 tx_pwr; + uint8 margin; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tpc_rep dot11_tpc_rep_t; +#define DOT11_MNG_IE_TPC_REPORT_SIZE (sizeof(dot11_tpc_rep_t)) +#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */ + +BWL_PRE_PACKED_STRUCT struct dot11_supp_channels { + uint8 id; + uint8 len; + uint8 first_channel; + uint8 num_channels; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_supp_channels dot11_supp_channels_t; + +/** + * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband + * offset for 40MHz operation. The possible 3 values are: + * 1 = above control channel + * 3 = below control channel + * 0 = no extension channel + */ +BWL_PRE_PACKED_STRUCT struct dot11_extch { + uint8 id; /* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */ + uint8 len; /* IE length */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extch dot11_extch_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t; + +#define BRCM_EXTCH_IE_LEN 5 +#define BRCM_EXTCH_IE_TYPE 53 /* 802.11n ID not yet assigned */ +#define DOT11_EXTCH_IE_LEN 1 +#define DOT11_EXT_CH_MASK 0x03 /* extension channel mask */ +#define DOT11_EXT_CH_UPPER 0x01 /* ext. ch. on upper sb */ +#define DOT11_EXT_CH_LOWER 0x03 /* ext. ch. on lower sb */ +#define DOT11_EXT_CH_NONE 0x00 /* no extension ch. 
*/ + +BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { + uint8 category; + uint8 action; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_frmhdr dot11_action_frmhdr_t; + +/* Action Field length */ +#define DOT11_ACTION_CATEGORY_LEN 1u +#define DOT11_ACTION_ACTION_LEN 1u +#define DOT11_ACTION_DIALOG_TOKEN_LEN 1u +#define DOT11_ACTION_CAPABILITY_LEN 2u +#define DOT11_ACTION_STATUS_CODE_LEN 2u +#define DOT11_ACTION_REASON_CODE_LEN 2u +#define DOT11_ACTION_TARGET_CH_LEN 1u +#define DOT11_ACTION_OPER_CLASS_LEN 1u + +#define DOT11_ACTION_FRMHDR_LEN 2 + +/** CSA IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch { + uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 mode; /* mode 0 or 1 */ + uint8 channel; /* channel switch to */ + uint8 count; /* number of beacons before switching */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch dot11_chan_switch_ie_t; + +#define DOT11_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */ +/* CSA mode - 802.11h-2003 $7.3.2.20 */ +#define DOT11_CSA_MODE_ADVISORY 0 /* no DOT11_CSA_MODE_NO_TX restriction imposed */ +#define DOT11_CSA_MODE_NO_TX 1 /* no transmission upon receiving CSA frame. 
*/ + +BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel { + uint8 category; + uint8 action; + dot11_chan_switch_ie_t chan_switch_ie; /* for switch IE */ + dot11_brcm_extch_ie_t extch_ie; /* extension channel offset */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_csa_body { + uint8 mode; /* mode 0 or 1 */ + uint8 reg; /* regulatory class */ + uint8 channel; /* channel switch to */ + uint8 count; /* number of beacons before switching */ +} BWL_POST_PACKED_STRUCT; + +/** 11n Extended Channel Switch IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_ext_csa { + uint8 id; /* id DOT11_MNG_EXT_CSA_ID */ + uint8 len; /* length of IE */ + struct dot11_csa_body b; /* body of the ie */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ext_csa dot11_ext_csa_ie_t; +#define DOT11_EXT_CSA_IE_LEN 4 /* length of extended channel switch IE body */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa { + uint8 category; + uint8 action; + dot11_ext_csa_ie_t chan_switch_ie; /* for switch IE */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa { + uint8 category; + uint8 action; + struct dot11_csa_body b; /* body of the ie */ +} BWL_POST_PACKED_STRUCT; + +/** Wide Bandwidth Channel Switch IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 channel_width; /* new channel width */ + uint8 center_frequency_segment_0; /* center frequency segment 0 */ + uint8 center_frequency_segment_1; /* center frequency segment 1 */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t; + +#define DOT11_WIDE_BW_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */ + +/** Channel Switch Wrapper IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ + uint8 len; /* length of 
IE */ + dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t; + +typedef enum wide_bw_chan_width { + WIDE_BW_CHAN_WIDTH_20 = 0, + WIDE_BW_CHAN_WIDTH_40 = 1, + WIDE_BW_CHAN_WIDTH_80 = 2, + WIDE_BW_CHAN_WIDTH_160 = 3, + WIDE_BW_CHAN_WIDTH_80_80 = 4 +} wide_bw_chan_width_t; + +/** Wide Bandwidth Channel IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_ID */ + uint8 len; /* length of IE */ + uint8 channel_width; /* channel width */ + uint8 center_frequency_segment_0; /* center frequency segment 0 */ + uint8 center_frequency_segment_1; /* center frequency segment 1 */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wide_bw_channel dot11_wide_bw_chan_ie_t; + +#define DOT11_WIDE_BW_IE_LEN 3 /* length of IE data, not including 2 byte header */ +/** VHT Transmit Power Envelope IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 transmit_power_info; + uint8 local_max_transmit_power_20; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t; + +/* vht transmit power envelope IE length depends on channel width */ +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ 1 +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ 2 +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ 3 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_coex { + uint8 id; + uint8 len; + uint8 info; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_coex dot11_obss_coex_t; +#define DOT11_OBSS_COEXINFO_LEN 1 /* length of OBSS Coexistence INFO IE */ + +#define DOT11_OBSS_COEX_INFO_REQ 0x01 +#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02 +#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist { + uint8 id; + 
uint8 len; + uint8 regclass; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_chanlist dot11_obss_chanlist_t; +#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 /* fixed length of regclass */ + +BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie { + uint8 id; + uint8 len; + uint8 cap[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap_ie dot11_extcap_ie_t; + +#define DOT11_EXTCAP_LEN_COEX 1 +#define DOT11_EXTCAP_LEN_BT 3 +#define DOT11_EXTCAP_LEN_IW 4 +#define DOT11_EXTCAP_LEN_SI 6 + +#define DOT11_EXTCAP_LEN_TDLS 5 +#define DOT11_11AC_EXTCAP_LEN_TDLS 8 + +#define DOT11_EXTCAP_LEN_FMS 2 +#define DOT11_EXTCAP_LEN_PROXY_ARP 2 +#define DOT11_EXTCAP_LEN_TFS 3 +#define DOT11_EXTCAP_LEN_WNM_SLEEP 3 +#define DOT11_EXTCAP_LEN_TIMBC 3 +#define DOT11_EXTCAP_LEN_BSSTRANS 3 +#define DOT11_EXTCAP_LEN_DMS 4 +#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6 +#define DOT11_EXTCAP_LEN_TDLS_WBW 8 +#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8 + +/* TDLS Capabilities */ +#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */ +#define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */ +#define DOT11_TDLS_CAP_PEER_PSM 20 /* TDLS Peer PSM support */ +#define DOT11_TDLS_CAP_CH_SW 30 /* TDLS Channel switch */ +#define DOT11_TDLS_CAP_PROH 38 /* TDLS prohibited */ +#define DOT11_TDLS_CAP_CH_SW_PROH 39 /* TDLS Channel switch prohibited */ +#define DOT11_TDLS_CAP_TDLS_WIDER_BW 61 /* TDLS Wider Band-Width */ + +#define TDLS_CAP_MAX_BIT 39 /* TDLS max bit defined in ext cap */ + +/* 802.11h/802.11k Measurement Request/Report IEs */ +/* Measurement Type field */ +#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */ +#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */ +#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */ +#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */ +#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */ +#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 
measurement Beacon type */ +#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */ +#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */ +#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */ +#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */ +#define DOT11_MEASURE_TYPE_MCDIAGS 10 /* d11 measurement multicast diagnostics */ +#define DOT11_MEASURE_TYPE_CIVICLOC 11 /* d11 measurement location civic */ +#define DOT11_MEASURE_TYPE_LOC_ID 12 /* d11 measurement location identifier */ +#define DOT11_MEASURE_TYPE_DIRCHANQ 13 /* d11 measurement dir channel quality */ +#define DOT11_MEASURE_TYPE_DIRMEAS 14 /* d11 measurement directional */ +#define DOT11_MEASURE_TYPE_DIRSTATS 15 /* d11 measurement directional stats */ +#define DOT11_MEASURE_TYPE_FTMRANGE 16 /* d11 measurement Fine Timing */ +#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */ + +/* Measurement Request Modes */ +#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */ +#define DOT11_MEASURE_MODE_ENABLE (1<<1) /* d11 measurement enable */ +#define DOT11_MEASURE_MODE_REQUEST (1<<2) /* d11 measurement request */ +#define DOT11_MEASURE_MODE_REPORT (1<<3) /* d11 measurement report */ +#define DOT11_MEASURE_MODE_DUR (1<<4) /* d11 measurement dur mandatory */ +/* Measurement Report Modes */ +#define DOT11_MEASURE_MODE_LATE (1<<0) /* d11 measurement late */ +#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) /* d11 measurement incapable */ +#define DOT11_MEASURE_MODE_REFUSED (1<<2) /* d11 measurement refuse */ +/* Basic Measurement Map bits */ +#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) /* d11 measurement basic map BSS */ +#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) /* d11 measurement map OFDM */ +#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) /* d11 measurement map unknown */ +#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) /* d11 measurement map radar */ +#define 
DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) /* d11 measurement map unmeasuremnt */ + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 channel; + uint8 start_time[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req dot11_meas_req_t; +#define DOT11_MNG_IE_MREQ_LEN 14 /* d11 measurement request IE length */ +/* length of Measure Request IE data not including variable len */ +#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 type; /* type of civic location */ + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + uint8 data[1]; + } BWL_POST_PACKED_STRUCT locid; + BWL_PRE_PACKED_STRUCT struct { + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; + } BWL_POST_PACKED_STRUCT req; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req_loc dot11_meas_req_loc_t; +#define DOT11_MNG_IE_MREQ_MIN_LEN 4 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN 4 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN 8 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN 6 /* d11 measurement report IE length */ + +BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement { + uint8 subelement; + uint8 length; + uint8 lci_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct 
dot11_lci_subelement dot11_lci_subelement_t; + +BWL_PRE_PACKED_STRUCT struct dot11_colocated_bssid_list_se { + uint8 sub_id; + uint8 length; + uint8 max_bssid_ind; /* MaxBSSID Indicator */ + struct ether_addr bssid[1]; /* variable */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_colocated_bssid_list_se dot11_colocated_bssid_list_se_t; +#define DOT11_LCI_COLOCATED_BSSID_LIST_FIXED_LEN 3 +#define DOT11_LCI_COLOCATED_BSSID_SUBELEM_ID 7 + +BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement { + uint8 type; /* type of civic location */ + uint8 subelement; + uint8 length; + uint8 civic_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_civic_subelement dot11_civic_subelement_t; + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; + } BWL_POST_PACKED_STRUCT basic; + BWL_PRE_PACKED_STRUCT struct { + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 type; /* type of civic location */ + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint8 exp_tsf[8]; + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT locid; + BWL_PRE_PACKED_STRUCT struct { + uint8 entry_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT rep; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep dot11_meas_rep_t; +#define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_LOCID_FIXED_LEN 13 /* d11 measurement report IE length */ +#define 
DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4 + +/* length of Measure Report IE data not including variable len */ +#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement response IE fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t; +#define DOT11_MEASURE_BASIC_REP_LEN 12 /* d11 measurement basic report length */ + +BWL_PRE_PACKED_STRUCT struct dot11_quiet { + uint8 id; + uint8 len; + uint8 count; /* TBTTs until beacon interval in quiet starts */ + uint8 period; /* Beacon intervals between periodic quiet periods ? */ + uint16 duration; /* Length of quiet period, in TU's */ + uint16 offset; /* TU's offset from TBTT in Count field */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_quiet dot11_quiet_t; + +BWL_PRE_PACKED_STRUCT struct chan_map_tuple { + uint8 channel; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct chan_map_tuple chan_map_tuple_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs { + uint8 id; + uint8 len; + uint8 eaddr[ETHER_ADDR_LEN]; + uint8 interval; + chan_map_tuple_t map[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ibss_dfs dot11_ibss_dfs_t; + +/* WME Elements */ +#define WME_OUI "\x00\x50\xf2" /* WME OUI */ +#define WME_OUI_LEN 3 +#define WME_OUI_TYPE 2 /* WME type */ +#define WME_TYPE 2 /* WME type, deprecated */ +#define WME_SUBTYPE_IE 0 /* Information Element */ +#define WME_SUBTYPE_PARAM_IE 1 /* Parameter Element */ +#define WME_SUBTYPE_TSPEC 2 /* Traffic Specification */ +#define WME_VER 1 /* WME version */ + +/* WME Access Category Indices (ACIs) */ +#define AC_BE 0 /* Best Effort */ +#define AC_BK 1 /* Background */ +#define AC_VI 2 /* Video */ +#define AC_VO 3 /* Voice */ +#define AC_COUNT 4 /* number of ACs */ + +typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */ 
#define AC_BITMAP_NONE	0x0	/* No ACs */
#define AC_BITMAP_ALL	0xf	/* All ACs */
#define AC_BITMAP_TST(ab, ac)	(((ab) & (1 << (ac))) != 0)
#define AC_BITMAP_SET(ab, ac)	(((ab) |= (1 << (ac))))
#define AC_BITMAP_RESET(ab, ac)	(((ab) &= ~(1 << (ac))))

/* Management PKT Lifetime indices */
/* Removing flag checks 'BCMINTERNAL || WLTEST'
 * while merging MERGE BIS120RC4 to DINGO2
 */
#define MGMT_ALL	0xffff
#define MGMT_AUTH_LT	FC_SUBTYPE_AUTH
#define MGMT_ASSOC_LT	FC_SUBTYPE_ASSOC_REQ

/** WME Information Element (IE) */
BWL_PRE_PACKED_STRUCT struct wme_ie {
	uint8 oui[3];
	uint8 type;
	uint8 subtype;
	uint8 version;
	uint8 qosinfo;
} BWL_POST_PACKED_STRUCT;
typedef struct wme_ie wme_ie_t;
#define WME_IE_LEN 7	/* WME IE length */

/** Per-AC EDCF parameter record (ACI/AIFSN, ECWmin/max, TXOP) */
BWL_PRE_PACKED_STRUCT struct edcf_acparam {
	uint8	ACI;
	uint8	ECW;
	uint16	TXOP;	/* stored in network order (ls octet first) */
} BWL_POST_PACKED_STRUCT;
typedef struct edcf_acparam edcf_acparam_t;

/** WME Parameter Element (PE) */
BWL_PRE_PACKED_STRUCT struct wme_param_ie {
	uint8 oui[3];
	uint8 type;
	uint8 subtype;
	uint8 version;
	uint8 qosinfo;
	uint8 rsvd;
	edcf_acparam_t acparam[AC_COUNT];
} BWL_POST_PACKED_STRUCT;
typedef struct wme_param_ie wme_param_ie_t;
#define WME_PARAM_IE_LEN	24	/* WME Parameter IE length */

/* QoS Info field for IE as sent from AP */
#define WME_QI_AP_APSD_MASK		0x80	/* U-APSD Supported mask */
#define WME_QI_AP_APSD_SHIFT		7	/* U-APSD Supported shift */
#define WME_QI_AP_COUNT_MASK		0x0f	/* Parameter set count mask */
#define WME_QI_AP_COUNT_SHIFT		0	/* Parameter set count shift */

/* QoS Info field for IE as sent from STA */
#define WME_QI_STA_MAXSPLEN_MASK	0x60	/* Max Service Period Length mask */
#define WME_QI_STA_MAXSPLEN_SHIFT	5	/* Max Service Period Length shift */
#define WME_QI_STA_APSD_ALL_MASK	0xf	/* APSD all AC bits mask */
#define WME_QI_STA_APSD_ALL_SHIFT	0	/* APSD all AC bits shift */
#define WME_QI_STA_APSD_BE_MASK		0x8	/* APSD AC_BE mask */
#define WME_QI_STA_APSD_BE_SHIFT	3	/* APSD AC_BE shift */
#define WME_QI_STA_APSD_BK_MASK		0x4	/* APSD AC_BK mask */
#define WME_QI_STA_APSD_BK_SHIFT	2	/* APSD AC_BK shift */
#define WME_QI_STA_APSD_VI_MASK		0x2	/* APSD AC_VI mask */
#define WME_QI_STA_APSD_VI_SHIFT	1	/* APSD AC_VI shift */
#define WME_QI_STA_APSD_VO_MASK		0x1	/* APSD AC_VO mask */
#define WME_QI_STA_APSD_VO_SHIFT	0	/* APSD AC_VO shift */

/* ACI */
#define EDCF_AIFSN_MIN		1	/* AIFSN minimum value */
#define EDCF_AIFSN_MAX		15	/* AIFSN maximum value */
#define EDCF_AIFSN_MASK		0x0f	/* AIFSN mask */
#define EDCF_ACM_MASK		0x10	/* ACM mask */
#define EDCF_ACI_MASK		0x60	/* ACI mask */
#define EDCF_ACI_SHIFT		5	/* ACI shift */
#define EDCF_AIFSN_SHIFT	12	/* 4 MSB(0xFFF) in ifs_ctl for AC idx */

/* ECW */
#define EDCF_ECW_MIN		0	/* cwmin/cwmax exponent minimum value */
#define EDCF_ECW_MAX		15	/* cwmin/cwmax exponent maximum value */
#define EDCF_ECW2CW(exp)	((1 << (exp)) - 1)
#define EDCF_ECWMIN_MASK	0x0f	/* cwmin exponent form mask */
#define EDCF_ECWMAX_MASK	0xf0	/* cwmax exponent form mask */
#define EDCF_ECWMAX_SHIFT	4	/* cwmax exponent form shift */

/* TXOP */
#define EDCF_TXOP_MIN		0	/* TXOP minimum value */
#define EDCF_TXOP_MAX		65535	/* TXOP maximum value */
#define EDCF_TXOP2USEC(txop)	((txop) << 5)

/* Default BE ACI value for non-WME connection STA */
#define NON_EDCF_AC_BE_ACI_STA	0x02

/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
#define EDCF_AC_BE_ACI_STA	0x03	/* STA ACI value for best effort AC */
#define EDCF_AC_BE_ECW_STA	0xA4	/* STA ECW value for best effort AC */
#define EDCF_AC_BE_TXOP_STA	0x0000	/* STA TXOP value for best effort AC */
#define EDCF_AC_BK_ACI_STA	0x27	/* STA ACI value for background AC */
#define EDCF_AC_BK_ECW_STA	0xA4	/* STA ECW value for background AC */
#define EDCF_AC_BK_TXOP_STA	0x0000	/* STA TXOP value for background AC */
#define EDCF_AC_VI_ACI_STA	0x42	/* STA ACI value for video AC */
#define EDCF_AC_VI_ECW_STA	0x43	/* STA ECW value for video AC */
#define EDCF_AC_VI_TXOP_STA	0x005e	/* STA TXOP value for video AC */
#define EDCF_AC_VO_ACI_STA	0x62	/* STA ACI value for audio AC */
#define EDCF_AC_VO_ECW_STA	0x32	/* STA ECW value for audio AC */
#define EDCF_AC_VO_TXOP_STA	0x002f	/* STA TXOP value for audio AC */

/* Default EDCF parameters that AP uses; WMM draft Table 14 */
#define EDCF_AC_BE_ACI_AP	0x03	/* AP ACI value for best effort AC */
#define EDCF_AC_BE_ECW_AP	0x64	/* AP ECW value for best effort AC */
#define EDCF_AC_BE_TXOP_AP	0x0000	/* AP TXOP value for best effort AC */
#define EDCF_AC_BK_ACI_AP	0x27	/* AP ACI value for background AC */
#define EDCF_AC_BK_ECW_AP	0xA4	/* AP ECW value for background AC */
#define EDCF_AC_BK_TXOP_AP	0x0000	/* AP TXOP value for background AC */
#define EDCF_AC_VI_ACI_AP	0x41	/* AP ACI value for video AC */
#define EDCF_AC_VI_ECW_AP	0x43	/* AP ECW value for video AC */
#define EDCF_AC_VI_TXOP_AP	0x005e	/* AP TXOP value for video AC */
#define EDCF_AC_VO_ACI_AP	0x61	/* AP ACI value for audio AC */
#define EDCF_AC_VO_ECW_AP	0x32	/* AP ECW value for audio AC */
#define EDCF_AC_VO_TXOP_AP	0x002f	/* AP TXOP value for audio AC */

/** EDCA Parameter IE */
BWL_PRE_PACKED_STRUCT struct edca_param_ie {
	uint8 qosinfo;
	uint8 rsvd;
	edcf_acparam_t acparam[AC_COUNT];
} BWL_POST_PACKED_STRUCT;
typedef struct edca_param_ie edca_param_ie_t;
#define EDCA_PARAM_IE_LEN	18	/* EDCA Parameter IE length */

/** QoS Capability IE */
BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
	uint8 qosinfo;
} BWL_POST_PACKED_STRUCT;
typedef struct qos_cap_ie qos_cap_ie_t;

/** QBSS Load IE (typedef follows this struct definition) */
BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
	uint8 id;			/* 11, DOT11_MNG_QBSS_LOAD_ID */
	uint8 length;
	uint16 station_count;		/* total number of STAs associated */
	uint8 channel_utilization;	/* % of time, normalized to 255, QAP sensed medium busy */
	uint16 aac;			/* available admission capacity */
} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t; +#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */ + +#define WLC_QBSS_LOAD_CHAN_FREE_MAX 0xff /* max for channel free score */ + +/* Estimated Service Parameters (ESP) IE - 802.11-2016 9.4.2.174 */ +typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie { + uint8 id; + uint8 length; + uint8 id_ext; + /* variable len info */ + uint8 esp_info_lists[]; +} BWL_POST_PACKED_STRUCT dot11_esp_ie_t; + +#define DOT11_ESP_IE_HDR_SIZE (OFFSETOF(dot11_esp_ie_t, esp_info_lists)) + +/* ESP Information list - 802.11-2016 9.4.2.174 */ +typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie_info_list { + /* acess category, data format, ba win size */ + uint8 ac_df_baws; + /* estimated air time fraction */ + uint8 eat_frac; + /* data PPDU duration target (50us units) */ + uint8 ppdu_dur; +} BWL_POST_PACKED_STRUCT dot11_esp_ie_info_list_t; + +#define DOT11_ESP_IE_INFO_LIST_SIZE (sizeof(dot11_esp_ie_info_list_t)) + +#define DOT11_ESP_NBR_INFO_LISTS 4u /* max nbr of esp information lists */ +#define DOT11_ESP_INFO_LIST_AC_BK 0u /* access category of esp information list AC_BK */ +#define DOT11_ESP_INFO_LIST_AC_BE 1u /* access category of esp information list AC_BE */ +#define DOT11_ESP_INFO_LIST_AC_VI 2u /* access category of esp information list AC_VI */ +#define DOT11_ESP_INFO_LIST_AC_VO 3u /* access category of esp information list AC_VO */ + +#define DOT11_ESP_INFO_LIST_DF_MASK 0x18 /* Data Format Mask */ +#define DOT11_ESP_INFO_LIST_BAWS_MASK 0xE0 /* BA window size mask */ + +/* nom_msdu_size */ +#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */ +#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */ + +/* surplus_bandwidth */ +/* Represented as 3 bits of integer, binary point, 13 bits fraction */ +#define INTEGER_SHIFT 13 /* integer shift */ +#define FRACTION_MASK 0x1FFF /* fraction mask */ + +/** Management Notification Frame */ +BWL_PRE_PACKED_STRUCT struct dot11_management_notification { + uint8 category; /* 
DOT11_ACTION_NOTIFICATION */ + uint8 action; + uint8 token; + uint8 status; + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_MGMT_NOTIFICATION_LEN 4 /* Fixed length */ + +/** Timeout Interval IE */ +BWL_PRE_PACKED_STRUCT struct ti_ie { + uint8 ti_type; + uint32 ti_val; +} BWL_POST_PACKED_STRUCT; +typedef struct ti_ie ti_ie_t; +#define TI_TYPE_REASSOC_DEADLINE 1 +#define TI_TYPE_KEY_LIFETIME 2 + +#ifndef CISCO_AIRONET_OUI +#define CISCO_AIRONET_OUI "\x00\x40\x96" /* Cisco AIRONET OUI */ +#endif // endif +/* QoS FastLane IE. */ +BWL_PRE_PACKED_STRUCT struct ccx_qfl_ie { + uint8 id; /* 221, DOT11_MNG_VS_ID */ + uint8 length; /* 5 */ + uint8 oui[3]; /* 00:40:96 */ + uint8 type; /* 11 */ + uint8 data; +} BWL_POST_PACKED_STRUCT; +typedef struct ccx_qfl_ie ccx_qfl_ie_t; +#define CCX_QFL_IE_TYPE 11 +#define CCX_QFL_ENABLE_SHIFT 5 +#define CCX_QFL_ENALBE (1 << CCX_QFL_ENABLE_SHIFT) + +/* WME Action Codes */ +#define WME_ADDTS_REQUEST 0 /* WME ADDTS request */ +#define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */ +#define WME_DELTS_REQUEST 2 /* WME DELTS request */ + +/* WME Setup Response Status Codes */ +#define WME_ADMISSION_ACCEPTED 0 /* WME admission accepted */ +#define WME_INVALID_PARAMETERS 1 /* WME invalide parameters */ +#define WME_ADMISSION_REFUSED 3 /* WME admission refused */ + +/* Macro to take a pointer to a beacon or probe response + * body and return the char* pointer to the SSID info element + */ +#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN) + +/* Authentication frame payload constants */ +#define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */ +#define DOT11_SHARED_KEY 1 /* d11 shared authentication */ +#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */ +#define DOT11_SAE 3 /* d11 simultaneous authentication of equals */ +#define DOT11_FILS_SKEY 4 /* d11 fils shared key authentication w/o pfs */ +#define DOT11_FILS_SKEY_PFS 5 /* d11 fils shared key authentication w/ pfs */ +#define DOT11_FILS_PKEY 6 /* 
d11 fils public key authentication */ +#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */ + +/* Frame control macros */ +#define FC_PVER_MASK 0x3 /* PVER mask */ +#define FC_PVER_SHIFT 0 /* PVER shift */ +#define FC_TYPE_MASK 0xC /* type mask */ +#define FC_TYPE_SHIFT 2 /* type shift */ +#define FC_SUBTYPE_MASK 0xF0 /* subtype mask */ +#define FC_SUBTYPE_SHIFT 4 /* subtype shift */ +#define FC_TODS 0x100 /* to DS */ +#define FC_TODS_SHIFT 8 /* to DS shift */ +#define FC_FROMDS 0x200 /* from DS */ +#define FC_FROMDS_SHIFT 9 /* from DS shift */ +#define FC_MOREFRAG 0x400 /* more frag. */ +#define FC_MOREFRAG_SHIFT 10 /* more frag. shift */ +#define FC_RETRY 0x800 /* retry */ +#define FC_RETRY_SHIFT 11 /* retry shift */ +#define FC_PM 0x1000 /* PM */ +#define FC_PM_SHIFT 12 /* PM shift */ +#define FC_MOREDATA 0x2000 /* more data */ +#define FC_MOREDATA_SHIFT 13 /* more data shift */ +#define FC_WEP 0x4000 /* WEP */ +#define FC_WEP_SHIFT 14 /* WEP shift */ +#define FC_ORDER 0x8000 /* order */ +#define FC_ORDER_SHIFT 15 /* order shift */ + +/* sequence control macros */ +#define SEQNUM_SHIFT 4 /* seq. number shift */ +#define SEQNUM_MAX 0x1000 /* max seqnum + 1 */ +#define FRAGNUM_MASK 0xF /* frag. number mask */ + +/* Frame Control type/subtype defs */ + +/* FC Types */ +#define FC_TYPE_MNG 0 /* management type */ +#define FC_TYPE_CTL 1 /* control type */ +#define FC_TYPE_DATA 2 /* data type */ + +/* Management Subtypes */ +#define FC_SUBTYPE_ASSOC_REQ 0 /* assoc. request */ +#define FC_SUBTYPE_ASSOC_RESP 1 /* assoc. response */ +#define FC_SUBTYPE_REASSOC_REQ 2 /* reassoc. request */ +#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */ +#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */ +#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */ +#define FC_SUBTYPE_BEACON 8 /* beacon */ +#define FC_SUBTYPE_ATIM 9 /* ATIM */ +#define FC_SUBTYPE_DISASSOC 10 /* disassoc. 
*/ +#define FC_SUBTYPE_AUTH 11 /* authentication */ +#define FC_SUBTYPE_DEAUTH 12 /* de-authentication */ +#define FC_SUBTYPE_ACTION 13 /* action */ +#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */ + +/* Control Subtypes */ +#define FC_SUBTYPE_TRIGGER 2 /* Trigger frame */ +#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */ +#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */ +#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */ +#define FC_SUBTYPE_PS_POLL 10 /* PS poll */ +#define FC_SUBTYPE_RTS 11 /* RTS */ +#define FC_SUBTYPE_CTS 12 /* CTS */ +#define FC_SUBTYPE_ACK 13 /* ACK */ +#define FC_SUBTYPE_CF_END 14 /* CF-END */ +#define FC_SUBTYPE_CF_END_ACK 15 /* CF-END ACK */ + +/* Data Subtypes */ +#define FC_SUBTYPE_DATA 0 /* Data */ +#define FC_SUBTYPE_DATA_CF_ACK 1 /* Data + CF-ACK */ +#define FC_SUBTYPE_DATA_CF_POLL 2 /* Data + CF-Poll */ +#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 /* Data + CF-Ack + CF-Poll */ +#define FC_SUBTYPE_NULL 4 /* Null */ +#define FC_SUBTYPE_CF_ACK 5 /* CF-Ack */ +#define FC_SUBTYPE_CF_POLL 6 /* CF-Poll */ +#define FC_SUBTYPE_CF_ACK_POLL 7 /* CF-Ack + CF-Poll */ +#define FC_SUBTYPE_QOS_DATA 8 /* QoS Data */ +#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 /* QoS Data + CF-Ack */ +#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 /* QoS Data + CF-Poll */ +#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 /* QoS Data + CF-Ack + CF-Poll */ +#define FC_SUBTYPE_QOS_NULL 12 /* QoS Null */ +#define FC_SUBTYPE_QOS_CF_POLL 14 /* QoS CF-Poll */ +#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 /* QoS CF-Ack + CF-Poll */ + +/* Data Subtype Groups */ +#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0) +#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0) +#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0) +#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0) +#define FC_SUBTYPE_ANY_PSPOLL(s) (((s) & 10) != 0) + +/* Type/Subtype Combos */ +#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) /* FC kind mask */ + +#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) /* FC kind 
*/ + +#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) /* Subtype from FC */ +#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) /* Type from FC */ + +#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) /* assoc. request */ +#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) /* assoc. response */ +#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) /* reassoc. request */ +#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc. response */ +#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */ +#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */ +#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */ +#define FC_ATIM FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM) /* ATIM */ +#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */ +#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) /* authentication */ +#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) /* deauthentication */ +#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */ +#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */ + +#define FC_CTL_TRIGGER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_TRIGGER) /* Trigger frame */ +#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */ +#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */ +#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */ +#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) /* PS poll */ +#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) /* RTS */ +#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) /* CTS */ +#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) /* ACK */ +#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) /* CF-END */ +#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, 
FC_SUBTYPE_CF_END_ACK) /* CF-END ACK */ + +#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) /* data */ +#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) /* null data */ +#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) /* data CF ACK */ +#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) /* QoS data */ +#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) /* QoS null */ + +/* QoS Control Field */ + +/* 802.1D Priority */ +#define QOS_PRIO_SHIFT 0 /* QoS priority shift */ +#define QOS_PRIO_MASK 0x0007 /* QoS priority mask */ +#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) /* QoS priority */ + +/* Traffic Identifier */ +#define QOS_TID_SHIFT 0 /* QoS TID shift */ +#define QOS_TID_MASK 0x000f /* QoS TID mask */ +#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) /* QoS TID */ + +/* End of Service Period (U-APSD) */ +#define QOS_EOSP_SHIFT 4 /* QoS End of Service Period shift */ +#define QOS_EOSP_MASK 0x0010 /* QoS End of Service Period mask */ +#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) /* Qos EOSP */ + +/* Ack Policy */ +#define QOS_ACK_NORMAL_ACK 0 /* Normal Ack */ +#define QOS_ACK_NO_ACK 1 /* No Ack (eg mcast) */ +#define QOS_ACK_NO_EXP_ACK 2 /* No Explicit Ack */ +#define QOS_ACK_BLOCK_ACK 3 /* Block Ack */ +#define QOS_ACK_SHIFT 5 /* QoS ACK shift */ +#define QOS_ACK_MASK 0x0060 /* QoS ACK mask */ +#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) /* QoS ACK */ + +/* A-MSDU flag */ +#define QOS_AMSDU_SHIFT 7 /* AMSDU shift */ +#define QOS_AMSDU_MASK 0x0080 /* AMSDU mask */ + +/* Management Frames */ + +/* Management Frame Constants */ + +/* Fixed fields */ +#define DOT11_MNG_AUTH_ALGO_LEN 2 /* d11 management auth. algo. length */ +#define DOT11_MNG_AUTH_SEQ_LEN 2 /* d11 management auth. seq. length */ +#define DOT11_MNG_BEACON_INT_LEN 2 /* d11 management beacon interval length */ +#define DOT11_MNG_CAP_LEN 2 /* d11 management cap. 
length */ +#define DOT11_MNG_AP_ADDR_LEN 6 /* d11 management AP address length */ +#define DOT11_MNG_LISTEN_INT_LEN 2 /* d11 management listen interval length */ +#define DOT11_MNG_REASON_LEN 2 /* d11 management reason length */ +#define DOT11_MNG_AID_LEN 2 /* d11 management AID length */ +#define DOT11_MNG_STATUS_LEN 2 /* d11 management status length */ +#define DOT11_MNG_TIMESTAMP_LEN 8 /* d11 management timestamp length */ + +/* DUR/ID field in assoc resp is 0xc000 | AID */ +#define DOT11_AID_MASK 0x3fff /* d11 AID mask */ + +/* Reason Codes */ +#define DOT11_RC_RESERVED 0 /* d11 RC reserved */ +#define DOT11_RC_UNSPECIFIED 1 /* Unspecified reason */ +#define DOT11_RC_AUTH_INVAL 2 /* Previous authentication no longer valid */ +#define DOT11_RC_DEAUTH_LEAVING 3 /* Deauthenticated because sending station + * is leaving (or has left) IBSS or ESS + */ +#define DOT11_RC_INACTIVITY 4 /* Disassociated due to inactivity */ +#define DOT11_RC_BUSY 5 /* Disassociated because AP is unable to handle + * all currently associated stations + */ +#define DOT11_RC_INVAL_CLASS_2 6 /* Class 2 frame received from + * nonauthenticated station + */ +#define DOT11_RC_INVAL_CLASS_3 7 /* Class 3 frame received from + * nonassociated station + */ +#define DOT11_RC_DISASSOC_LEAVING 8 /* Disassociated because sending station is + * leaving (or has left) BSS + */ +#define DOT11_RC_NOT_AUTH 9 /* Station requesting (re)association is not + * authenticated with responding station + */ +#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */ +#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */ + +/* 12 is unused by STA but could be used by AP/GO */ +#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Magmt */ + +/* 32-39 are QSTA specific reasons added in 11e */ +#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */ +#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */ +#define 
DOT11_RC_EXCESSIVE_FRAMES 34 /* excessive number of frames need ack */ +#define DOT11_RC_TX_OUTSIDE_TXOP 35 /* transmitting outside the limits of txop */ +#define DOT11_RC_LEAVING_QBSS 36 /* QSTA is leaving the QBSS (or restting) */ +#define DOT11_RC_BAD_MECHANISM 37 /* does not want to use the mechanism */ +#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */ +#define DOT11_RC_TIMEOUT 39 /* timeout */ + +#define DOT11_RC_MESH_PEERING_CANCELLED 52 +#define DOT11_RC_MESH_MAX_PEERS 53 +#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN 54 +#define DOT11_RC_MESH_CLOSE_RECVD 55 +#define DOT11_RC_MESH_MAX_RETRIES 56 +#define DOT11_RC_MESH_CONFIRM_TIMEOUT 57 +#define DOT11_RC_MESH_INVALID_GTK 58 +#define DOT11_RC_MESH_INCONSISTENT_PARAMS 59 + +#define DOT11_RC_MESH_INVALID_SEC_CAP 60 +#define DOT11_RC_MESH_PATHERR_NOPROXYINFO 61 +#define DOT11_RC_MESH_PATHERR_NOFWINFO 62 +#define DOT11_RC_MESH_PATHERR_DSTUNREACH 63 +#define DOT11_RC_MESH_MBSSMAC_EXISTS 64 +#define DOT11_RC_MESH_CHANSWITCH_REGREQ 65 +#define DOT11_RC_MESH_CHANSWITCH_UNSPEC 66 + +#define DOT11_RC_MAX 66 /* Reason codes > 66 are reserved */ + +#define DOT11_RC_TDLS_PEER_UNREACH 25 +#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26 + +/* Status Codes */ +#define DOT11_SC_SUCCESS 0 /* Successful */ +#define DOT11_SC_FAILURE 1 /* Unspecified failure */ +#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 /* TDLS wakeup schedule rejected but alternative */ + /* schedule provided */ +#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 /* TDLS wakeup schedule rejected */ +#define DOT11_SC_TDLS_SEC_DISABLED 5 /* TDLS Security disabled */ +#define DOT11_SC_LIFETIME_REJ 6 /* Unacceptable lifetime */ +#define DOT11_SC_NOT_SAME_BSS 7 /* Not in same BSS */ +#define DOT11_SC_CAP_MISMATCH 10 /* Cannot support all requested + * capabilities in the Capability + * Information field + */ +#define DOT11_SC_REASSOC_FAIL 11 /* Reassociation denied due to inability + * to confirm that association exists + */ +#define DOT11_SC_ASSOC_FAIL 12 /* Association denied 
due to reason + * outside the scope of this standard + */ +#define DOT11_SC_AUTH_MISMATCH 13 /* Responding station does not support + * the specified authentication + * algorithm + */ +#define DOT11_SC_AUTH_SEQ 14 /* Received an Authentication frame + * with authentication transaction + * sequence number out of expected + * sequence + */ +#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 /* Authentication rejected because of + * challenge failure + */ +#define DOT11_SC_AUTH_TIMEOUT 16 /* Authentication rejected due to timeout + * waiting for next frame in sequence + */ +#define DOT11_SC_ASSOC_BUSY_FAIL 17 /* Association denied because AP is + * unable to handle additional + * associated stations + */ +#define DOT11_SC_ASSOC_RATE_MISMATCH 18 /* Association denied due to requesting + * station not supporting all of the + * data rates in the BSSBasicRateSet + * parameter + */ +#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 /* Association denied due to requesting + * station not supporting the Short + * Preamble option + */ +#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 /* Association denied due to requesting + * station not supporting the PBCC + * Modulation option + */ +#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 /* Association denied due to requesting + * station not supporting the Channel + * Agility option + */ +#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 /* Association denied because Spectrum + * Management capability is required. + */ +#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 /* Association denied because the info + * in the Power Cap element is + * unacceptable. 
+ */ +#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 /* Association denied because the info + * in the Supported Channel element is + * unacceptable + */ +#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 /* Association denied due to requesting + * station not supporting the Short Slot + * Time option + */ +#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26 /* Association denied because requesting station + * does not support the DSSS-OFDM option + */ +#define DOT11_SC_ASSOC_HT_REQUIRED 27 /* Association denied because the requesting + * station does not support HT features + */ +#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 /* Association denied due to AP + * being unable to reach the R0 Key Holder + */ +#define DOT11_SC_ASSOC_TRY_LATER 30 /* Association denied temporarily, try again later + */ +#define DOT11_SC_ASSOC_MFP_VIOLATION 31 /* Association denied due to Robust Management + * frame policy violation + */ + +#define DOT11_SC_DECLINED 37 /* request declined */ +#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have invalid values */ +#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */ +#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */ +#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */ +#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */ +#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */ +#define DOT11_SC_INVALID_MDID 54 /* Association denied due to invalid MDID */ +#define DOT11_SC_INVALID_FTIE 55 /* Association denied due to invalid FTIE */ + +#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED 59 /* ad proto not supported */ +#define DOT11_SC_NO_OUTSTAND_REQ 60 /* no outstanding req */ +#define DOT11_SC_RSP_NOT_RX_FROM_SERVER 61 /* no response from server */ +#define DOT11_SC_TIMEOUT 62 /* timeout */ +#define DOT11_SC_QUERY_RSP_TOO_LARGE 63 /* query rsp too large */ +#define DOT11_SC_SERVER_UNREACHABLE 65 /* server unreachable */ + +#define 
DOT11_SC_UNEXP_MSG 70 /* Unexpected message */ +#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */ +#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */ + +#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED 76 /* Anti-clogging tocken required */ +#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP 77 /* Invalid contents of RSNIE */ + +#define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting + * station does not support VHT features. + */ + +#define DOT11_SC_TRANSMIT_FAILURE 79 /* transmission failure */ + +/* Info Elts, length of INFORMATION portion of Info Elts */ +#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */ +#define DOT11_MNG_IBSS_PARAM_LEN 2 /* d11 management IBSS parameter length */ + +/* TIM Info element has 3 bytes fixed info in INFORMATION field, + * followed by 1 to 251 bytes of Partial Virtual Bitmap + */ +#define DOT11_MNG_TIM_FIXED_LEN 3 /* d11 management TIM fixed length */ +#define DOT11_MNG_TIM_DTIM_COUNT 0 /* d11 management DTIM count */ +#define DOT11_MNG_TIM_DTIM_PERIOD 1 /* d11 management DTIM period */ +#define DOT11_MNG_TIM_BITMAP_CTL 2 /* d11 management TIM BITMAP control */ +#define DOT11_MNG_TIM_PVB 3 /* d11 management TIM PVB */ + +/* TLV defines */ +#define TLV_TAG_OFF 0 /* tag offset */ +#define TLV_LEN_OFF 1 /* length offset */ +#define TLV_HDR_LEN 2 /* header length */ +#define TLV_BODY_OFF 2 /* body offset */ +#define TLV_BODY_LEN_MAX 255 /* max body length */ + +/* Management Frame Information Element IDs */ +#define DOT11_MNG_SSID_ID 0 /* d11 management SSID id */ +#define DOT11_MNG_RATES_ID 1 /* d11 management rates id */ +#define DOT11_MNG_FH_PARMS_ID 2 /* d11 management FH parameter id */ +#define DOT11_MNG_DS_PARMS_ID 3 /* d11 management DS parameter id */ +#define DOT11_MNG_CF_PARMS_ID 4 /* d11 management CF parameter id */ +#define DOT11_MNG_TIM_ID 5 /* d11 management TIM id */ +#define DOT11_MNG_IBSS_PARMS_ID 6 /* d11 management IBSS parameter id */ +#define 
DOT11_MNG_COUNTRY_ID 7 /* d11 management country id */ +#define DOT11_MNG_HOPPING_PARMS_ID 8 /* d11 management hopping parameter id */ +#define DOT11_MNG_HOPPING_TABLE_ID 9 /* d11 management hopping table id */ +#define DOT11_MNG_FTM_SYNC_INFO_ID 9 /* 11mc D4.3 */ +#define DOT11_MNG_REQUEST_ID 10 /* d11 management request id */ +#define DOT11_MNG_QBSS_LOAD_ID 11 /* d11 management QBSS Load id */ +#define DOT11_MNG_EDCA_PARAM_ID 12 /* 11E EDCA Parameter id */ +#define DOT11_MNG_TSPEC_ID 13 /* d11 management TSPEC id */ +#define DOT11_MNG_TCLAS_ID 14 /* d11 management TCLAS id */ +#define DOT11_MNG_CHALLENGE_ID 16 /* d11 management chanllenge id */ +#define DOT11_MNG_PWR_CONSTRAINT_ID 32 /* 11H PowerConstraint */ +#define DOT11_MNG_PWR_CAP_ID 33 /* 11H PowerCapability */ +#define DOT11_MNG_TPC_REQUEST_ID 34 /* 11H TPC Request */ +#define DOT11_MNG_TPC_REPORT_ID 35 /* 11H TPC Report */ +#define DOT11_MNG_SUPP_CHANNELS_ID 36 /* 11H Supported Channels */ +#define DOT11_MNG_CHANNEL_SWITCH_ID 37 /* 11H ChannelSwitch Announcement */ +#define DOT11_MNG_MEASURE_REQUEST_ID 38 /* 11H MeasurementRequest */ +#define DOT11_MNG_MEASURE_REPORT_ID 39 /* 11H MeasurementReport */ +#define DOT11_MNG_QUIET_ID 40 /* 11H Quiet */ +#define DOT11_MNG_IBSS_DFS_ID 41 /* 11H IBSS_DFS */ +#define DOT11_MNG_ERP_ID 42 /* d11 management ERP id */ +#define DOT11_MNG_TS_DELAY_ID 43 /* d11 management TS Delay id */ +#define DOT11_MNG_TCLAS_PROC_ID 44 /* d11 management TCLAS processing id */ +#define DOT11_MNG_HT_CAP 45 /* d11 mgmt HT cap id */ +#define DOT11_MNG_QOS_CAP_ID 46 /* 11E QoS Capability id */ +#define DOT11_MNG_NONERP_ID 47 /* d11 management NON-ERP id */ +#define DOT11_MNG_RSN_ID 48 /* d11 management RSN id */ +#define DOT11_MNG_EXT_RATES_ID 50 /* d11 management ext. 
rates id */ +#define DOT11_MNG_AP_CHREP_ID 51 /* 11k AP Channel report id */ +#define DOT11_MNG_NEIGHBOR_REP_ID 52 /* 11k & 11v Neighbor report id */ +#define DOT11_MNG_RCPI_ID 53 /* 11k RCPI */ +#define DOT11_MNG_MDIE_ID 54 /* 11r Mobility domain id */ +#define DOT11_MNG_FTIE_ID 55 /* 11r Fast Bss Transition id */ +#define DOT11_MNG_FT_TI_ID 56 /* 11r Timeout Interval id */ +#define DOT11_MNG_RDE_ID 57 /* 11r RIC Data Element id */ +#define DOT11_MNG_REGCLASS_ID 59 /* d11 management regulatory class id */ +#define DOT11_MNG_EXT_CSA_ID 60 /* d11 Extended CSA */ +#define DOT11_MNG_HT_ADD 61 /* d11 mgmt additional HT info */ +#define DOT11_MNG_EXT_CHANNEL_OFFSET 62 /* d11 mgmt ext channel offset */ +#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID 63 /* 11k bss average access delay */ +#define DOT11_MNG_ANTENNA_ID 64 /* 11k antenna id */ +#define DOT11_MNG_RSNI_ID 65 /* 11k RSNI id */ +#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID 66 /* 11k measurement pilot tx info id */ +#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID 67 /* 11k bss aval admission cap id */ +#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID 68 /* 11k bss AC access delay id */ +#define DOT11_MNG_WAPI_ID 68 /* d11 management WAPI id */ +#define DOT11_MNG_TIME_ADVERTISE_ID 69 /* 11p time advertisement */ +#define DOT11_MNG_RRM_CAP_ID 70 /* 11k radio measurement capability */ +#define DOT11_MNG_MULTIPLE_BSSID_ID 71 /* 11k multiple BSSID id */ +#define DOT11_MNG_HT_BSS_COEXINFO_ID 72 /* d11 mgmt OBSS Coexistence INFO */ +#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */ +#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */ +#define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */ +#define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */ +#define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */ +#define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */ +#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID 90 /* 11v bss max idle id */ +#define DOT11_MNG_TFS_REQUEST_ID 91 /* 11v tfs request id */ 
+#define DOT11_MNG_TFS_RESPONSE_ID 92 /* 11v tfs response id */ +#define DOT11_MNG_WNM_SLEEP_MODE_ID 93 /* 11v wnm-sleep mode id */ +#define DOT11_MNG_TIMBC_REQ_ID 94 /* 11v TIM broadcast request id */ +#define DOT11_MNG_TIMBC_RESP_ID 95 /* 11v TIM broadcast response id */ +#define DOT11_MNG_CHANNEL_USAGE 97 /* 11v channel usage */ +#define DOT11_MNG_TIME_ZONE_ID 98 /* 11v time zone */ +#define DOT11_MNG_DMS_REQUEST_ID 99 /* 11v dms request id */ +#define DOT11_MNG_DMS_RESPONSE_ID 100 /* 11v dms response id */ +#define DOT11_MNG_LINK_IDENTIFIER_ID 101 /* 11z TDLS Link Identifier IE */ +#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102 /* 11z TDLS Wakeup Schedule IE */ +#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104 /* 11z TDLS Channel Switch Timing IE */ +#define DOT11_MNG_PTI_CONTROL_ID 105 /* 11z TDLS PTI Control IE */ +#define DOT11_MNG_PU_BUFFER_STATUS_ID 106 /* 11z TDLS PU Buffer Status IE */ +#define DOT11_MNG_INTERWORKING_ID 107 /* 11u interworking */ +#define DOT11_MNG_ADVERTISEMENT_ID 108 /* 11u advertisement protocol */ +#define DOT11_MNG_EXP_BW_REQ_ID 109 /* 11u expedited bandwith request */ +#define DOT11_MNG_QOS_MAP_ID 110 /* 11u QoS map set */ +#define DOT11_MNG_ROAM_CONSORT_ID 111 /* 11u roaming consortium */ +#define DOT11_MNG_EMERGCY_ALERT_ID 112 /* 11u emergency alert identifier */ +#define DOT11_MNG_MESH_CONFIG 113 /* Mesh Configuration */ +#define DOT11_MNG_MESH_ID 114 /* Mesh ID */ +#define DOT11_MNG_MESH_PEER_MGMT_ID 117 /* Mesh PEER MGMT IE */ +#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */ +#define DOT11_MNG_EXT_PREQ_ID 130 /* Mesh PREQ IE */ +#define DOT11_MNG_EXT_PREP_ID 131 /* Mesh PREP IE */ +#define DOT11_MNG_EXT_PERR_ID 132 /* Mesh PERR IE */ +#define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */ +#define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */ +#define DOT11_MNG_EXT_BSSLOAD_ID 193 /* d11 mgmt VHT extended bss load id */ +#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */ 
+#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */ +#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */ +#define DOT11_MNG_AID_ID 197 /* Association ID IE */ +#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */ +#define DOT11_MNG_RNR_ID 201 +#define DOT11_MNG_FTM_PARAMS_ID 206 +#define DOT11_MNG_TWT_ID 216 /* 11ah D5.0 */ +#define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */ +#define DOT11_MNG_PROPR_ID 221 +/* should start using this one instead of above two */ +#define DOT11_MNG_VS_ID 221 /* d11 management Vendor Specific IE */ +#define DOT11_MNG_MESH_CSP_ID 222 /* d11 Mesh Channel Switch Parameter */ +#define DOT11_MNG_FILS_IND_ID 240 /* 11ai FILS Indication element */ +#define DOT11_MNG_FRAGMENT_ID 242 /* IE's fragment ID */ + +/* The follwing ID extensions should be defined >= 255 + * i.e. the values should include 255 (DOT11_MNG_ID_EXT_ID + ID Extension). + */ +#define DOT11_MNG_ID_EXT_ID 255 /* Element ID Extension 11mc D4.3 */ +#define EXT_MNG_OWE_DH_PARAM_ID 32u /* OWE DH Param ID - RFC 8110 */ +#define DOT11_MNG_OWE_DH_PARAM_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_OWE_DH_PARAM_ID) +#define EXT_MNG_HE_CAP_ID 35u /* HE Capabilities, 11ax */ +#define DOT11_MNG_HE_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_CAP_ID) +#define EXT_MNG_HE_OP_ID 36u /* HE Operation IE, 11ax */ +#define DOT11_MNG_HE_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_OP_ID) +#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */ +#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_RAPS_ID) +#define EXT_MNG_MU_EDCA_ID 38u /* MU EDCA Parameter Set */ +#define DOT11_MNG_MU_EDCA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_MU_EDCA_ID) +#define EXT_MNG_SRPS_ID 39u /* Spatial Reuse Parameter Set */ +#define DOT11_MNG_SRPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SRPS_ID) +#define EXT_MNG_BSSCOLOR_CHANGE_ID 42u /* BSS Color Change Announcement */ +#define DOT11_MNG_BSSCOLOR_CHANGE_ID (DOT11_MNG_ID_EXT_ID + 
EXT_MNG_BSSCOLOR_CHANGE_ID) + +/* FILS and OCE ext ids */ +#define FILS_EXTID_MNG_REQ_PARAMS 2u /* FILS Request Parameters element */ +#define DOT11_MNG_FILS_REQ_PARAMS (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_REQ_PARAMS) +#define FILS_EXTID_MNG_KEY_CONFIRMATION_ID 3u /* FILS Key Confirmation element */ +#define DOT11_MNG_FILS_KEY_CONFIRMATION (DOT11_MNG_ID_EXT_ID +\ + FILS_EXTID_MNG_KEY_CONFIRMATION_ID) +#define FILS_EXTID_MNG_SESSION_ID 4u /* FILS Session element */ +#define DOT11_MNG_FILS_SESSION (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_SESSION_ID) +#define FILS_EXTID_MNG_HLP_CONTAINER_ID 5u /* FILS HLP Container element */ +#define DOT11_MNG_FILS_HLP_CONTAINER (DOT11_MNG_ID_EXT_ID +\ + FILS_EXTID_MNG_HLP_CONTAINER_ID) +#define FILS_EXTID_MNG_KEY_DELIVERY_ID 7u /* FILS Key Delivery element */ +#define DOT11_MNG_FILS_KEY_DELIVERY (DOT11_MNG_ID_EXT_ID +\ + FILS_EXTID_MNG_KEY_DELIVERY_ID) +#define FILS_EXTID_MNG_WRAPPED_DATA_ID 8u /* FILS Wrapped Data element */ +#define DOT11_MNG_FILS_WRAPPED_DATA (DOT11_MNG_ID_EXT_ID +\ + FILS_EXTID_MNG_WRAPPED_DATA_ID) +#define OCE_EXTID_MNG_ESP_ID 11u /* Estimated Service Parameters element */ +#define DOT11_MNG_ESP (DOT11_MNG_ID_EXT_ID + OCE_EXTID_MNG_ESP_ID) +#define FILS_EXTID_MNG_NONCE_ID 13u /* FILS Nonce element */ +#define DOT11_MNG_FILS_NONCE (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_NONCE_ID) + +/* deprecated definitions, do not use, to be deleted later */ +#define FILS_HLP_CONTAINER_EXT_ID FILS_EXTID_MNG_HLP_CONTAINER_ID +#define DOT11_ESP_EXT_ID OCE_EXTID_MNG_ESP_ID +#define FILS_REQ_PARAMS_EXT_ID FILS_EXTID_MNG_REQ_PARAMS +/* End of deprecated definitions */ + +#define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\ + ((_ie)->id == DOT11_MNG_ID_EXT_ID) && \ + ((_ie)->len > 0) && \ + ((_id) == ((uint8 *)(_ie) + TLV_HDR_LEN)[0])) + +#define DOT11_MNG_IE_ID_EXT_INIT(_ie, _id, _len) do {\ + (_ie)->id = DOT11_MNG_ID_EXT_ID; \ + (_ie)->len = _len; \ + (_ie)->id_ext = _id; \ + } while (0) + +/* Rate Defines */ + +/* Valid rates for the 
Supported Rates and Extended Supported Rates IEs. + * Encoding is the rate in 500kbps units, rounding up for fractional values. + * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values. + * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates. + * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27}, + * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices. + */ + +#define DOT11_RATE_1M 2 /* 1 Mbps in 500kbps units */ +#define DOT11_RATE_2M 4 /* 2 Mbps in 500kbps units */ +#define DOT11_RATE_5M5 11 /* 5.5 Mbps in 500kbps units */ +#define DOT11_RATE_11M 22 /* 11 Mbps in 500kbps units */ +#define DOT11_RATE_6M 12 /* 6 Mbps in 500kbps units */ +#define DOT11_RATE_9M 18 /* 9 Mbps in 500kbps units */ +#define DOT11_RATE_12M 24 /* 12 Mbps in 500kbps units */ +#define DOT11_RATE_18M 36 /* 18 Mbps in 500kbps units */ +#define DOT11_RATE_24M 48 /* 24 Mbps in 500kbps units */ +#define DOT11_RATE_36M 72 /* 36 Mbps in 500kbps units */ +#define DOT11_RATE_48M 96 /* 48 Mbps in 500kbps units */ +#define DOT11_RATE_54M 108 /* 54 Mbps in 500kbps units */ +#define DOT11_RATE_MAX 108 /* highest rate (54 Mbps) in 500kbps units */ + +/* Supported Rates and Extended Supported Rates IEs + * The supported rates octets are defined with the MSB indicating a Basic Rate + * and bits 0-6 as the rate value + */ +#define DOT11_RATE_BASIC 0x80 /* flag for a Basic Rate */ +#define DOT11_RATE_MASK 0x7F /* mask for numeric part of rate */ + +/* BSS Membership Selector parameters + * 802.11-2016 (and 802.11ax-D1.1), Sec 9.4.2.3 + * These selector values are advertised in Supported Rates and Extended Supported Rates IEs + * in the supported rates list with the Basic rate bit set. + * Constants below include the basic bit. 
+ */ +#define DOT11_BSS_MEMBERSHIP_HT 0xFF /* Basic 0x80 + 127, HT Required to join */ +#define DOT11_BSS_MEMBERSHIP_VHT 0xFE /* Basic 0x80 + 126, VHT Required to join */ +#define DOT11_BSS_MEMBERSHIP_HE 0xFD /* Basic 0x80 + 125, HE Required to join */ + +/* ERP info element bit values */ +#define DOT11_MNG_ERP_LEN 1 /* ERP is currently 1 byte long */ +#define DOT11_MNG_NONERP_PRESENT 0x01 /* NonERP (802.11b) STAs are present + *in the BSS + */ +#define DOT11_MNG_USE_PROTECTION 0x02 /* Use protection mechanisms for + *ERP-OFDM frames + */ +#define DOT11_MNG_BARKER_PREAMBLE 0x04 /* Short Preambles: 0 == allowed, + * 1 == not allowed + */ +/* TS Delay element offset & size */ +#define DOT11_MGN_TS_DELAY_LEN 4 /* length of TS DELAY IE */ +#define TS_DELAY_FIELD_SIZE 4 /* TS DELAY field size */ + +/* Capability Information Field */ +#define DOT11_CAP_ESS 0x0001 /* d11 cap. ESS */ +#define DOT11_CAP_IBSS 0x0002 /* d11 cap. IBSS */ +#define DOT11_CAP_POLLABLE 0x0004 /* d11 cap. pollable */ +#define DOT11_CAP_POLL_RQ 0x0008 /* d11 cap. poll request */ +#define DOT11_CAP_PRIVACY 0x0010 /* d11 cap. privacy */ +#define DOT11_CAP_SHORT 0x0020 /* d11 cap. short */ +#define DOT11_CAP_PBCC 0x0040 /* d11 cap. PBCC */ +#define DOT11_CAP_AGILITY 0x0080 /* d11 cap. agility */ +#define DOT11_CAP_SPECTRUM 0x0100 /* d11 cap. spectrum */ +#define DOT11_CAP_QOS 0x0200 /* d11 cap. qos */ +#define DOT11_CAP_SHORTSLOT 0x0400 /* d11 cap. shortslot */ +#define DOT11_CAP_APSD 0x0800 /* d11 cap. apsd */ +#define DOT11_CAP_RRM 0x1000 /* d11 cap. 11k radio measurement */ +#define DOT11_CAP_CCK_OFDM 0x2000 /* d11 cap. CCK/OFDM */ +#define DOT11_CAP_DELAY_BA 0x4000 /* d11 cap. delayed block ack */ +#define DOT11_CAP_IMMEDIATE_BA 0x8000 /* d11 cap. 
immediate block ack */ + +/* Extended capabilities IE bitfields */ +/* 20/40 BSS Coexistence Management support bit position */ +#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0 +/* Extended Channel Switching support bit position */ +#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING 2 +/* scheduled PSMP support bit position */ +#define DOT11_EXT_CAP_SPSMP 6 +/* Flexible Multicast Service */ +#define DOT11_EXT_CAP_FMS 11 +/* proxy ARP service support bit position */ +#define DOT11_EXT_CAP_PROXY_ARP 12 +/* Civic Location */ +#define DOT11_EXT_CAP_CIVIC_LOC 14 +/* Geospatial Location */ +#define DOT11_EXT_CAP_LCI 15 +/* Traffic Filter Service */ +#define DOT11_EXT_CAP_TFS 16 +/* WNM-Sleep Mode */ +#define DOT11_EXT_CAP_WNM_SLEEP 17 +/* TIM Broadcast service */ +#define DOT11_EXT_CAP_TIMBC 18 +/* BSS Transition Management support bit position */ +#define DOT11_EXT_CAP_BSSTRANS_MGMT 19 +/* Direct Multicast Service */ +#define DOT11_EXT_CAP_DMS 26 +/* Interworking support bit position */ +#define DOT11_EXT_CAP_IW 31 +/* QoS map support bit position */ +#define DOT11_EXT_CAP_QOS_MAP 32 +/* service Interval granularity bit position and mask */ +#define DOT11_EXT_CAP_SI 41 +#define DOT11_EXT_CAP_SI_MASK 0x0E +/* Location Identifier service */ +#define DOT11_EXT_CAP_IDENT_LOC 44 +/* WNM notification */ +#define DOT11_EXT_CAP_WNM_NOTIF 46 +/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */ +#define DOT11_EXT_CAP_OPER_MODE_NOTIF 62 +/* Fine timing measurement - D3.0 */ +#define DOT11_EXT_CAP_FTM_RESPONDER 70 +#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */ +#define DOT11_EXT_CAP_FILS 72 /* FILS Capability */ +/* TWT support */ +#define DOT11_EXT_CAP_TWT_REQUESTER 75 +#define DOT11_EXT_CAP_TWT_RESPONDER 76 +/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset. + * Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path. + * It will cause ROM invalidation otherwise. 
+ */ +#define DOT11_EXT_CAP_MAX_IDX 76 + +#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */ + +/* extended capability */ +#ifndef DOT11_EXTCAP_LEN_MAX +#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3) +#endif // endif +BWL_PRE_PACKED_STRUCT struct dot11_extcap { + uint8 extcap[DOT11_EXTCAP_LEN_MAX]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap dot11_extcap_t; + +/* VHT Operating mode bit fields - (11ac D8.0/802.11-2016 - 9.4.1.53) */ +#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0 +#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3 +#define DOT11_OPER_MODE_160_8080_BW_SHIFT 2 +#define DOT11_OPER_MODE_160_8080_BW_MASK 0x04 +#define DOT11_OPER_MODE_NOLDPC_SHIFT 3 +#define DOT11_OPER_MODE_NOLDPC_MASK 0x08 +#define DOT11_OPER_MODE_RXNSS_SHIFT 4 +#define DOT11_OPER_MODE_RXNSS_MASK 0x70 +#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7 +#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80 + +#define DOT11_OPER_MODE(type, nss, chanw) (\ + ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\ + DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\ + (((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\ + ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\ + DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)) + +#define DOT11_D8_OPER_MODE(type, nss, ldpc, bw160_8080, chanw) (\ + ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\ + DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\ + (((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\ + ((ldpc) << DOT11_OPER_MODE_NOLDPC_SHIFT & DOT11_OPER_MODE_NOLDPC_MASK) |\ + ((bw160_8080) << DOT11_OPER_MODE_160_8080_BW_SHIFT &\ + DOT11_OPER_MODE_160_8080_BW_MASK) |\ + ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\ + DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)) + +#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \ + (((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\ + >> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT) +#define DOT11_OPER_MODE_160_8080(mode) \ + (((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)\ + >> DOT11_OPER_MODE_160_8080_BW_SHIFT) 
+#define DOT11_OPER_MODE_RXNSS(mode) \ + ((((mode) & DOT11_OPER_MODE_RXNSS_MASK) \ + >> DOT11_OPER_MODE_RXNSS_SHIFT) + 1) +#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \ + (((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\ + >> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT) + +#define DOT11_OPER_MODE_20MHZ 0 +#define DOT11_OPER_MODE_40MHZ 1 +#define DOT11_OPER_MODE_80MHZ 2 +#define DOT11_OPER_MODE_160MHZ 3 +#define DOT11_OPER_MODE_8080MHZ 3 +#define DOT11_OPER_MODE_1608080MHZ 1 + +#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)) +#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\ + ((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)) + +/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */ +BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie { + uint8 mode; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t; + +#define DOT11_OPER_MODE_NOTIF_IE_LEN 1 + +/* Extended Capability Information Field */ +#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01 /* 20/40 BSS Coexistence Management support */ + +/* + * Action Frame Constants + */ +#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action field */ +#define DOT11_ACTION_CAT_OFF 0 /* category offset */ +#define DOT11_ACTION_ACT_OFF 1 /* action offset */ + +/* Action Category field (sec 8.4.1.11) */ +#define DOT11_ACTION_CAT_ERR_MASK 0x80 /* category error mask */ +#define DOT11_ACTION_CAT_MASK 0x7F /* category mask */ +#define DOT11_ACTION_CAT_SPECT_MNG 0 /* category spectrum management */ +#define DOT11_ACTION_CAT_QOS 1 /* category QoS */ +#define 
DOT11_ACTION_CAT_DLS 2 /* category DLS */ +#define DOT11_ACTION_CAT_BLOCKACK 3 /* category block ack */ +#define DOT11_ACTION_CAT_PUBLIC 4 /* category public */ +#define DOT11_ACTION_CAT_RRM 5 /* category radio measurements */ +#define DOT11_ACTION_CAT_FBT 6 /* category fast bss transition */ +#define DOT11_ACTION_CAT_HT 7 /* category for HT */ +#define DOT11_ACTION_CAT_SA_QUERY 8 /* security association query */ +#define DOT11_ACTION_CAT_PDPA 9 /* protected dual of public action */ +#define DOT11_ACTION_CAT_WNM 10 /* category for WNM */ +#define DOT11_ACTION_CAT_UWNM 11 /* category for Unprotected WNM */ +#define DOT11_ACTION_CAT_MESH 13 /* category for Mesh */ +#define DOT11_ACTION_CAT_SELFPROT 15 /* category for Mesh, self protected */ +#define DOT11_ACTION_NOTIFICATION 17 +#define DOT11_ACTION_CAT_VHT 21 /* VHT action */ +#define DOT11_ACTION_CAT_S1G 22 /* S1G action */ +#define DOT11_ACTION_CAT_HE 27 /* HE action frame */ +#define DOT11_ACTION_CAT_FILS 26 /* FILS action frame */ +#define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */ +#define DOT11_ACTION_CAT_VS 127 /* category Vendor Specific */ + +/* Spectrum Management Action IDs (sec 7.4.1) */ +#define DOT11_SM_ACTION_M_REQ 0 /* d11 action measurement request */ +#define DOT11_SM_ACTION_M_REP 1 /* d11 action measurement response */ +#define DOT11_SM_ACTION_TPC_REQ 2 /* d11 action TPC request */ +#define DOT11_SM_ACTION_TPC_REP 3 /* d11 action TPC response */ +#define DOT11_SM_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ +#define DOT11_SM_ACTION_EXT_CSA 5 /* d11 extened CSA for 11n */ + +/* QoS action ids */ +#define DOT11_QOS_ACTION_ADDTS_REQ 0 /* d11 action ADDTS request */ +#define DOT11_QOS_ACTION_ADDTS_RESP 1 /* d11 action ADDTS response */ +#define DOT11_QOS_ACTION_DELTS 2 /* d11 action DELTS */ +#define DOT11_QOS_ACTION_SCHEDULE 3 /* d11 action schedule */ +#define DOT11_QOS_ACTION_QOS_MAP 4 /* d11 action QOS map */ + +/* HT action ids */ +#define DOT11_ACTION_ID_HT_CH_WIDTH 0 /* 
notify channel width action id */ +#define DOT11_ACTION_ID_HT_MIMO_PS 1 /* mimo ps action id */ + +/* Public action ids */ +#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */ +#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ +#define DOT11_PUB_ACTION_VENDOR_SPEC 9 /* Vendor specific */ +#define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */ +#define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */ +#define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */ +#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_START 1u /* FTM request start trigger */ +#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_STOP 0u /* FTM request stop trigger */ + +/* Block Ack action types */ +#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */ +#define DOT11_BA_ACTION_ADDBA_RESP 1 /* ADDBA Resp action frame type */ +#define DOT11_BA_ACTION_DELBA 2 /* DELBA action frame type */ + +/* ADDBA action parameters */ +#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 /* AMSDU supported under BA */ +#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 /* policy mask(ack vs delayed) */ +#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 /* policy shift */ +#define DOT11_ADDBA_PARAM_TID_MASK 0x003c /* tid mask */ +#define DOT11_ADDBA_PARAM_TID_SHIFT 2 /* tid shift */ +#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 /* buffer size mask */ +#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 /* buffer size shift */ + +#define DOT11_ADDBA_POLICY_DELAYED 0 /* delayed BA policy */ +#define DOT11_ADDBA_POLICY_IMMEDIATE 1 /* immediate BA policy */ + +/* Fast Transition action types */ +#define DOT11_FT_ACTION_FT_RESERVED 0 +#define DOT11_FT_ACTION_FT_REQ 1 /* FBT request - for over-the-DS FBT */ +#define DOT11_FT_ACTION_FT_RES 2 /* FBT response - for over-the-DS FBT */ +#define DOT11_FT_ACTION_FT_CON 3 /* FBT confirm - for OTDS with RRP */ +#define DOT11_FT_ACTION_FT_ACK 4 /* FBT ack */ + +/* DLS action types */ +#define DOT11_DLS_ACTION_REQ 0 /* DLS Request */ +#define 
DOT11_DLS_ACTION_RESP 1 /* DLS Response */ +#define DOT11_DLS_ACTION_TD 2 /* DLS Teardown */ + +/* Wireless Network Management (WNM) action types */ +#define DOT11_WNM_ACTION_EVENT_REQ 0 +#define DOT11_WNM_ACTION_EVENT_REP 1 +#define DOT11_WNM_ACTION_DIAG_REQ 2 +#define DOT11_WNM_ACTION_DIAG_REP 3 +#define DOT11_WNM_ACTION_LOC_CFG_REQ 4 +#define DOT11_WNM_ACTION_LOC_RFG_RESP 5 +#define DOT11_WNM_ACTION_BSSTRANS_QUERY 6 +#define DOT11_WNM_ACTION_BSSTRANS_REQ 7 +#define DOT11_WNM_ACTION_BSSTRANS_RESP 8 +#define DOT11_WNM_ACTION_FMS_REQ 9 +#define DOT11_WNM_ACTION_FMS_RESP 10 +#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11 +#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12 +#define DOT11_WNM_ACTION_TFS_REQ 13 +#define DOT11_WNM_ACTION_TFS_RESP 14 +#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ 15 +#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16 +#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17 +#define DOT11_WNM_ACTION_TIMBC_REQ 18 +#define DOT11_WNM_ACTION_TIMBC_RESP 19 +#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20 +#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21 +#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22 +#define DOT11_WNM_ACTION_DMS_REQ 23 +#define DOT11_WNM_ACTION_DMS_RESP 24 +#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25 +#define DOT11_WNM_ACTION_NOTFCTN_REQ 26 +#define DOT11_WNM_ACTION_NOTFCTN_RESP 27 +#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP 28 + +/* Unprotected Wireless Network Management (WNM) action types */ +#define DOT11_UWNM_ACTION_TIM 0 +#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT 1 + +#define DOT11_MNG_COUNTRY_ID_LEN 3 + +/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */ +#define DOT11_VHT_ACTION_CBF 0 /* Compressed Beamforming */ +#define DOT11_VHT_ACTION_GID_MGMT 1 /* Group ID Management */ +#define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2 /* Operating mode notif'n */ + +/* FILS category action types - 802.11ai D11.0 - 9.6.8.1 */ +#define DOT11_FILS_ACTION_DISCOVERY 34 /* FILS Discovery */ + +/** DLS Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_dls_req { + 
uint8 category; /* category of action frame (2) */ + uint8 action; /* DLS action: req (0) */ + struct ether_addr da; /* destination address */ + struct ether_addr sa; /* source address */ + uint16 cap; /* capability */ + uint16 timeout; /* timeout value */ + uint8 data[1]; /* IE:support rate, extend support rate, HT cap */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dls_req dot11_dls_req_t; +#define DOT11_DLS_REQ_LEN 18 /* Fixed length */ + +/** DLS response frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_dls_resp { + uint8 category; /* category of action frame (2) */ + uint8 action; /* DLS action: req (0) */ + uint16 status; /* status code field */ + struct ether_addr da; /* destination address */ + struct ether_addr sa; /* source address */ + uint8 data[1]; /* optional: capability, rate ... */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dls_resp dot11_dls_resp_t; +#define DOT11_DLS_RESP_LEN 16 /* Fixed length */ + +/* ************* 802.11v related definitions. ************* */ + +/** BSS Management Transition Query frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: trans_query (6) */ + uint8 token; /* dialog token */ + uint8 reason; /* transition query reason */ + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bsstrans_query dot11_bsstrans_query_t; +#define DOT11_BSSTRANS_QUERY_LEN 4 /* Fixed length */ + +/* BTM transition reason */ +#define DOT11_BSSTRANS_REASON_UNSPECIFIED 0 +#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS 1 +#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY 2 +#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY 3 +#define DOT11_BSSTRANS_REASON_FIRST_ASSOC 4 +#define DOT11_BSSTRANS_REASON_LOAD_BALANCING 5 +#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND 6 +#define DOT11_BSSTRANS_REASON_DEAUTH_RX 7 +#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL 8 +#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL 9 
/* BTM transition reason (continued) */
#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL 10
#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL 11
#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS 12
#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX 13
#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX 14
#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL 15
#define DOT11_BSSTRANS_REASON_LOW_RSSI 16
#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211 17
#define DOT11_BSSTRANS_REASON_RX_BTM_REQ 18
#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED 19
#define DOT11_BSSTRANS_REASON_LEAVING_ESS 20

/** BSS Management Transition Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: trans_req (7) */
	uint8 token; /* dialog token */
	uint8 reqmode; /* transition request mode */
	uint16 disassoc_tmr; /* disassociation timer */
	uint8 validity_intrvl; /* validity interval */
	uint8 data[1]; /* optional: BSS term duration, ... */
			/* ...session info URL, candidate list */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
#define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */

/* BSS Mgmt Transition Request Mode Field - 802.11v (bit flags) */
#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01
#define DOT11_BSSTRANS_REQMODE_ABRIDGED 0x02
#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT 0x04
#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL 0x08
#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT 0x10

/** BSS Management transition response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: trans_resp (8) */
	uint8 token; /* dialog token */
	uint8 status; /* transition status */
	uint8 term_delay; /* validity interval */
	uint8 data[1]; /* optional: BSSID target, candidate list */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
#define DOT11_BSSTRANS_RESP_LEN 5 /* Fixed length */

/* BSS Mgmt Transition Response Status Field */
#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT 0
#define DOT11_BSSTRANS_RESP_STATUS_REJECT 1
#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN 2
#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP 3
#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED 4
#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ 5
#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED 6
#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS 7
#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS 8

/** BSS Max Idle Period element */
BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
	uint8 id; /* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
	uint8 len;
	uint16 max_idle_period; /* in unit of 1000 TUs */
	uint8 idle_opt;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN 3 /* bss max idle period IE size */
#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED 1 /* BSS max idle option */

/** TIM Broadcast request element */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
	uint8 id; /* 94, DOT11_MNG_TIMBC_REQ_ID */
	uint8 len;
	uint8 interval; /* in unit of beacon interval */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
#define DOT11_TIMBC_REQ_IE_LEN 1 /* Fixed length */

/** TIM Broadcast request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* TIM broadcast request element */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_req dot11_timbc_req_t;
#define DOT11_TIMBC_REQ_LEN 3 /* Fixed length */

/** TIM Broadcast response element */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
	uint8 id; /* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
	uint8 len;
	uint8 status; /* status of add request */
	uint8 interval; /* in unit of beacon interval */
	int32 offset; /* in unit of ms */
	uint16 high_rate; /* in unit of 0.5 Mb/s */
	uint16 low_rate; /* in unit of 0.5 Mb/s */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
#define DOT11_TIMBC_DENY_RESP_IE_LEN 1 /* Deny. Fixed length */
#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN 10 /* Accept. Fixed length */

/* TIM Broadcast response status values */
#define DOT11_TIMBC_STATUS_ACCEPT 0
#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP 1
#define DOT11_TIMBC_STATUS_DENY 2
#define DOT11_TIMBC_STATUS_OVERRIDDEN 3
#define DOT11_TIMBC_STATUS_RESERVED 4

/** TIM Broadcast request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* TIM broadcast response element */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_resp dot11_timbc_resp_t;
#define DOT11_TIMBC_RESP_LEN 3 /* Fixed length */

/** TIM element */
BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
	uint8 id; /* 5, DOT11_MNG_TIM_ID */
	uint8 len; /* 4 - 255 */
	uint8 dtim_count; /* DTIM decrementing counter */
	uint8 dtim_period; /* DTIM period */
	uint8 bitmap_control; /* AID 0 + bitmap offset */
	uint8 pvb[1]; /* Partial Virtual Bitmap, variable length */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tim_ie dot11_tim_ie_t;
#define DOT11_TIM_IE_FIXED_LEN 3 /* Fixed length, without id and len */
#define DOT11_TIM_IE_FIXED_TOTAL_LEN 5 /* Fixed length, with id and len */

/** TIM Broadcast frame header */
BWL_PRE_PACKED_STRUCT struct dot11_timbc {
	uint8 category; /* category of action frame (11) */
	uint8 action; /* action: TIM (0) */
	uint8 check_beacon; /* need to check-beacon */
	uint8 tsf[8]; /* Time Synchronization Function */
	dot11_tim_ie_t tim_ie; /* TIM element */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc dot11_timbc_t;
#define DOT11_TIMBC_HDR_LEN (sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
#define DOT11_TIMBC_FIXED_LEN (sizeof(dot11_timbc_t) - 1) /* Fixed length */
#define DOT11_TIMBC_LEN 11 /* Fixed length */

/** TCLAS frame classifier type */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
	uint8 type;
	uint8 mask;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
#define DOT11_TCLAS_FC_HDR_LEN 2 /* Fixed length */

/* TCLAS classifier mask bits (one per fixed field of the classifier) */
#define DOT11_TCLAS_MASK_0 0x1
#define DOT11_TCLAS_MASK_1 0x2
#define DOT11_TCLAS_MASK_2 0x4
#define DOT11_TCLAS_MASK_3 0x8
#define DOT11_TCLAS_MASK_4 0x10
#define DOT11_TCLAS_MASK_5 0x20
#define DOT11_TCLAS_MASK_6 0x40
#define DOT11_TCLAS_MASK_7 0x80

/* TCLAS frame classifier types */
#define DOT11_TCLAS_FC_0_ETH 0
#define DOT11_TCLAS_FC_1_IP 1
#define DOT11_TCLAS_FC_2_8021Q 2
#define DOT11_TCLAS_FC_3_OFFSET 3
#define DOT11_TCLAS_FC_4_IP_HIGHER 4
#define DOT11_TCLAS_FC_5_8021D 5

/** TCLAS frame classifier type 0 parameters for Ethernet */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
	uint8 type;
	uint8 mask;
	uint8 sa[ETHER_ADDR_LEN];
	uint8 da[ETHER_ADDR_LEN];
	uint16 eth_type;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
#define DOT11_TCLAS_FC_0_ETH_LEN 16

/** TCLAS frame classifier type 1 parameters for IPV4 */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
	uint8 type;
	uint8 mask;
	uint8 version;
	uint32 src_ip;
	uint32 dst_ip;
	uint16 src_port;
	uint16 dst_port;
	uint8 dscp;
	uint8 protocol;
	uint8 reserved;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
#define DOT11_TCLAS_FC_1_IPV4_LEN 18

/** TCLAS frame classifier type 2 parameters for 802.1Q */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
	uint8 type;
	uint8 mask;
	uint16 tci;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
#define DOT11_TCLAS_FC_2_8021Q_LEN 4

/** TCLAS frame classifier type 3 parameters for filter offset */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
	uint8 type;
	uint8 mask;
	uint16 offset;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
#define DOT11_TCLAS_FC_3_FILTER_LEN 4

/** TCLAS frame classifier type 4 parameters for IPV4 is the same as TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t; +#define DOT11_TCLAS_FC_4_IPV4_LEN DOT11_TCLAS_FC_1_IPV4_LEN + +/** TCLAS frame classifier type 4 parameters for IPV6 */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 { + uint8 type; + uint8 mask; + uint8 version; + uint8 saddr[16]; + uint8 daddr[16]; + uint16 src_port; + uint16 dst_port; + uint8 dscp; + uint8 nexthdr; + uint8 flow_lbl[3]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t; +#define DOT11_TCLAS_FC_4_IPV6_LEN 44 + +/** TCLAS frame classifier type 5 parameters for 802.1D */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d { + uint8 type; + uint8 mask; + uint8 pcp; + uint8 cfi; + uint16 vid; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t; +#define DOT11_TCLAS_FC_5_8021D_LEN 6 + +/** TCLAS frame classifier type parameters */ +BWL_PRE_PACKED_STRUCT union dot11_tclas_fc { + uint8 data[1]; + dot11_tclas_fc_hdr_t hdr; + dot11_tclas_fc_0_eth_t t0_eth; + dot11_tclas_fc_1_ipv4_t t1_ipv4; + dot11_tclas_fc_2_8021q_t t2_8021q; + dot11_tclas_fc_3_filter_t t3_filter; + dot11_tclas_fc_4_ipv4_t t4_ipv4; + dot11_tclas_fc_4_ipv6_t t4_ipv6; + dot11_tclas_fc_5_8021d_t t5_8021d; +} BWL_POST_PACKED_STRUCT; +typedef union dot11_tclas_fc dot11_tclas_fc_t; + +#define DOT11_TCLAS_FC_MIN_LEN 4 /* Classifier Type 2 has the min size */ +#define DOT11_TCLAS_FC_MAX_LEN 254 + +/** TCLAS element */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie { + uint8 id; /* 14, DOT11_MNG_TCLAS_ID */ + uint8 len; + uint8 user_priority; + dot11_tclas_fc_t fc; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_ie dot11_tclas_ie_t; +#define DOT11_TCLAS_IE_LEN 3u /* Fixed length, include id and len */ + +/** TCLAS processing element */ +BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie { + uint8 id; /* 44, DOT11_MNG_TCLAS_PROC_ID */ + uint8 len; + uint8 process; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t; 
#define DOT11_TCLAS_PROC_IE_LEN 3 /* Fixed length, include id and len */

#define DOT11_TCLAS_PROC_LEN 1u /* Proc ie length is always 1 byte */

/* TCLAS processing values */
#define DOT11_TCLAS_PROC_MATCHALL 0 /* All high level element need to match */
#define DOT11_TCLAS_PROC_MATCHONE 1 /* One high level element need to match */
#define DOT11_TCLAS_PROC_NONMATCH 2 /* Non match to any high level element */

/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
#define DOT11_TSPEC_IE_LEN 57 /* Fixed length */

/** TFS request element */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
	uint8 id; /* 91, DOT11_MNG_TFS_REQUEST_ID */
	uint8 len;
	uint8 tfs_id;
	uint8 actcode;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
#define DOT11_TFS_REQ_IE_LEN 2 /* Fixed length, without id and len */

/** TFS request action codes (bitfield) */
#define DOT11_TFS_ACTCODE_DELETE 1
#define DOT11_TFS_ACTCODE_NOTIFY 2

/** TFS request subelement IDs */
#define DOT11_TFS_REQ_TFS_SE_ID 1
#define DOT11_TFS_REQ_VENDOR_SE_ID 221

/** TFS subelement */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
	uint8 sub_id;
	uint8 len;
	uint8 data[1]; /* TCLAS element(s) + optional TCLAS proc */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_se dot11_tfs_se_t;

/** TFS response element */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
	uint8 id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */
	uint8 len;
	uint8 tfs_id;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
#define DOT11_TFS_RESP_IE_LEN 1u /* Fixed length, without id and len */

/** TFS response subelement IDs (same subelments, but different IDs than in TFS request */
#define DOT11_TFS_RESP_TFS_STATUS_SE_ID 1
#define DOT11_TFS_RESP_TFS_SE_ID 2
#define DOT11_TFS_RESP_VENDOR_SE_ID 221

/** TFS status subelement */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
	uint8 sub_id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */
	uint8 len;
	uint8 resp_st;
	uint8 data[1]; /* Potential dot11_tfs_se_t included */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
#define DOT11_TFS_STATUS_SE_LEN 1 /* Fixed length, without id and len */

/* Following Definition should be merged to FMS_TFS macro below */
/* TFS Response status code. Identical to FMS Element status, without N/A */
#define DOT11_TFS_STATUS_ACCEPT 0
#define DOT11_TFS_STATUS_DENY_FORMAT 1
#define DOT11_TFS_STATUS_DENY_RESOURCE 2
#define DOT11_TFS_STATUS_DENY_POLICY 4
#define DOT11_TFS_STATUS_DENY_UNSPECIFIED 5
#define DOT11_TFS_STATUS_ALTPREF_POLICY 7
#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP 14

/* FMS Element Status and TFS Response Status Definition */
#define DOT11_FMS_TFS_STATUS_ACCEPT 0
#define DOT11_FMS_TFS_STATUS_DENY_FORMAT 1
#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE 2
#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI 3
#define DOT11_FMS_TFS_STATUS_DENY_POLICY 4
#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED 5
#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI 6
#define DOT11_FMS_TFS_STATUS_ALT_POLICY 7
#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI 8
#define DOT11_FMS_TFS_STATUS_ALT_MCRATE 9
#define DOT11_FMS_TFS_STATUS_TERM_POLICY 10
#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE 11
#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO 12
#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI 13
#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP 14

/** TFS Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: TFS request (13) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_req dot11_tfs_req_t;
#define DOT11_TFS_REQ_LEN 3 /* Fixed length */

/** TFS Management Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: TFS request (14) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_resp dot11_tfs_resp_t;
#define DOT11_TFS_RESP_LEN 3 /* Fixed length */

/** TFS Management Notify frame request header */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: TFS notify request (15) */
	uint8 tfs_id_cnt; /* TFS IDs count */
	uint8 tfs_id[1]; /* Array of TFS IDs */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
#define DOT11_TFS_NOTIFY_REQ_LEN 3 /* Fixed length */

/** TFS Management Notify frame response header */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: TFS notify response (28) */
	uint8 tfs_id_cnt; /* TFS IDs count */
	uint8 tfs_id[1]; /* Array of TFS IDs */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
#define DOT11_TFS_NOTIFY_RESP_LEN 3 /* Fixed length */

/** WNM-Sleep Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: wnm-sleep request (16) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
#define DOT11_WNM_SLEEP_REQ_LEN 3 /* Fixed length */

/** WNM-Sleep Management Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: wnm-sleep request (17) */
	uint8 token; /* dialog token */
	uint16 key_len; /* key data length */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
#define DOT11_WNM_SLEEP_RESP_LEN 5 /* Fixed length */
/* WNM-Sleep key-data subelement IDs */
#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK 0
#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK 1

/** WNM-Sleep GTK subelement (key delivered on WNM-Sleep exit) */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
	uint8 sub_id;
	uint8 len;
	uint16 key_info;
	uint8 key_length;
	uint8 rsc[8];
	uint8 key[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN 11 /* without sub_id, len, and key */
#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN 43 /* without sub_id and len */

/** WNM-Sleep IGTK subelement */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
	uint8 sub_id;
	uint8 len;
	uint16 key_id;
	uint8 pn[6];
	uint8 key[16];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24 /* Fixed length */

/** WNM-Sleep Mode element */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
	uint8 id; /* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
	uint8 len;
	uint8 act_type;
	uint8 resp_status;
	uint16 interval;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
#define DOT11_WNM_SLEEP_IE_LEN 4 /* Fixed length */

/* WNM-Sleep action types */
#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER 0
#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT 1

/* WNM-Sleep response status values */
#define DOT11_WNM_SLEEP_RESP_ACCEPT 0
#define DOT11_WNM_SLEEP_RESP_UPDATE 1
#define DOT11_WNM_SLEEP_RESP_DENY 2
#define DOT11_WNM_SLEEP_RESP_DENY_TEMP 3
#define DOT11_WNM_SLEEP_RESP_DENY_KEY 4
#define DOT11_WNM_SLEEP_RESP_DENY_INUSE 5
#define DOT11_WNM_SLEEP_RESP_LAST 6

/** DMS Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: dms request (23) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_req dot11_dms_req_t;
#define DOT11_DMS_REQ_LEN 3 /* Fixed length */

/** DMS Management Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: dms request (24) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_resp dot11_dms_resp_t;
#define DOT11_DMS_RESP_LEN 3 /* Fixed length */

/** DMS request element */
BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
	uint8 id; /* 99, DOT11_MNG_DMS_REQUEST_ID */
	uint8 len;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
#define DOT11_DMS_REQ_IE_LEN 2 /* Fixed length */

/** DMS response element */
BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
	uint8 id; /* 100, DOT11_MNG_DMS_RESPONSE_ID */
	uint8 len;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
#define DOT11_DMS_RESP_IE_LEN 2 /* Fixed length */

/** DMS request descriptor */
BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
	uint8 dms_id;
	uint8 len;
	uint8 type;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
#define DOT11_DMS_REQ_DESC_LEN 3 /* Fixed length */

/* DMS request types */
#define DOT11_DMS_REQ_TYPE_ADD 0
#define DOT11_DMS_REQ_TYPE_REMOVE 1
#define DOT11_DMS_REQ_TYPE_CHANGE 2

/** DMS response status */
BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
	uint8 dms_id;
	uint8 len;
	uint8 type;
	uint16 lsc;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
#define DOT11_DMS_RESP_STATUS_LEN 5 /* Fixed length */

/* DMS response types */
#define DOT11_DMS_RESP_TYPE_ACCEPT 0
#define DOT11_DMS_RESP_TYPE_DENY 1
#define DOT11_DMS_RESP_TYPE_TERM 2

#define DOT11_DMS_RESP_LSC_UNSUPPORTED 0xFFFF

/** WNM-Notification Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_notif_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: Notification request (26) */
	uint8 token; /* dialog token */
	uint8 type; /* type */
	uint8 data[1]; /* Sub-elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_notif_req dot11_wnm_notif_req_t;
#define DOT11_WNM_NOTIF_REQ_LEN 4 /* Fixed length */

/** FMS Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: fms request (9) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_fms_req dot11_fms_req_t;
#define DOT11_FMS_REQ_LEN 3 /* Fixed length */

/** FMS Management Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
	uint8 category; /* category of action frame (10) */
	uint8 action; /* WNM action: fms request (10) */
	uint8 token; /* dialog token */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_fms_resp dot11_fms_resp_t;
#define DOT11_FMS_RESP_LEN 3 /* Fixed length */

/** FMS Descriptor element */
BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
	uint8 id;
	uint8 len;
	uint8 num_fms_cnt;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_fms_desc dot11_fms_desc_t;
#define DOT11_FMS_DESC_LEN 1 /* Fixed length */

/* FMS counter field layout: counter ID in bits 0-2, count starting at bit 3 */
#define DOT11_FMS_CNTR_MAX 0x8
#define DOT11_FMS_CNTR_ID_MASK 0x7
#define DOT11_FMS_CNTR_ID_SHIFT 0x0
/* NOTE(review): 0xf1 looks inconsistent with DOT11_FMS_CNTR_SHIFT 0x3
 * (bits 3-7 would be 0xf8) -- TODO confirm against the spec before relying
 * on this mask.
 */
#define DOT11_FMS_CNTR_COUNT_MASK 0xf1
#define DOT11_FMS_CNTR_SHIFT 0x3

/** FMS request element */
BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
	uint8 id;
	uint8 len;
	uint8 fms_token; /* token used to identify fms stream set */
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
#define DOT11_FMS_REQ_IE_FIX_LEN 1 /* Fixed length */

/** Rate identification field (mask/MCS selector/rate) */
BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
	uint8 mask;
	uint8 mcs_idx;
	uint16 rate;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rate_id_field dot11_rate_id_field_t;
#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK 0x7
#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET 0
#define DOT11_RATE_ID_FIELD_RATETYPE_MASK 0x18
#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET 3
#define DOT11_RATE_ID_FIELD_LEN sizeof(dot11_rate_id_field_t)

/** FMS request subelements */
BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
	uint8 sub_id;
	uint8 len;
	uint8 interval;
	uint8 max_interval;
	dot11_rate_id_field_t rate;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_fms_se dot11_fms_se_t;
#define DOT11_FMS_REQ_SE_LEN 6 /* Fixed length */

#define DOT11_FMS_REQ_SE_ID_FMS 1 /* FMS subelement */
#define DOT11_FMS_REQ_SE_ID_VS 221 /* Vendor Specific subelement */

/** FMS response element */
BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
	uint8 id;
	uint8 len;
	uint8 fms_token;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
#define DOT11_FMS_RESP_IE_FIX_LEN 1 /* Fixed length */

/* FMS status subelements */
#define DOT11_FMS_STATUS_SE_ID_FMS 1 /* FMS Status */
#define DOT11_FMS_STATUS_SE_ID_TCLAS 2 /* TCLAS Status */
#define DOT11_FMS_STATUS_SE_ID_VS 221 /* Vendor Specific subelement */

/** FMS status subelement */
BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
	uint8 sub_id;
	uint8 len;
	uint8 status;
	uint8 interval;
	uint8 max_interval;
	uint8 fmsid;
	uint8 counter;
	dot11_rate_id_field_t rate;
	uint8 mcast_addr[ETHER_ADDR_LEN];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_fms_status_se dot11_fms_status_se_t;
#define DOT11_FMS_STATUS_SE_LEN 15 /* Fixed length */

/** TCLAS status subelement */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
	uint8 sub_id;
	uint8 len;
	uint8 fmsid;
	uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
#define DOT11_TCLAS_STATUS_SE_LEN 1 /* Fixed length */

/** Block Ack ADDBA request frame body */
BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
	uint8 category; /* category of action frame (3) */
	uint8 action; /* action: addba req */
	uint8 token; /* identifier */
	uint16 addba_param_set; /* parameter set */
	uint16 timeout; /* timeout in seconds */
	uint16 start_seqnum; /* starting sequence number */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_addba_req dot11_addba_req_t;
#define DOT11_ADDBA_REQ_LEN 9 /* length of addba req frame */

/** Block Ack ADDBA response frame body */
BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
	uint8 category; /* category of action frame (3) */
	uint8 action; /* action: addba resp */
	uint8 token; /* identifier */
	uint16 status; /* status of add request */
	uint16 addba_param_set; /* negotiated parameter set */
	uint16 timeout; /* negotiated timeout in seconds */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_addba_resp dot11_addba_resp_t;
#define DOT11_ADDBA_RESP_LEN 9 /* length of addba resp frame */

/* DELBA action parameters */
#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 /* initiator mask */
#define DOT11_DELBA_PARAM_INIT_SHIFT 11 /* initiator shift */
#define DOT11_DELBA_PARAM_TID_MASK 0xf000 /* tid mask */
#define DOT11_DELBA_PARAM_TID_SHIFT 12 /* tid shift */

/** Block Ack DELBA frame body */
BWL_PRE_PACKED_STRUCT struct dot11_delba {
	uint8 category; /* category of action frame (3) */
	uint8 action; /* action: addba req */
	uint16 delba_param_set; /* paarmeter set */
	uint16 reason; /* reason for dellba */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_delba dot11_delba_t;
#define DOT11_DELBA_LEN 6 /* length of delba frame */

/* SA Query action field value */
#define SA_QUERY_REQUEST 0
#define SA_QUERY_RESPONSE 1

/* ************* 802.11r related definitions. ************* */

/** Over-the-DS Fast Transition Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
	uint8 category; /* category of action frame (6) */
	uint8 action; /* action: ft req */
	uint8 sta_addr[ETHER_ADDR_LEN];
	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ft_req dot11_ft_req_t;
#define DOT11_FT_REQ_FIXED_LEN 14

/** Over-the-DS Fast Transition Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
	uint8 category; /* category of action frame (6) */
	uint8 action; /* action: ft resp */
	uint8 sta_addr[ETHER_ADDR_LEN];
	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
	uint16 status; /* status code */
	uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ft_res dot11_ft_res_t;
#define DOT11_FT_RES_FIXED_LEN 16

/** RDE RIC Data Element. */
BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
	uint8 id; /* 11r, DOT11_MNG_RDE_ID */
	uint8 length;
	uint8 rde_id; /* RDE identifier. */
	uint8 rd_count; /* Resource Descriptor Count. */
	uint16 status; /* Status Code. */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rde_ie dot11_rde_ie_t;

/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)

/* ************* 802.11k related definitions. ************* */

/* Radio measurements enabled capability ie */
#define DOT11_RRM_CAP_LEN 5 /* length of rrm cap bitmap */
#define RCPI_IE_LEN 1
#define RSNI_IE_LEN 1
BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
	uint8 cap[DOT11_RRM_CAP_LEN];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;

/* Bitmap definitions for cap ie (bit positions within the 5-byte bitmap) */
#define DOT11_RRM_CAP_LINK 0
#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1
#define DOT11_RRM_CAP_PARALLEL 2
#define DOT11_RRM_CAP_REPEATED 3
#define DOT11_RRM_CAP_BCN_PASSIVE 4
#define DOT11_RRM_CAP_BCN_ACTIVE 5
#define DOT11_RRM_CAP_BCN_TABLE 6
#define DOT11_RRM_CAP_BCN_REP_COND 7
#define DOT11_RRM_CAP_FM 8
#define DOT11_RRM_CAP_CLM 9
#define DOT11_RRM_CAP_NHM 10
#define DOT11_RRM_CAP_SM 11
#define DOT11_RRM_CAP_LCIM 12
#define DOT11_RRM_CAP_LCIA 13
#define DOT11_RRM_CAP_TSCM 14
#define DOT11_RRM_CAP_TTSCM 15
#define DOT11_RRM_CAP_AP_CHANREP 16
#define DOT11_RRM_CAP_RMMIB 17
/* bit18-bit23, not used for RRM_IOVAR */
#define DOT11_RRM_CAP_MPC0 24
#define DOT11_RRM_CAP_MPC1 25
#define DOT11_RRM_CAP_MPC2 26
#define DOT11_RRM_CAP_MPTI 27
#define DOT11_RRM_CAP_NBRTSFO 28
#define DOT11_RRM_CAP_RCPI 29
#define DOT11_RRM_CAP_RSNI 30
#define DOT11_RRM_CAP_BSSAAD 31
#define DOT11_RRM_CAP_BSSAAC 32
#define DOT11_RRM_CAP_AI 33
#define DOT11_RRM_CAP_FTM_RANGE 34
#define DOT11_RRM_CAP_CIVIC_LOC 35
#define DOT11_RRM_CAP_IDENT_LOC 36
#define DOT11_RRM_CAP_LAST 36

/* Per-byte enable masks derived from the bit positions above; each group is
 * shifted down to its byte of the bitmap (hence the -8/-24/-32 offsets).
 */
#ifdef WL11K_ALL_MEAS
#define DOT11_RRM_CAP_LINK_ENAB (1 << DOT11_RRM_CAP_LINK)
#define DOT11_RRM_CAP_FM_ENAB (1 << (DOT11_RRM_CAP_FM - 8))
#define DOT11_RRM_CAP_CLM_ENAB (1 << (DOT11_RRM_CAP_CLM - 8))
#define DOT11_RRM_CAP_NHM_ENAB (1 << (DOT11_RRM_CAP_NHM - 8))
#define DOT11_RRM_CAP_SM_ENAB (1 << (DOT11_RRM_CAP_SM - 8))
#define DOT11_RRM_CAP_LCIM_ENAB (1 << (DOT11_RRM_CAP_LCIM - 8))
#define DOT11_RRM_CAP_TSCM_ENAB (1 << (DOT11_RRM_CAP_TSCM - 8))
#ifdef WL11K_AP
#define DOT11_RRM_CAP_MPC0_ENAB (1 << (DOT11_RRM_CAP_MPC0 - 24))
#define DOT11_RRM_CAP_MPC1_ENAB (1 << (DOT11_RRM_CAP_MPC1 - 24))
#define DOT11_RRM_CAP_MPC2_ENAB (1 << (DOT11_RRM_CAP_MPC2 - 24))
#define DOT11_RRM_CAP_MPTI_ENAB (1 << (DOT11_RRM_CAP_MPTI - 24))
#else
#define DOT11_RRM_CAP_MPC0_ENAB 0
#define DOT11_RRM_CAP_MPC1_ENAB 0
#define DOT11_RRM_CAP_MPC2_ENAB 0
#define DOT11_RRM_CAP_MPTI_ENAB 0
#endif /* WL11K_AP */
#define DOT11_RRM_CAP_CIVIC_LOC_ENAB (1 << (DOT11_RRM_CAP_CIVIC_LOC - 32))
#define DOT11_RRM_CAP_IDENT_LOC_ENAB (1 << (DOT11_RRM_CAP_IDENT_LOC - 32))
#else
#define DOT11_RRM_CAP_LINK_ENAB 0
#define DOT11_RRM_CAP_FM_ENAB 0
#define DOT11_RRM_CAP_CLM_ENAB 0
#define DOT11_RRM_CAP_NHM_ENAB 0
#define DOT11_RRM_CAP_SM_ENAB 0
#define DOT11_RRM_CAP_LCIM_ENAB 0
#define DOT11_RRM_CAP_TSCM_ENAB 0
#define DOT11_RRM_CAP_MPC0_ENAB 0
#define DOT11_RRM_CAP_MPC1_ENAB 0
#define DOT11_RRM_CAP_MPC2_ENAB 0
#define DOT11_RRM_CAP_MPTI_ENAB 0
#define DOT11_RRM_CAP_CIVIC_LOC_ENAB 0
#define DOT11_RRM_CAP_IDENT_LOC_ENAB 0
#endif /* WL11K_ALL_MEAS */
#ifdef WL11K_NBR_MEAS
#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB (1 << DOT11_RRM_CAP_NEIGHBOR_REPORT)
#else
#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB 0
#endif /* WL11K_NBR_MEAS */
#ifdef WL11K_BCN_MEAS
#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB (1 << DOT11_RRM_CAP_BCN_PASSIVE)
#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB (1 << DOT11_RRM_CAP_BCN_ACTIVE)
#else
#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB 0
#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB 0
#endif /* WL11K_BCN_MEAS */
#define DOT11_RRM_CAP_MPA_MASK 0x7
/* Operating Class (formerly "Regulatory Class") definitions */
#define DOT11_OP_CLASS_NONE 255

/** AP Channel Report element (note: struct tag "do11_" is a historical typo,
 * kept for source compatibility; the typedef name is spelled "dot11_").
 */
BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
	uint8 id;
	uint8 len;
	uint8 reg;
	uint8 chanlist[1];
} BWL_POST_PACKED_STRUCT;
typedef struct do11_ap_chrep dot11_ap_chrep_t;

/* Radio Measurements action ids */
#define DOT11_RM_ACTION_RM_REQ 0 /* Radio measurement request */
#define DOT11_RM_ACTION_RM_REP 1 /* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ 2 /* Link measurement request */ +#define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */ +#define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */ +#define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */ +#define DOT11_PUB_ACTION_MP 7 /* Measurement Pilot public action id */ + +/** Generic radio measurement action frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_rm_action { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_action dot11_rm_action_t; +#define DOT11_RM_ACTION_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint16 reps; /* no. of repetitions */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq dot11_rmreq_t; +#define DOT11_RMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_rm_ie { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_ie dot11_rm_ie_t; +#define DOT11_RM_IE_LEN 5 + +/* Definitions for "mode" bits in rm req */ +#define DOT11_RMREQ_MODE_PARALLEL 1 +#define DOT11_RMREQ_MODE_ENABLE 2 +#define DOT11_RMREQ_MODE_REQUEST 4 +#define DOT11_RMREQ_MODE_REPORT 8 +#define DOT11_RMREQ_MODE_DURMAND 0x10 /* Duration Mandatory */ + +/* Definitions for "mode" bits in rm rep */ +#define DOT11_RMREP_MODE_LATE 1 +#define DOT11_RMREP_MODE_INCAPABLE 2 +#define DOT11_RMREP_MODE_REFUSED 4 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 bcn_mode; + struct ether_addr bssid; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t; +#define DOT11_RMREQ_BCN_LEN 18 + 
/** Beacon measurement report fixed fields (11k) */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
	uint8 reg;
	uint8 channel;
	uint32 starttime[2]; /* measurement start time; two 32-bit words
			      * (presumably TSF low/high -- confirm with users)
			      */
	uint16 duration;
	uint8 frame_info;
	uint8 rcpi;
	uint8 rsni;
	struct ether_addr bssid;
	uint8 antenna_id;
	uint32 parent_tsf;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
#define DOT11_RMREP_BCN_LEN 26

/* Beacon request measurement mode */
#define DOT11_RMREQ_BCN_PASSIVE 0
#define DOT11_RMREQ_BCN_ACTIVE 1
#define DOT11_RMREQ_BCN_TABLE 2

/* Sub-element IDs for Beacon Request */
#define DOT11_RMREQ_BCN_SSID_ID 0
#define DOT11_RMREQ_BCN_REPINFO_ID 1
#define DOT11_RMREQ_BCN_REPDET_ID 2
#define DOT11_RMREQ_BCN_REQUEST_ID 10
#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID
#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID 164

/* Reporting Detail element definition */
#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */
#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */
#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */

/* Reporting Information (reporting condition) element definition */
#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */
#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued after each measurement */

/* Last Beacon Report Indication Request definition */
#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ENAB 1

/** Last Beacon Report Indication request subelement */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind_req {
	uint8 id; /* DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID */
	uint8 len; /* length of remaining fields */
	uint8 data; /* data = 1 means last bcn rpt ind requested */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmrep_last_bcn_rpt_ind_req dot11_rmrep_last_bcn_rpt_ind_req_t;

/* Sub-element IDs for Beacon Report */
#define DOT11_RMREP_BCN_FRM_BODY 1
#define DOT11_RMREP_BCN_FRM_BODY_FRAG_ID 2
#define DOT11_RMREP_BCN_LAST_RPT_IND 164
#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */

/** Beacon report frame-body fragment ID subelement */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_frag_id {
	uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */
	uint8 len; /* length of remaining fields */
	uint8 bcn_rpt_id; /* Bcn rpt instance ID */
	uint8 frag_info; /* fragment Id(7 bits) | More fragments(1 bit) */
} BWL_POST_PACKED_STRUCT;

typedef struct dot11_rmrep_bcn_frm_body_frag_id dot11_rmrep_bcn_frm_body_frag_id_t;
#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2
#define DOT11_RMREP_BCNRPT_FRAG_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_frag_id_t)
#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1

/** Last Beacon Report Indication subelement */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind {
	uint8 id; /* DOT11_RMREP_BCN_LAST_RPT_IND */
	uint8 len; /* length of remaining fields */
	uint8 data; /* data = 1 is last bcn rpt */
} BWL_POST_PACKED_STRUCT;

typedef struct dot11_rmrep_last_bcn_rpt_ind dot11_rmrep_last_bcn_rpt_ind_t;
#define DOT11_RMREP_LAST_BCN_RPT_IND_DATA_LEN 1
#define DOT11_RMREP_LAST_BCN_RPT_IND_SE_LEN sizeof(dot11_rmrep_last_bcn_rpt_ind_t)

/* Sub-element IDs for Frame Report */
#define DOT11_RMREP_FRAME_COUNT_REPORT 1

/* Channel load request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
	uint8 id;
	uint8 len;
	uint8 token;
	uint8 mode;
	uint8 type;
	uint8 reg;
	uint8 channel;
	uint16 interval;
	uint16 duration;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t;
#define DOT11_RMREQ_CHANLOAD_LEN 11

/** Channel load report */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload {
	uint8 reg;
	uint8 channel;
	uint32 starttime[2];
	uint16 duration;
	uint8 channel_load;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t;
#define DOT11_RMREP_CHANLOAD_LEN 13

/** Noise histogram request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise {
	uint8 id;
	uint8 len;
	uint8 token;
	uint8 mode;
	uint8 type;
	uint8 reg;
	uint8 channel;
	uint16 interval;
	uint16 duration;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmreq_noise
dot11_rmreq_noise_t; +#define DOT11_RMREQ_NOISE_LEN 11 + +/** Noise histogram report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 antid; + uint8 anpi; + uint8 ipi0_dens; + uint8 ipi1_dens; + uint8 ipi2_dens; + uint8 ipi3_dens; + uint8 ipi4_dens; + uint8 ipi5_dens; + uint8 ipi6_dens; + uint8 ipi7_dens; + uint8 ipi8_dens; + uint8 ipi9_dens; + uint8 ipi10_dens; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_noise dot11_rmrep_noise_t; +#define DOT11_RMREP_NOISE_LEN 25 + +/** Frame request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 req_type; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_frame dot11_rmreq_frame_t; +#define DOT11_RMREQ_FRAME_LEN 18 + +/** Frame report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frame dot11_rmrep_frame_t; +#define DOT11_RMREP_FRAME_LEN 12 + +/** Frame report entry */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry { + struct ether_addr ta; + struct ether_addr bssid; + uint8 phy_type; + uint8 avg_rcpi; + uint8 last_rsni; + uint8 last_rcpi; + uint8 ant_id; + uint16 frame_cnt; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t; +#define DOT11_RMREP_FRMENTRY_LEN 19 + +/** STA statistics request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + struct ether_addr peer; + uint16 interval; + uint16 duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_stat dot11_rmreq_stat_t; +#define DOT11_RMREQ_STAT_LEN 16 + +/** STA statistics report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat { + uint16 
duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_stat dot11_rmrep_stat_t; + +/* Statistics Group Report: Group IDs */ +enum { + DOT11_RRM_STATS_GRP_ID_0 = 0, + DOT11_RRM_STATS_GRP_ID_1, + DOT11_RRM_STATS_GRP_ID_2, + DOT11_RRM_STATS_GRP_ID_3, + DOT11_RRM_STATS_GRP_ID_4, + DOT11_RRM_STATS_GRP_ID_5, + DOT11_RRM_STATS_GRP_ID_6, + DOT11_RRM_STATS_GRP_ID_7, + DOT11_RRM_STATS_GRP_ID_8, + DOT11_RRM_STATS_GRP_ID_9, + DOT11_RRM_STATS_GRP_ID_10, + DOT11_RRM_STATS_GRP_ID_11, + DOT11_RRM_STATS_GRP_ID_12, + DOT11_RRM_STATS_GRP_ID_13, + DOT11_RRM_STATS_GRP_ID_14, + DOT11_RRM_STATS_GRP_ID_15, + DOT11_RRM_STATS_GRP_ID_16 +}; + +/* Statistics Group Report: Group Data length */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28 +typedef struct rrm_stat_group_0 { + uint32 txfrag; + uint32 txmulti; + uint32 txfail; + uint32 rxframe; + uint32 rxmulti; + uint32 rxbadfcs; + uint32 txframe; +} rrm_stat_group_0_t; + +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_1 24 +typedef struct rrm_stat_group_1 { + uint32 txretry; + uint32 txretries; + uint32 rxdup; + uint32 txrts; + uint32 rtsfail; + uint32 ackfail; +} rrm_stat_group_1_t; + +/* group 2-9 use same qos data structure (tid 0-7), total 52 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_2_9 52 +typedef struct rrm_stat_group_qos { + uint32 txfrag; + uint32 txfail; + uint32 txretry; + uint32 txretries; + uint32 rxdup; + uint32 txrts; + uint32 rtsfail; + uint32 ackfail; + uint32 rxfrag; + uint32 txframe; + uint32 txdrop; + uint32 rxmpdu; + uint32 rxretries; +} rrm_stat_group_qos_t; + +/* dot11BSSAverageAccessDelay Group (only available at an AP): 8 byte */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_10 8 +typedef BWL_PRE_PACKED_STRUCT struct rrm_stat_group_10 { + uint8 apavgdelay; + uint8 avgdelaybe; + uint8 avgdelaybg; + uint8 avgdelayvi; + uint8 avgdelayvo; + uint16 stacount; + uint8 chanutil; +} BWL_POST_PACKED_STRUCT rrm_stat_group_10_t; + +/* AMSDU, 40 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_11 40 +typedef 
struct rrm_stat_group_11 { + uint32 txamsdu; + uint32 amsdufail; + uint32 amsduretry; + uint32 amsduretries; + uint32 txamsdubyte_h; + uint32 txamsdubyte_l; + uint32 amsduackfail; + uint32 rxamsdu; + uint32 rxamsdubyte_h; + uint32 rxamsdubyte_l; +} rrm_stat_group_11_t; + +/* AMPDU, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_12 36 +typedef struct rrm_stat_group_12 { + uint32 txampdu; + uint32 txmpdu; + uint32 txampdubyte_h; + uint32 txampdubyte_l; + uint32 rxampdu; + uint32 rxmpdu; + uint32 rxampdubyte_h; + uint32 rxampdubyte_l; + uint32 ampducrcfail; +} rrm_stat_group_12_t; + +/* BACK etc, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_13 36 +typedef struct rrm_stat_group_13 { + uint32 rximpbarfail; + uint32 rxexpbarfail; + uint32 chanwidthsw; + uint32 txframe20mhz; + uint32 txframe40mhz; + uint32 rxframe20mhz; + uint32 rxframe40mhz; + uint32 psmpgrantdur; + uint32 psmpuseddur; +} rrm_stat_group_13_t; + +/* RD Dual CTS etc, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_14 36 +typedef struct rrm_stat_group_14 { + uint32 grantrdgused; + uint32 grantrdgunused; + uint32 txframeingrantrdg; + uint32 txbyteingrantrdg_h; + uint32 txbyteingrantrdg_l; + uint32 dualcts; + uint32 dualctsfail; + uint32 rtslsi; + uint32 rtslsifail; +} rrm_stat_group_14_t; + +/* bf and STBC etc, 20 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_15 20 +typedef struct rrm_stat_group_15 { + uint32 bfframe; + uint32 stbccts; + uint32 stbcctsfail; + uint32 nonstbccts; + uint32 nonstbcctsfail; +} rrm_stat_group_15_t; + +/* RSNA, 28 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_16 28 +typedef struct rrm_stat_group_16 { + uint32 rsnacmacicverr; + uint32 rsnacmacreplay; + uint32 rsnarobustmgmtccmpreplay; + uint32 rsnatkipicverr; + uint32 rsnatkipicvreplay; + uint32 rsnaccmpdecrypterr; + uint32 rsnaccmpreplay; +} rrm_stat_group_16_t; + +/* Transmit stream/category measurement request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream { + uint8 id; + uint8 len; + uint8 token; + 
uint8 mode; + uint8 type; + uint16 interval; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 bin0_range; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t; +#define DOT11_RMREQ_TXSTREAM_LEN 17 + +/** Transmit stream/category measurement report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream { + uint32 starttime[2]; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 reason; + uint32 txmsdu_cnt; + uint32 msdu_discarded_cnt; + uint32 msdufailed_cnt; + uint32 msduretry_cnt; + uint32 cfpolls_lost_cnt; + uint32 avrqueue_delay; + uint32 avrtx_delay; + uint8 bin0_range; + uint32 bin0; + uint32 bin1; + uint32 bin2; + uint32 bin3; + uint32 bin4; + uint32 bin5; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t; +#define DOT11_RMREP_TXSTREAM_LEN 71 + +typedef struct rrm_tscm { + uint32 msdu_tx; + uint32 msdu_exp; + uint32 msdu_fail; + uint32 msdu_retries; + uint32 cfpolls_lost; + uint32 queue_delay; + uint32 tx_delay_sum; + uint32 tx_delay_cnt; + uint32 bin0_range_us; + uint32 bin0; + uint32 bin1; + uint32 bin2; + uint32 bin3; + uint32 bin4; + uint32 bin5; +} rrm_tscm_t; +enum { + DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */ + DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? */ + DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2 /* Where is he/she? */ +}; + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + + /* Following 3 fields are unused. Keep for ROM compatibility. 
*/ + uint8 lat_res; + uint8 lon_res; + uint8 alt_res; + + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t; +#define DOT11_RMREQ_LCI_LEN 9 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 lci_sub_id; + uint8 lci_sub_len; + /* optional LCI field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t; + +#define DOT11_FTM_LCI_SUBELEM_ID 0 +#define DOT11_FTM_LCI_SUBELEM_LEN 2 +#define DOT11_FTM_LCI_FIELD_LEN 16 +#define DOT11_FTM_LCI_UNKNOWN_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 civloc_type; + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t; +#define DOT11_RMREQ_CIVIC_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 civloc_type; + uint8 civloc_sub_id; + uint8 civloc_sub_len; + /* optional location civic field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t; + +#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776 0 +#define DOT11_FTM_CIVIC_SUBELEM_ID 0 +#define DOT11_FTM_CIVIC_SUBELEM_LEN 2 +#define DOT11_FTM_CIVIC_LOC_SI_NONE 0 +#define DOT11_FTM_CIVIC_TYPE_LEN 1 +#define DOT11_FTM_CIVIC_UNKNOWN_LEN 3 + +/* Location Identifier measurement request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_locid { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 siu; + uint16 si; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_locid dot11_rmreq_locid_t; +#define DOT11_RMREQ_LOCID_LEN 9 + +/* Location Identifier measurement 
report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_locid { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 exp_tsf[8]; + uint8 locid_sub_id; + uint8 locid_sub_len; + /* optional location identifier field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_locid dot11_rmrep_locid_t; +#define DOT11_LOCID_UNKNOWN_LEN 10 +#define DOT11_LOCID_SUBELEM_ID 0 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel { + uint8 id; + uint8 len; + uint16 max_age; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t; +#define DOT11_FTM_RANGE_SUBELEM_ID 4 +#define DOT11_FTM_RANGE_SUBELEM_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + /* neighbor report sub-elements */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t; +#define DOT11_RMREQ_FTM_RANGE_LEN 8 + +#define DOT11_FTM_RANGE_LEN 3 +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 range[DOT11_FTM_RANGE_LEN]; + uint8 max_err[DOT11_FTM_RANGE_LEN]; + uint8 rsvd; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t; +#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15 + +enum { + DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 2, + DOT11_FTM_RANGE_ERROR_AP_FAILED = 3, + DOT11_FTM_RANGE_ERROR_TX_FAILED = 8, + DOT11_FTM_RANGE_ERROR_MAX +}; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 code; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t; +#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT 11 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range { + uint8 id; 
+ uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 entry_count; + uint8 data[2]; /* includes pad */ + /* + dot11_ftm_range_entry_t entries[entry_count]; + uint8 error_count; + dot11_ftm_error_entry_t errors[error_count]; + */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t; + +#define DOT11_FTM_RANGE_REP_MIN_LEN 6 /* No extra byte for error_count */ +#define DOT11_FTM_RANGE_ENTRY_CNT_MAX 15 +#define DOT11_FTM_RANGE_ERROR_CNT_MAX 11 +#define DOT11_FTM_RANGE_REP_FIXED_LEN 1 /* No extra byte for error_count */ +/** Measurement pause request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 pause_time; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t; +#define DOT11_RMREQ_PAUSE_LEN 7 + +/* Neighbor Report subelements ID (11k & 11v) */ +#define DOT11_NGBR_TSF_INFO_SE_ID 1 +#define DOT11_NGBR_CCS_SE_ID 2 +#define DOT11_NGBR_BSSTRANS_PREF_SE_ID 3 +#define DOT11_NGBR_BSS_TERM_DUR_SE_ID 4 +#define DOT11_NGBR_BEARING_SE_ID 5 +#define DOT11_NGBR_WIDE_BW_CHAN_SE_ID 6 + +/** Neighbor Report, BSS Transition Candidate Preference subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se { + uint8 sub_id; + uint8 len; + uint8 preference; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t; +#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1 +#define DOT11_NGBR_BSSTRANS_PREF_SE_IE_LEN 3 +#define DOT11_NGBR_BSSTRANS_PREF_SE_HIGHEST 0xff + +/** Neighbor Report, BSS Termination Duration subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se { + uint8 sub_id; + uint8 len; + uint8 tsf[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t; +#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN 10 + +/* Neighbor Report BSSID Information Field */ +#define DOT11_NGBR_BI_REACHABILTY_UNKN 0x0002 
+#define DOT11_NGBR_BI_REACHABILTY 0x0003 +#define DOT11_NGBR_BI_SEC 0x0004 +#define DOT11_NGBR_BI_KEY_SCOPE 0x0008 +#define DOT11_NGBR_BI_CAP 0x03f0 +#define DOT11_NGBR_BI_CAP_SPEC_MGMT 0x0010 +#define DOT11_NGBR_BI_CAP_QOS 0x0020 +#define DOT11_NGBR_BI_CAP_APSD 0x0040 +#define DOT11_NGBR_BI_CAP_RDIO_MSMT 0x0080 +#define DOT11_NGBR_BI_CAP_DEL_BA 0x0100 +#define DOT11_NGBR_BI_CAP_IMM_BA 0x0200 +#define DOT11_NGBR_BI_MOBILITY 0x0400 +#define DOT11_NGBR_BI_HT 0x0800 +#define DOT11_NGBR_BI_VHT 0x1000 +#define DOT11_NGBR_BI_FTM 0x2000 + +/** Neighbor Report element (11k & 11v) */ +BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; /* Operating class */ + uint8 channel; + uint8 phytype; + uint8 data[1]; /* Variable size subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t; +#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13u + +/* MLME Enumerations */ +#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */ +#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */ +#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */ +#define DOT11_BSSTYPE_MESH 3 /* d11 Mesh */ +#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */ +#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */ + +/** Link Measurement */ +BWL_PRE_PACKED_STRUCT struct dot11_lmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 txpwr; /* Transmit Power Used */ + uint8 maxtxpwr; /* Max Transmit Power */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmreq dot11_lmreq_t; +#define DOT11_LMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_lmrep { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + dot11_tpc_rep_t tpc; /* TPC element */ + uint8 rxant; /* Receive Antenna ID */ + uint8 txant; /* Transmit 
Antenna ID */ + uint8 rcpi; /* RCPI */ + uint8 rsni; /* RSNI */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmrep dot11_lmrep_t; +#define DOT11_LMREP_LEN 11 + +#define DOT11_MP_CAP_SPECTRUM 0x01 /* d11 cap. spectrum */ +#define DOT11_MP_CAP_SHORTSLOT 0x02 /* d11 cap. shortslot */ +/* Measurement Pilot */ +BWL_PRE_PACKED_STRUCT struct dot11_mprep { + uint8 cap_info; /* Condensed capability Info. */ + uint8 country[2]; /* Condensed country string */ + uint8 opclass; /* Op. Class */ + uint8 channel; /* Channel */ + uint8 mp_interval; /* Measurement Pilot Interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mprep dot11_mprep_t; +#define DOT11_MPREP_LEN 6 + +/* 802.11 BRCM "Compromise" Pre N constants */ +#define PREN_PREAMBLE 24 /* green field preamble time */ +#define PREN_MM_EXT 12 /* extra mixed mode preamble time */ +#define PREN_PREAMBLE_EXT 4 /* extra preamble (multiply by unique_streams-1) */ + +/* 802.11N PHY constants */ +#define RIFS_11N_TIME 2 /* NPHY RIFS time */ + +/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3 + * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2 + */ +/* HT-SIG1 */ +#define HT_SIG1_MCS_MASK 0x00007F +#define HT_SIG1_CBW 0x000080 +#define HT_SIG1_HT_LENGTH 0xFFFF00 + +/* HT-SIG2 */ +#define HT_SIG2_SMOOTHING 0x000001 +#define HT_SIG2_NOT_SOUNDING 0x000002 +#define HT_SIG2_RESERVED 0x000004 +#define HT_SIG2_AGGREGATION 0x000008 +#define HT_SIG2_STBC_MASK 0x000030 +#define HT_SIG2_STBC_SHIFT 4 +#define HT_SIG2_FEC_CODING 0x000040 +#define HT_SIG2_SHORT_GI 0x000080 +#define HT_SIG2_ESS_MASK 0x000300 +#define HT_SIG2_ESS_SHIFT 8 +#define HT_SIG2_CRC 0x03FC00 +#define HT_SIG2_TAIL 0x1C0000 + +/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */ +#define HT_T_LEG_PREAMBLE 16 +#define HT_T_L_SIG 4 +#define HT_T_SIG 8 +#define HT_T_LTF1 4 +#define HT_T_GF_LTF1 8 +#define HT_T_LTFs 4 +#define HT_T_STF 4 +#define HT_T_GF_STF 8 +#define HT_T_SYML 4 + +#define HT_N_SERVICE 16 /* bits in SERVICE field */ +#define 
HT_N_TAIL 6 /* tail bits per BCC encoder */ + +/* 802.11 A PHY constants */ +#define APHY_SLOT_TIME 9 /* APHY slot time */ +#define APHY_SIFS_TIME 16 /* APHY SIFS time */ +#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) /* APHY DIFS time */ +#define APHY_PREAMBLE_TIME 16 /* APHY preamble time */ +#define APHY_SIGNAL_TIME 4 /* APHY signal time */ +#define APHY_SYMBOL_TIME 4 /* APHY symbol time */ +#define APHY_SERVICE_NBITS 16 /* APHY service nbits */ +#define APHY_TAIL_NBITS 6 /* APHY tail nbits */ +#define APHY_CWMIN 15 /* APHY cwmin */ +#define APHY_PHYHDR_DUR 20 /* APHY PHY Header Duration */ + +/* 802.11 B PHY constants */ +#define BPHY_SLOT_TIME 20 /* BPHY slot time */ +#define BPHY_SIFS_TIME 10 /* BPHY SIFS time */ +#define BPHY_DIFS_TIME 50 /* BPHY DIFS time */ +#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */ +#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */ +#define BPHY_CWMIN 31 /* BPHY cwmin */ +#define BPHY_SHORT_PHYHDR_DUR 96 /* BPHY Short PHY Header Duration */ +#define BPHY_LONG_PHYHDR_DUR 192 /* BPHY Long PHY Header Duration */ + +/* 802.11 G constants */ +#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */ + +#define PHY_CWMAX 1023 /* PHY cwmax */ + +#define DOT11_MAXNUMFRAGS 16 /* max # fragments per MSDU */ + +/* 802.11 VHT constants */ + +typedef int vht_group_id_t; + +/* for VHT-A1 */ +/* SIG-A1 reserved bits */ +#define VHT_SIGA1_CONST_MASK 0x800004 + +#define VHT_SIGA1_BW_MASK 0x000003 +#define VHT_SIGA1_20MHZ_VAL 0x000000 +#define VHT_SIGA1_40MHZ_VAL 0x000001 +#define VHT_SIGA1_80MHZ_VAL 0x000002 +#define VHT_SIGA1_160MHZ_VAL 0x000003 + +#define VHT_SIGA1_STBC 0x000008 + +#define VHT_SIGA1_GID_MASK 0x0003f0 +#define VHT_SIGA1_GID_SHIFT 4 +#define VHT_SIGA1_GID_TO_AP 0x00 +#define VHT_SIGA1_GID_NOT_TO_AP 0x3f +#define VHT_SIGA1_GID_MAX_GID 0x3f + +#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00 +#define VHT_SIGA1_NSTS_SHIFT 10 +#define VHT_SIGA1_MAX_USERPOS 3 + +#define VHT_SIGA1_PARTIAL_AID_MASK 
0x3fe000 +#define VHT_SIGA1_PARTIAL_AID_SHIFT 13 + +#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED 0x400000 + +/* for VHT-A2 */ +#define VHT_SIGA2_GI_NONE 0x000000 +#define VHT_SIGA2_GI_SHORT 0x000001 +#define VHT_SIGA2_GI_W_MOD10 0x000002 +#define VHT_SIGA2_CODING_LDPC 0x000004 +#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM 0x000008 +#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100 +#define VHT_SIGA2_MCS_SHIFT 4 + +#define VHT_SIGA2_B9_RESERVED 0x000200 +#define VHT_SIGA2_TAIL_MASK 0xfc0000 +#define VHT_SIGA2_TAIL_VALUE 0x000000 + +/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */ +#define VHT_T_LEG_PREAMBLE 16 +#define VHT_T_L_SIG 4 +#define VHT_T_SIG_A 8 +#define VHT_T_LTF 4 +#define VHT_T_STF 4 +#define VHT_T_SIG_B 4 +#define VHT_T_SYML 4 + +#define VHT_N_SERVICE 16 /* bits in SERVICE field */ +#define VHT_N_TAIL 6 /* tail bits per BCC encoder */ + +/** dot11Counters Table - 802.11 spec., Annex D */ +typedef struct d11cnt { + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount */ + uint32 rxundec; /* dot11WEPUndecryptableCount */ +} d11cnt_t; + +#define BRCM_PROP_OUI "\x00\x90\x4C" + +#define BRCM_FTM_IE_TYPE 14 + +/* #define HT_CAP_IE_TYPE 51 + * #define HT_ADD_IE_TYPE 52 + * #define BRCM_EXTCH_IE_TYPE 53 + * #define MEMBER_OF_BRCM_PROP_IE_TYPE 54 + * #define BRCM_RELMACST_IE_TYPE 55 + * #define BRCM_EVT_WL_BSS_INFO 64 + * #define RWL_ACTION_WIFI_FRAG_TYPE 85 + * #define BTC_INFO_BRCM_PROP_IE_TYPE 90 + * #define 
ULB_BRCM_PROP_IE_TYPE 91 + * #define SDB_BRCM_PROP_IE_TYPE 92 + */ + +/* Action frame type for RWL */ +#define RWL_WIFI_DEFAULT 0 +#define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding server */ +#define RWL_WIFI_FOUND_PEER 10 /* Server response to the client */ +#define RWL_ACTION_WIFI_FRAG_TYPE 85 /* Fragment indicator for receiver */ + +#define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */ +#define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */ + +/* Action frame type for FTM Initiator Report */ +#define BRCM_FTM_VS_AF_TYPE 14 +enum { + BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */ + BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ +}; + +/* brcm syscap_ie cap */ +#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */ + +#define BRCM_OUI "\x00\x10\x18" /* Broadcom OUI */ + +/** BRCM info element */ +BWL_PRE_PACKED_STRUCT struct brcm_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 ver; /* type/ver of this IE */ + uint8 assoc; /* # of assoc STAs */ + uint8 flags; /* misc flags */ + uint8 flags1; /* misc flags */ + uint16 amsdu_mtu_pref; /* preferred A-MSDU MTU */ +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_ie brcm_ie_t; +#define BRCM_IE_LEN 11 /* BRCM IE length */ +#define BRCM_IE_VER 2 /* BRCM IE version */ +#define BRCM_IE_LEGACY_AES_VER 1 /* BRCM IE legacy AES version */ + +/* brcm_ie flags */ +#define BRF_ABCAP 0x1 /* afterburner is obsolete, defined for backward compat */ +#define BRF_ABRQRD 0x2 /* afterburner is obsolete, defined for backward compat */ +#define BRF_LZWDS 0x4 /* lazy wds enabled */ +#define BRF_BLOCKACK 0x8 /* BlockACK capable */ +#define BRF_ABCOUNTER_MASK 0xf0 /* afterburner is obsolete, defined for backward compat */ +#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */ +#define BRF_MEDIA_CLIENT 0x20 /* re-use afterburner bit to indicate media client device */ + +#define GET_BRF_PROP_11N_MCS(brcm_ie) 
\ + (!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS)) + +/* brcm_ie flags1 */ +#define BRF1_AMSDU 0x1 /* A-MSDU capable */ +#define BRF1_WNM 0x2 /* WNM capable */ +#define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */ +#define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */ +#define BRF1_RX_LARGE_AGG 0x10 /* device can rx large aggregates */ +#define BRF1_RFAWARE_DCS 0x20 /* RFAWARE dynamic channel selection (DCS) */ +#define BRF1_SOFTAP 0x40 /* Configure as Broadcom SOFTAP */ +#define BRF1_DWDS 0x80 /* DWDS capable */ + +/** Vendor IE structure */ +BWL_PRE_PACKED_STRUCT struct vndr_ie { + uchar id; + uchar len; + uchar oui [3]; + uchar data [1]; /* Variable size data */ +} BWL_POST_PACKED_STRUCT; +typedef struct vndr_ie vndr_ie_t; + +#define VNDR_IE_HDR_LEN 2u /* id + len field */ +#define VNDR_IE_MIN_LEN 3u /* size of the oui field */ +#define VNDR_IE_FIXED_LEN (VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN) + +#define VNDR_IE_MAX_LEN 255u /* vendor IE max length, without ID and len */ + +/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */ +BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie { + uchar id; + uchar len; + uchar oui[3]; + uint8 type; /* type indicates what follows */ + struct ether_addr ea; /* Device Primary MAC Address */ +} BWL_POST_PACKED_STRUCT; +typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t; + +#define MEMBER_OF_BRCM_PROP_IE_LEN 10 /* IE max length */ +#define MEMBER_OF_BRCM_PROP_IE_HDRLEN (sizeof(member_of_brcm_prop_ie_t)) +#define MEMBER_OF_BRCM_PROP_IE_TYPE 54 + +/** BRCM Reliable Multicast IE */ +BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + struct ether_addr ea; /* The ack sender's MAC Address */ + struct ether_addr mcast_ea; /* The multicast MAC address */ + uint8 updtmo; /* time interval(second) for client to send null packet to report its rssi */ +} BWL_POST_PACKED_STRUCT; +typedef
struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t; + +/* IE length */ +/* BRCM_PROP_IE_LEN = sizeof(relmcast_brcm_prop_ie_t)-((sizeof (id) + sizeof (len)))? */ +#define RELMCAST_BRCM_PROP_IE_LEN (sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8))) + +#define RELMCAST_BRCM_PROP_IE_TYPE 55 + +/* BRCM BTC IE */ +BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; /* type inidicates what follows */ + uint32 info; +} BWL_POST_PACKED_STRUCT; +typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t; + +#define BTC_INFO_BRCM_PROP_IE_TYPE 90 +#define BRCM_BTC_INFO_TYPE_LEN (sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8))) + +/* ************* HT definitions. ************* */ +#define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */ +#define MAX_MCS_NUM (128) /* max mcs number = 128 */ +#define BASIC_HT_MCS 0xFFu /* HT MCS supported rates */ + +BWL_PRE_PACKED_STRUCT struct ht_cap_ie { + uint16 cap; + uint8 params; + uint8 supp_mcs[MCSSET_LEN]; + uint16 ext_htcap; + uint32 txbf_cap; + uint8 as_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_cap_ie ht_cap_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie { + uint8 id; + uint8 len; + ht_cap_ie_t ht_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t; + +/* CAP IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. 
IE until this is resolved */ +/* the capability IE is primarily used to convey this nodes abilities */ +BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + ht_cap_ie_t cap_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; + +#define HT_PROP_IE_OVERHEAD 4 /* overhead bytes for prop oui ie */ +#define HT_CAP_IE_LEN 26 /* HT capability len (based on .11n d2.0) */ +#define HT_CAP_IE_TYPE 51 + +#define HT_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */ +#define HT_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */ +#define HT_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */ +#define HT_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */ +#define HT_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */ +#define HT_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */ +#define HT_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */ +#define HT_CAP_GF 0x0010 /* Greenfield preamble support */ +#define HT_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */ +#define HT_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */ +#define HT_CAP_TX_STBC 0x0080 /* Tx STBC support */ +#define HT_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */ +#define HT_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */ +#define HT_CAP_DELAYED_BA 0x0400 /* delayed BA support */ +#define HT_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */ + +#define HT_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */ +#define HT_CAP_PSMP 0x2000 /* Power Save Multi Poll support */ +#define HT_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */ +#define HT_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */ + +#define HT_CAP_RX_STBC_NO 0x0 /* no rx STBC support */ +#define HT_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */ +#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC 
support of 1-2 spatial streams */ +#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */ + +#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX 0x1 +#define HT_CAP_TXBF_CAP_NDP_RX 0x8 +#define HT_CAP_TXBF_CAP_NDP_TX 0x10 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI 0x100 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING 0x200 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING 0x400 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK 0x1800 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT 11 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK 0x6000 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT 13 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK 0x18000 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT 15 +#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT 19 +#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT 21 +#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT 23 +#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK 0x1800000 + +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT 27 +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK 0x18000000 + +#define HT_CAP_TXBF_FB_TYPE_NONE 0 +#define HT_CAP_TXBF_FB_TYPE_DELAYED 1 +#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 2 +#define HT_CAP_TXBF_FB_TYPE_BOTH 3 + +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK 0x400 +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT 10 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15 + +#define HT_CAP_MCS_FLAGS_SUPP_BYTE 12 /* byte offset in HT Cap Supported MCS for various flags */ +#define HT_CAP_MCS_RX_8TO15_BYTE_OFFSET 1 +#define HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL 0x02 +#define HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK 0x0C + +#define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */ +#define VHT_MPDU_MSDU_DELTA 56 /* Difference in spec - vht mpdu, amsdu len */ +/* Max AMSDU len - per spec */ +#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA) + +#define HT_MAX_AMSDU 7935 /* max amsdu size (bytes) per the HT spec */ +#define HT_MIN_AMSDU 3835 /* min amsdu size (bytes) per the HT spec */ + +#define 
HT_PARAMS_RX_FACTOR_MASK 0x03 /* ampdu rcv factor mask */ +#define HT_PARAMS_DENSITY_MASK 0x1C /* ampdu density mask */ +#define HT_PARAMS_DENSITY_SHIFT 2 /* ampdu density shift */ + +/* HT/AMPDU specific define */ +#define AMPDU_MAX_MPDU_DENSITY 7 /* max mpdu density; in 1/4 usec units */ +#define AMPDU_DENSITY_NONE 0 /* No density requirement */ +#define AMPDU_DENSITY_1over4_US 1 /* 1/4 us density */ +#define AMPDU_DENSITY_1over2_US 2 /* 1/2 us density */ +#define AMPDU_DENSITY_1_US 3 /* 1 us density */ +#define AMPDU_DENSITY_2_US 4 /* 2 us density */ +#define AMPDU_DENSITY_4_US 5 /* 4 us density */ +#define AMPDU_DENSITY_8_US 6 /* 8 us density */ +#define AMPDU_DENSITY_16_US 7 /* 16 us density */ +#define AMPDU_RX_FACTOR_8K 0 /* max rcv ampdu len (8kb) */ +#define AMPDU_RX_FACTOR_16K 1 /* max rcv ampdu len (16kb) */ +#define AMPDU_RX_FACTOR_32K 2 /* max rcv ampdu len (32kb) */ +#define AMPDU_RX_FACTOR_64K 3 /* max rcv ampdu len (64kb) */ + +/* AMPDU RX factors for VHT rates */ +#define AMPDU_RX_FACTOR_128K 4 /* max rcv ampdu len (128kb) */ +#define AMPDU_RX_FACTOR_256K 5 /* max rcv ampdu len (256kb) */ +#define AMPDU_RX_FACTOR_512K 6 /* max rcv ampdu len (512kb) */ +#define AMPDU_RX_FACTOR_1024K 7 /* max rcv ampdu len (1024kb) */ + +#define AMPDU_RX_FACTOR_BASE 8*1024 /* ampdu factor base for rx len */ +#define AMPDU_RX_FACTOR_BASE_PWR 13 /* ampdu factor base for rx len in power of 2 */ + +#define AMPDU_DELIMITER_LEN 4u /* length of ampdu delimiter */ +#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */ + +#define HT_CAP_EXT_PCO 0x0001 +#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006 +#define HT_CAP_EXT_PCO_TTIME_SHIFT 1 +#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300 +#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8 +#define HT_CAP_EXT_HTC 0x0400 +#define HT_CAP_EXT_RD_RESP 0x0800 + +/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */ +BWL_PRE_PACKED_STRUCT struct ht_add_ie { + uint8 ctl_ch; /* control channel 
number */ + uint8 byte1; /* ext ch,rec. ch. width, RIFS support */ + uint16 opmode; /* operation mode */ + uint16 misc_bits; /* misc bits */ + uint8 basic_mcs[MCSSET_LEN]; /* required MCS set */ +} BWL_POST_PACKED_STRUCT; +typedef struct ht_add_ie ht_add_ie_t; + +/* ADD IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. IE until this is resolved */ +/* the additional IE is primarily used to convey the current BSS configuration */ +BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* indicates what follows */ + ht_add_ie_t add_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_add_ie ht_prop_add_ie_t; + +#define HT_ADD_IE_LEN 22 +#define HT_ADD_IE_TYPE 52 + +/* byte1 defn's */ +#define HT_BW_ANY 0x04 /* set, STA can use 20 or 40MHz */ +#define HT_RIFS_PERMITTED 0x08 /* RIFS allowed */ + +/* opmode defn's */ +#define HT_OPMODE_MASK 0x0003 /* protection mode mask */ +#define HT_OPMODE_SHIFT 0 /* protection mode shift */ +#define HT_OPMODE_PURE 0x0000 /* protection mode PURE */ +#define HT_OPMODE_OPTIONAL 0x0001 /* protection mode optional */ +#define HT_OPMODE_HT20IN40 0x0002 /* protection mode 20MHz HT in 40MHz BSS */ +#define HT_OPMODE_MIXED 0x0003 /* protection mode Mixed Mode */ +#define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */ +#define DOT11N_TXBURST 0x0008 /* Tx burst limit */ +#define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */ +#define HT_OPMODE_CCFS2_MASK 0x1fe0 /* Channel Center Frequency Segment 2 mask */ +#define HT_OPMODE_CCFS2_SHIFT 5 /* Channel Center Frequency Segment 2 shift */ + +/* misc_bites defn's */ +#define HT_BASIC_STBC_MCS 0x007f /* basic STBC MCS */ +#define HT_DUAL_STBC_PROT 0x0080 /* Dual STBC Protection */ +#define HT_SECOND_BCN 0x0100 /* Secondary beacon support */ +#define HT_LSIG_TXOP 0x0200 /* L-SIG TXOP Protection full support */ +#define HT_PCO_ACTIVE 0x0400 /* PCO active */ +#define HT_PCO_PHASE 
0x0800 /* PCO phase */ +#define HT_DUALCTS_PROTECTION 0x0080 /* DUAL CTS protection needed */ + +/* Tx Burst Limits */ +#define DOT11N_2G_TXBURST_LIMIT 6160 /* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */ +#define DOT11N_5G_TXBURST_LIMIT 3080 /* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */ + +/* Macros for opmode */ +#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + >> HT_OPMODE_SHIFT) +#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_MIXED) /* mixed mode present */ +#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_HT20IN40) /* 20MHz HT present */ +#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_OPTIONAL) /* Optional protection present */ +#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \ + HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */ +#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \ + == HT_OPMODE_NONGF) /* non-GF present */ +#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \ + == DOT11N_TXBURST) /* Tx Burst present */ +#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \ + == DOT11N_OBSS_NONHT) /* OBSS Non-HT present */ +#define HT_OPMODE_CCFS2_GET(add_ie) ((ltoh16_ua(&(add_ie)->opmode) & HT_OPMODE_CCFS2_MASK) \ + >> HT_OPMODE_CCFS2_SHIFT) /* get CCFS2 */ +#define HT_OPMODE_CCFS2_SET(add_ie, ccfs2) do { /* set CCFS2 */ \ + (add_ie)->opmode &= htol16(~HT_OPMODE_CCFS2_MASK); \ + (add_ie)->opmode |= htol16(((ccfs2) << HT_OPMODE_CCFS2_SHIFT) & HT_OPMODE_CCFS2_MASK); \ +} while (0) + +/* Macros for HT MCS field access */ +#define HT_CAP_MCS_BITMASK(supp_mcs) \ + ((supp_mcs)[HT_CAP_MCS_RX_8TO15_BYTE_OFFSET]) +#define HT_CAP_MCS_TX_RX_UNEQUAL(supp_mcs) \ + ((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL) 
+#define HT_CAP_MCS_TX_STREAM_SUPPORT(supp_mcs) \ + ((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK) + +BWL_PRE_PACKED_STRUCT struct obss_params { + uint16 passive_dwell; + uint16 active_dwell; + uint16 bss_widthscan_interval; + uint16 passive_total; + uint16 active_total; + uint16 chanwidth_transition_dly; + uint16 activity_threshold; +} BWL_POST_PACKED_STRUCT; +typedef struct obss_params obss_params_t; + +BWL_PRE_PACKED_STRUCT struct dot11_obss_ie { + uint8 id; + uint8 len; + obss_params_t obss_params; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_ie dot11_obss_ie_t; +#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) /* HT OBSS len (based on 802.11n d3.0) */ + +/* HT control field */ +#define HT_CTRL_LA_TRQ 0x00000002 /* sounding request */ +#define HT_CTRL_LA_MAI 0x0000003C /* MCS request or antenna selection indication */ +#define HT_CTRL_LA_MAI_SHIFT 2 +#define HT_CTRL_LA_MAI_MRQ 0x00000004 /* MCS request */ +#define HT_CTRL_LA_MAI_MSI 0x00000038 /* MCS request sequence identifier */ +#define HT_CTRL_LA_MFSI 0x000001C0 /* MFB sequence identifier */ +#define HT_CTRL_LA_MFSI_SHIFT 6 +#define HT_CTRL_LA_MFB_ASELC 0x0000FE00 /* MCS feedback, antenna selection command/data */ +#define HT_CTRL_LA_MFB_ASELC_SH 9 +#define HT_CTRL_LA_ASELC_CMD 0x00000C00 /* ASEL command */ +#define HT_CTRL_LA_ASELC_DATA 0x0000F000 /* ASEL data */ +#define HT_CTRL_CAL_POS 0x00030000 /* Calibration position */ +#define HT_CTRL_CAL_SEQ 0x000C0000 /* Calibration sequence */ +#define HT_CTRL_CSI_STEERING 0x00C00000 /* CSI/Steering */ +#define HT_CTRL_CSI_STEER_SHIFT 22 +#define HT_CTRL_CSI_STEER_NFB 0 /* no fedback required */ +#define HT_CTRL_CSI_STEER_CSI 1 /* CSI, H matrix */ +#define HT_CTRL_CSI_STEER_NCOM 2 /* non-compressed beamforming */ +#define HT_CTRL_CSI_STEER_COM 3 /* compressed beamforming */ +#define HT_CTRL_NDP_ANNOUNCE 0x01000000 /* NDP announcement */ +#define HT_CTRL_AC_CONSTRAINT 0x40000000 /* AC Constraint */ +#define 
HT_CTRL_RDG_MOREPPDU 0x80000000 /* RDG/More PPDU */ + +/* ************* VHT definitions. ************* */ + +/** + * VHT Capabilites IE (sec 8.4.2.160) + */ + +BWL_PRE_PACKED_STRUCT struct vht_cap_ie { + uint32 vht_cap_info; + /* supported MCS set - 64 bit field */ + uint16 rx_mcs_map; + uint16 rx_max_rate; + uint16 tx_mcs_map; + uint16 tx_max_rate; +} BWL_POST_PACKED_STRUCT; +typedef struct vht_cap_ie vht_cap_ie_t; + +/* 4B cap_info + 8B supp_mcs */ +#define VHT_CAP_IE_LEN 12 + +/* VHT Capabilities Info field - 32bit - in VHT Cap IE */ +#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003 +#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c +#define VHT_CAP_INFO_LDPC 0x00000010 +#define VHT_CAP_INFO_SGI_80MHZ 0x00000020 +#define VHT_CAP_INFO_SGI_160MHZ 0x00000040 +#define VHT_CAP_INFO_TX_STBC 0x00000080 +#define VHT_CAP_INFO_RX_STBC_MASK 0x00000700 +#define VHT_CAP_INFO_RX_STBC_SHIFT 8 +#define VHT_CAP_INFO_SU_BEAMFMR 0x00000800 +#define VHT_CAP_INFO_SU_BEAMFMEE 0x00001000 +#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK 0x0000e000 +#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT 13 +#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK 0x00070000 +#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT 16 +#define VHT_CAP_INFO_MU_BEAMFMR 0x00080000 +#define VHT_CAP_INFO_MU_BEAMFMEE 0x00100000 +#define VHT_CAP_INFO_TXOPPS 0x00200000 +#define VHT_CAP_INFO_HTCVHT 0x00400000 +#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK 0x03800000 +#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23 +#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000 +#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26 +#define VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK 0xc0000000 +#define VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT 30 + +/* get Extended NSS BW Support passing vht cap info */ +#define VHT_CAP_EXT_NSS_BW_SUP(cap_info) \ + (((cap_info) & VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK) >> VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT) + +/* VHT CAP INFO extended NSS BW support - refer to IEEE 802.11 REVmc D8.0 Figure 9-559 */ +#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160 1 /* 160MHz at 
half NSS CAP */ +#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160_80P80 2 /* 160 & 80p80 MHz at half NSS CAP */ + +/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */ +#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff +#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0 +#define VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 5 + +#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff +#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0 + +/* defines for field(s) in vht_cap_ie->rx_max_rate */ +#define VHT_CAP_MAX_NSTS_MASK 0xe000 +#define VHT_CAP_MAX_NSTS_SHIFT 13 + +/* defines for field(s) in vht_cap_ie->tx_max_rate */ +#define VHT_CAP_EXT_NSS_BW_CAP 0x2000 + +#define VHT_CAP_MCS_MAP_0_7 0 +#define VHT_CAP_MCS_MAP_0_8 1 +#define VHT_CAP_MCS_MAP_0_9 2 +#define VHT_CAP_MCS_MAP_NONE 3 +#define VHT_CAP_MCS_MAP_S 2 /* num bits for 1-stream */ +#define VHT_CAP_MCS_MAP_M 0x3 /* mask for 1-stream */ +/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */ +#define VHT_CAP_MCS_MAP_NONE_ALL 0xffff + +/* VHT rates bitmap */ +#define VHT_CAP_MCS_0_7_RATEMAP 0x00ff +#define VHT_CAP_MCS_0_8_RATEMAP 0x01ff +#define VHT_CAP_MCS_0_9_RATEMAP 0x03ff +#define VHT_CAP_MCS_FULL_RATEMAP VHT_CAP_MCS_0_9_RATEMAP + +#define VHT_PROP_MCS_MAP_10_11 0 +#define VHT_PROP_MCS_MAP_UNUSED1 1 +#define VHT_PROP_MCS_MAP_UNUSED2 2 +#define VHT_PROP_MCS_MAP_NONE 3 +#define VHT_PROP_MCS_MAP_NONE_ALL 0xffff + +/* VHT prop rates bitmap */ +#define VHT_PROP_MCS_10_11_RATEMAP 0x0c00 +#define VHT_PROP_MCS_FULL_RATEMAP VHT_PROP_MCS_10_11_RATEMAP + +#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3) +/* mcsmap with MCS0-9 for Nss = 3 */ +#define VHT_CAP_MCS_MAP_0_9_NSS3 \ + ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \ + (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \ + (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3))) +#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */ + +#define VHT_CAP_MCS_MAP_NSS_MAX 8 + +/* get mcsmap with given mcs for given nss streams */ +#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \ + do { \ + int 
i; \ + for (i = 1; i <= nss; i++) { \ + VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \ + } \ + } while (0) + +/* Map the mcs code to mcs bit map */ +#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \ + ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \ + (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \ + (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0) + +#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \ + ((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0) + +/* Map the mcs bit map to mcs code */ +#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \ + ((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \ + (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \ + (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE) + +#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \ + (((mcs_map & 0xc00) == 0xc00) ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE) + +/** VHT Capabilities Supported Channel Width */ +typedef enum vht_cap_chan_width { + VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00, + VHT_CAP_CHAN_WIDTH_SUPPORT_160 = 0x04, + VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080 = 0x08 +} vht_cap_chan_width_t; + +/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */ +typedef enum vht_cap_max_mpdu_len { + VHT_CAP_MPDU_MAX_4K = 0x00, + VHT_CAP_MPDU_MAX_8K = 0x01, + VHT_CAP_MPDU_MAX_11K = 0x02 +} vht_cap_max_mpdu_len_t; + +/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */ +#define VHT_MPDU_LIMIT_4K 3895 +#define VHT_MPDU_LIMIT_8K 7991 +#define VHT_MPDU_LIMIT_11K 11454 + +/** + * VHT Operation IE (sec 8.4.2.161) + */ + +BWL_PRE_PACKED_STRUCT struct vht_op_ie { + uint8 chan_width; + uint8 chan1; + uint8 chan2; + uint16 supp_mcs; /* same def as above in vht cap */ +} BWL_POST_PACKED_STRUCT; +typedef struct vht_op_ie vht_op_ie_t; + +/* 3B VHT Op info + 2B Basic MCS */ +#define VHT_OP_IE_LEN 5 + +typedef enum vht_op_chan_width { + VHT_OP_CHAN_WIDTH_20_40 
= 0, + VHT_OP_CHAN_WIDTH_80 = 1, + VHT_OP_CHAN_WIDTH_160 = 2, /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */ + VHT_OP_CHAN_WIDTH_80_80 = 3 /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */ +} vht_op_chan_width_t; + +#define VHT_OP_INFO_LEN 3 + +/* AID length */ +#define AID_IE_LEN 2 +/** + * BRCM vht features IE header + * The header if the fixed part of the IE + * On the 5GHz band this is the entire IE, + * on 2.4GHz the VHT IEs as defined in the 802.11ac + * specification follows + * + * + * VHT features rates bitmap. + * Bit0: 5G MCS 0-9 BW 160MHz + * Bit1: 5G MCS 0-9 support BW 80MHz + * Bit2: 5G MCS 0-9 support BW 20MHz + * Bit3: 2.4G MCS 0-9 support BW 20MHz + * Bits:4-7 Reserved for future use + * + */ +#define VHT_FEATURES_IE_TYPE 0x4 +BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr { + uint8 oui[3]; + uint8 type; /* type of this IE = 4 */ + uint8 rate_mask; /* VHT rate mask */ +} BWL_POST_PACKED_STRUCT; +typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; + +/* Def for rx & tx basic mcs maps - ea ss num has 2 bits of info */ +#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S) +#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \ + (((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M) +#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \ + do { \ + (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \ + (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \ + } while (0) +#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \ + (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE) + +/* Get the max ss supported from the mcs map */ +#define VHT_MAX_SS_SUPPORTED(mcsMap) \ + VHT_MCS_SS_SUPPORTED(8, mcsMap) ? 8 : \ + VHT_MCS_SS_SUPPORTED(7, mcsMap) ? 7 : \ + VHT_MCS_SS_SUPPORTED(6, mcsMap) ? 6 : \ + VHT_MCS_SS_SUPPORTED(5, mcsMap) ? 5 : \ + VHT_MCS_SS_SUPPORTED(4, mcsMap) ? 4 : \ + VHT_MCS_SS_SUPPORTED(3, mcsMap) ? 3 : \ + VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 
2 : \ + VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0 + +/* ************* WPA definitions. ************* */ +#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */ +#define WPA_OUI_LEN 3 /* WPA OUI length */ +#define WPA_OUI_TYPE 1 +#define WPA_VERSION 1 /* WPA version */ +#define WPA_VERSION_LEN 2 /* WPA version length */ +#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */ +#define WPA2_OUI_LEN 3 /* WPA2 OUI length */ +#define WPA2_VERSION 1 /* WPA2 version */ +#define WPA2_VERSION_LEN 2 /* WAP2 version length */ +#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */ + +/* ************* WPS definitions. ************* */ +#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */ +#define WPS_OUI_LEN 3 /* WPS OUI length */ +#define WPS_OUI_TYPE 4 + +/* ************* WFA definitions. ************* */ +#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */ +#define WFA_OUI_LEN 3 /* WFA OUI length */ +#define WFA_OUI_TYPE_P2P 9 + +#ifdef WL_LEGACY_P2P +#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */ +#define APPLE_OUI_LEN 3 +#define APPLE_OUI_TYPE_P2P 5 +#endif /* WL_LEGACY_P2P */ + +#ifndef WL_LEGACY_P2P +#define P2P_OUI WFA_OUI +#define P2P_OUI_LEN WFA_OUI_LEN +#define P2P_OUI_TYPE WFA_OUI_TYPE_P2P +#else +#define P2P_OUI APPLE_OUI +#define P2P_OUI_LEN APPLE_OUI_LEN +#define P2P_OUI_TYPE APPLE_OUI_TYPE_P2P +#endif /* !WL_LEGACY_P2P */ + +#define WFA_OUI_TYPE_TPC 8 +#ifdef WLTDLS +#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */ +#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */ +#define WFA_OUI_TYPE_WFD 10 +#endif /* WTDLS */ +#define WFA_OUI_TYPE_HS20 0x10 +#define WFA_OUI_TYPE_OSEN 0x12 +#define WFA_OUI_TYPE_NAN 0x13 +#define WFA_OUI_TYPE_MBO 0x16 +#define WFA_OUI_TYPE_MBO_OCE 0x16 + +/* RSN authenticated key managment suite */ +#define RSN_AKM_NONE 0 /* None (IBSS) */ +#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */ +#define RSN_AKM_PSK 2 /* Pre-shared Key */ +#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */ +#define RSN_AKM_FBT_PSK 4 /* Fast Bss 
transition using Pre-shared Key */ +/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more + * Just kept here to avoid build issue in BISON/CARIBOU branch + */ +#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */ +#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ +#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */ +#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ +#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */ +#define RSN_AKM_SAE_PSK 8 /* AKM for SAE with 4-way handshake */ +#define RSN_AKM_SAE_FBT 9 /* AKM for SAE with FBT */ +#define RSN_AKM_SUITEB_SHA256_1X 11 /* Suite B SHA256 */ +#define RSN_AKM_SUITEB_SHA384_1X 12 /* Suite B-192 SHA384 */ +#define RSN_AKM_FBT_SHA384_1X 13 /* FBT SHA384 */ +#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */ +#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */ +#define RSN_AKM_FBT_SHA256_FILS 16 +#define RSN_AKM_FBT_SHA384_FILS 17 +#define RSN_AKM_OWE 18 /* RFC 8110 OWE */ +#define RSN_AKM_FBT_SHA384_PSK 19 +#define RSN_AKM_PSK_SHA384 20 +/* OSEN authenticated key managment suite */ +#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */ + +/* Key related defines */ +#define DOT11_MAX_DEFAULT_KEYS 4 /* number of default keys */ +#define DOT11_MAX_IGTK_KEYS 2 +#define DOT11_MAX_KEY_SIZE 32 /* max size of any key */ +#define DOT11_MAX_IV_SIZE 16 /* max size of any IV */ +#define DOT11_EXT_IV_FLAG (1<<5) /* flag to indicate IV is > 4 bytes */ +#define DOT11_WPA_KEY_RSC_LEN 8 /* WPA RSC key len */ + +#define WEP1_KEY_SIZE 5 /* max size of any WEP key */ +#define WEP1_KEY_HEX_SIZE 10 /* size of WEP key in hex. */ +#define WEP128_KEY_SIZE 13 /* max size of any WEP key */ +#define WEP128_KEY_HEX_SIZE 26 /* size of WEP key in hex. 
*/ +#define TKIP_MIC_SIZE 8 /* size of TKIP MIC */ +#define TKIP_EOM_SIZE 7 /* max size of TKIP EOM */ +#define TKIP_EOM_FLAG 0x5a /* TKIP EOM flag byte */ +#define TKIP_KEY_SIZE 32 /* size of any TKIP key, includs MIC keys */ +#define TKIP_TK_SIZE 16 +#define TKIP_MIC_KEY_SIZE 8 +#define TKIP_MIC_AUTH_TX 16 /* offset to Authenticator MIC TX key */ +#define TKIP_MIC_AUTH_RX 24 /* offset to Authenticator MIC RX key */ +#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX /* offset to Supplicant MIC RX key */ +#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX /* offset to Supplicant MIC TX key */ +#define AES_KEY_SIZE 16 /* size of AES key */ +#define AES_MIC_SIZE 8 /* size of AES MIC */ +#define BIP_KEY_SIZE 16 /* size of BIP key */ +#define BIP_MIC_SIZE 8 /* sizeof BIP MIC */ + +#define AES_GCM_MIC_SIZE 16 /* size of MIC for 128-bit GCM - .11adD9 */ + +#define AES256_KEY_SIZE 32 /* size of AES 256 key - .11acD5 */ +#define AES256_MIC_SIZE 16 /* size of MIC for 256 bit keys, incl BIP */ + +/* WCN */ +#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */ +#define WCN_TYPE 4 /* WCN type */ + +#ifdef BCMWAPI_WPI +#define SMS4_KEY_LEN 16 +#define SMS4_WPI_CBC_MAC_LEN 16 +#endif // endif + +/* 802.11r protocol definitions */ + +/** Mobility Domain IE */ +BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie { + uint8 id; + uint8 len; /* DOT11_MDID_IE_DATA_LEN (3) */ + uint16 mdid; /* Mobility Domain Id */ + uint8 cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mdid_ie dot11_mdid_ie_t; + +/* length of data portion of Mobility Domain IE */ +#define DOT11_MDID_IE_DATA_LEN 3 + +#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */ +#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */ + +/** Fast Bss Transition IE */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_ie { + uint8 id; + uint8 len; /* At least equal to DOT11_FT_IE_FIXED_LEN (82) */ + uint16 mic_control; /* Mic Control */ + uint8 mic[16]; + uint8 anonce[32]; + uint8 snonce[32]; + /* Optional sub-elements follow */ +} 
BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_ie dot11_ft_ie_t; + +/* Fixed length of data portion of Fast BSS Transition IE. There could be + * optional parameters, which if present, could raise the FT IE length to 255. + */ +#define DOT11_FT_IE_FIXED_LEN 82 + +#define TIE_TYPE_RESERVED 0 +#define TIE_TYPE_REASSOC_DEADLINE 1 +#define TIE_TYPE_KEY_LIEFTIME 2 +#define TIE_TYPE_ASSOC_COMEBACK 3 +BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie { + uint8 id; + uint8 len; + uint8 type; /* timeout interval type */ + uint32 value; /* timeout interval value */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_timeout_ie dot11_timeout_ie_t; + +/** GTK ie */ +BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie { + uint8 id; + uint8 len; + uint16 key_info; + uint8 key_len; + uint8 rsc[8]; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_gtk_ie dot11_gtk_ie_t; + +/** Management MIC ie */ +BWL_PRE_PACKED_STRUCT struct mmic_ie { + uint8 id; /* IE ID: DOT11_MNG_MMIE_ID */ + uint8 len; /* IE length */ + uint16 key_id; /* key id */ + uint8 ipn[6]; /* ipn */ + uint8 mic[16]; /* mic */ +} BWL_POST_PACKED_STRUCT; +typedef struct mmic_ie mmic_ie_t; + +/* 802.11r-2008, 11A.10.3 - RRB frame format */ +BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame { + uint8 frame_type; /* 1 for RRB */ + uint8 packet_type; /* 0 for Request 1 for Response */ + uint16 len; + uint8 cur_ap_addr[ETHER_ADDR_LEN]; + uint8 data[1]; /* IEs Received/Sent in FT Action Req/Resp Frame */ +} BWL_POST_PACKED_STRUCT; + +typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t; + +#define DOT11_FT_RRB_FIXED_LEN 10 +#define DOT11_FT_REMOTE_FRAME_TYPE 1 +#define DOT11_FT_PACKET_REQ 0 +#define DOT11_FT_PACKET_RESP 1 + +#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00" +#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF" + +#ifdef BCMWAPI_WAI +#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */ +#define WAPI_VERSION 1 /* WAPI version */ +#define WAPI_VERSION_LEN 2 /* WAPI version length */ +#define WAPI_OUI "\x00\x14\x72" 
/* WAPI OUI */ +#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */ +#endif /* BCMWAPI_WAI */ + +/* ************* WMM Parameter definitions. ************* */ +#define WMM_OUI "\x00\x50\xF2" /* WNN OUI */ +#define WMM_OUI_LEN 3 /* WMM OUI length */ +#define WMM_OUI_TYPE 2 /* WMM OUT type */ +#define WMM_VERSION 1 +#define WMM_VERSION_LEN 1 + +/* WMM OUI subtype */ +#define WMM_OUI_SUBTYPE_PARAMETER 1 +#define WMM_PARAMETER_IE_LEN 24 + +/** Link Identifier Element */ +BWL_PRE_PACKED_STRUCT struct link_id_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + struct ether_addr tdls_init_mac; + struct ether_addr tdls_resp_mac; +} BWL_POST_PACKED_STRUCT; +typedef struct link_id_ie link_id_ie_t; +#define TDLS_LINK_ID_IE_LEN 18u + +/** Link Wakeup Schedule Element */ +BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie { + uint8 id; + uint8 len; + uint32 offset; /* in ms between TSF0 and start of 1st Awake Window */ + uint32 interval; /* in ms bwtween the start of 2 Awake Windows */ + uint32 awake_win_slots; /* in backof slots, duration of Awake Window */ + uint32 max_wake_win; /* in ms, max duration of Awake Window */ + uint16 idle_cnt; /* number of consecutive Awake Windows */ +} BWL_POST_PACKED_STRUCT; +typedef struct wakeup_sch_ie wakeup_sch_ie_t; +#define TDLS_WAKEUP_SCH_IE_LEN 18 + +/** Channel Switch Timing Element */ +BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie { + uint8 id; + uint8 len; + uint16 switch_time; /* in ms, time to switch channels */ + uint16 switch_timeout; /* in ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct channel_switch_timing_ie channel_switch_timing_ie_t; +#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4 + +/** PTI Control Element */ +BWL_PRE_PACKED_STRUCT struct pti_control_ie { + uint8 id; + uint8 len; + uint8 tid; + uint16 seq_control; +} BWL_POST_PACKED_STRUCT; +typedef struct pti_control_ie pti_control_ie_t; +#define TDLS_PTI_CONTROL_IE_LEN 3 + +/** PU Buffer Status Element */ +BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie { + 
uint8 id; + uint8 len; + uint8 status; +} BWL_POST_PACKED_STRUCT; +typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; +#define TDLS_PU_BUFFER_STATUS_IE_LEN 1 +#define TDLS_PU_BUFFER_STATUS_AC_BK 1 +#define TDLS_PU_BUFFER_STATUS_AC_BE 2 +#define TDLS_PU_BUFFER_STATUS_AC_VI 4 +#define TDLS_PU_BUFFER_STATUS_AC_VO 8 + +/* TDLS Action Field Values */ +#define TDLS_SETUP_REQ 0 +#define TDLS_SETUP_RESP 1 +#define TDLS_SETUP_CONFIRM 2 +#define TDLS_TEARDOWN 3 +#define TDLS_PEER_TRAFFIC_IND 4 +#define TDLS_CHANNEL_SWITCH_REQ 5 +#define TDLS_CHANNEL_SWITCH_RESP 6 +#define TDLS_PEER_PSM_REQ 7 +#define TDLS_PEER_PSM_RESP 8 +#define TDLS_PEER_TRAFFIC_RESP 9 +#define TDLS_DISCOVERY_REQ 10 + +/* 802.11z TDLS Public Action Frame action field */ +#define TDLS_DISCOVERY_RESP 14 + +/* 802.11u GAS action frames */ +#define GAS_REQUEST_ACTION_FRAME 10 +#define GAS_RESPONSE_ACTION_FRAME 11 +#define GAS_COMEBACK_REQUEST_ACTION_FRAME 12 +#define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13 + +/* FTM - fine timing measurement public action frames */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_req { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (32) */ + uint8 trigger; /* trigger/continue? 
*/ + /* optional lci, civic loc, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_req dot11_ftm_req_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (33) */ + uint8 dialog; /* dialog token */ + uint8 follow_up; /* follow up dialog token */ + uint8 tod[6]; /* t1 - last depart timestamp */ + uint8 toa[6]; /* t4 - last ack arrival timestamp */ + uint8 tod_err[2]; /* t1 error */ + uint8 toa_err[2]; /* t4 error */ + /* optional lci report, civic loc report, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm dot11_ftm_t; + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 1 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x80 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 7 +#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0x7fff +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 0 +#define DOT11_FTM_ERR_MAX_ERR(_err) (((((_err)[1] & 0x7f) << 8) | (_err)[0])) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + uint16 _not_cont; \ + _val2 = (((_val) & DOT11_FTM_ERR_MAX_ERR_MASK) << DOT11_FTM_ERR_MAX_ERR_SHIFT); \ + _val2 = (_val2 > 0x3fff) ? 
0 : _val2; /* not expecting > 16ns error */ \ + _not_cont = DOT11_FTM_ERR_NOT_CONT(_err); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = (_val2 >> 8) & 0xff; \ + DOT11_FTM_ERR_SET_NOT_CONT(_err, _not_cont); \ +} while (0) + +#if defined(DOT11_FTM_ERR_ROM_COMPAT) +/* incorrect defs - here for ROM compatibility */ +#undef DOT11_FTM_ERR_NOT_CONT_OFFSET +#undef DOT11_FTM_ERR_NOT_CONT_MASK +#undef DOT11_FTM_ERR_NOT_CONT_SHIFT +#undef DOT11_FTM_ERR_NOT_CONT +#undef DOT11_FTM_ERR_SET_NOT_CONT + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0 +#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#undef DOT11_FTM_ERR_MAX_ERR_OFFSET +#undef DOT11_FTM_ERR_MAX_ERR_MASK +#undef DOT11_FTM_ERR_MAX_ERR_SHIFT +#undef DOT11_FTM_ERR_MAX_ERR +#undef DOT11_FTM_ERR_SET_MAX_ERR + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7 +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1 +#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + _val2 = (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\ + ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = _val2 >> 8 & 0xff; \ +} while (0) +#endif /* DOT11_FTM_ERR_ROM_COMPAT */ + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_params { + uint8 id; /* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */ + uint8 len; + uint8 info[9]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_params dot11_ftm_params_t; 
+#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2) + +#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift)) +#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\ + uint8 _ptmp = (_p)->info[_off] & ~(_mask); \ + (_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \ +} while (0) + +#define FTM_PARAMS_STATUS_OFFSET 0 +#define FTM_PARAMS_STATUS_MASK 0x03 +#define FTM_PARAMS_STATUS_SHIFT 0 +#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \ + FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT) +#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status) + +#define FTM_PARAMS_VALUE_OFFSET 0 +#define FTM_PARAMS_VALUE_MASK 0x7c +#define FTM_PARAMS_VALUE_SHIFT 2 +#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \ + FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT) +#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value) +#define FTM_PARAMS_MAX_VALUE 32 + +#define FTM_PARAMS_NBURSTEXP_OFFSET 1 +#define FTM_PARAMS_NBURSTEXP_MASK 0x0f +#define FTM_PARAMS_NBURSTEXP_SHIFT 0 +#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \ + FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT) +#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \ + _bexp) + +#define FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p)) + +enum { + FTM_PARAMS_NBURSTEXP_NOPREF = 15 +}; + +enum { + FTM_PARAMS_BURSTTMO_NOPREF = 15 +}; + +#define FTM_PARAMS_BURSTTMO_OFFSET 1 +#define FTM_PARAMS_BURSTTMO_MASK 0xf0 +#define FTM_PARAMS_BURSTTMO_SHIFT 4 +#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \ + FTM_PARAMS_BURSTTMO_MASK, 
FTM_PARAMS_BURSTTMO_SHIFT) +/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */ +#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2) + +#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250) +#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \ + (_val) == FTM_PARAMS_BURSTTMO_NOPREF) +#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */ +#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */ + +#define FTM_PARAMS_MINDELTA_OFFSET 2 +#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100) +#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \ + (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \ +} while (0) + +enum { + FTM_PARAMS_MINDELTA_NOPREF = 0 +}; + +#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3]) +#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \ + (_p)->info[3] = (_partial_tsf) & 0xff; \ + (_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL +#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10 +#define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16 +#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff + +/* FTM can indicate upto 62k TUs forward and 1k TU backward */ +#define FTM_PARAMS_TSF_FW_HI (63487 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_BW_LOW (64512 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_BW_HI (65535 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_FW_MAX FTM_PARAMS_TSF_FW_HI +#define FTM_PARAMS_TSF_BW_MAX (FTM_PARAMS_TSF_BW_HI - FTM_PARAMS_TSF_BW_LOW) + +#define FTM_PARAMS_PTSFNOPREF_OFFSET 5 +#define FTM_PARAMS_PTSFNOPREF_MASK 0x1 +#define FTM_PARAMS_PTSFNOPREF_SHIFT 0 +#define FTM_PARAMS_PTSFNOPREF(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_PTSFNOPREF_OFFSET, \ + FTM_PARAMS_PTSFNOPREF_MASK, FTM_PARAMS_PTSFNOPREF_SHIFT) +#define FTM_PARAMS_SET_PTSFNOPREF(_p, 
_nopref) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_PTSFNOPREF_OFFSET, FTM_PARAMS_PTSFNOPREF_MASK, \ + FTM_PARAMS_PTSFNOPREF_SHIFT, _nopref) + +#define FTM_PARAMS_ASAP_OFFSET 5 +#define FTM_PARAMS_ASAP_MASK 0x4 +#define FTM_PARAMS_ASAP_SHIFT 2 +#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \ + FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT) +#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap) + +/* FTM1 - AKA ASAP Capable */ +#define FTM_PARAMS_FTM1_OFFSET 5 +#define FTM_PARAMS_FTM1_MASK 0x02 +#define FTM_PARAMS_FTM1_SHIFT 1 +#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \ + FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT) +#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1) + +#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5 +#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8 +#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3 +#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \ + FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT) +#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \ + FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms) + +enum { + FTM_PARAMS_FTMS_PER_BURST_NOPREF = 0 +}; + +#define FTM_PARAMS_CHAN_INFO_OFFSET 6 +#define FTM_PARAMS_CHAN_INFO_MASK 0xfc +#define FTM_PARAMS_CHAN_INFO_SHIFT 2 +#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \ + FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT) +#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci) + +/* burst period - units of 100ms */ +#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7]) +#define 
FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\ + (_p)->info[7] = (_bp) & 0xff; \ + (_p)->info[8] = ((_bp) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_BURST_PERIOD_MS(_p) (FTM_PARAMS_BURST_PERIOD(_p) * 100) + +enum { + FTM_PARAMS_BURST_PERIOD_NOPREF = 0 +}; + +/* FTM status values - last updated from 11mcD4.0 */ +enum { + FTM_PARAMS_STATUS_RESERVED = 0, + FTM_PARAMS_STATUS_SUCCESSFUL = 1, + FTM_PARAMS_STATUS_INCAPABLE = 2, + FTM_PARAMS_STATUS_FAILED = 3, + /* Below are obsolete */ + FTM_PARAMS_STATUS_OVERRIDDEN = 4, + FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5, + FTM_PARAMS_STATUS_ASAP_FAILED = 6, + /* rest are reserved */ +}; + +enum { + FTM_PARAMS_CHAN_INFO_NO_PREF = 0, + FTM_PARAMS_CHAN_INFO_RESERVE1 = 1, + FTM_PARAMS_CHAN_INFO_RESERVE2 = 2, + FTM_PARAMS_CHAN_INFO_RESERVE3 = 3, + FTM_PARAMS_CHAN_INFO_NON_HT_5 = 4, + FTM_PARAMS_CHAN_INFO_RESERVE5 = 5, + FTM_PARAMS_CHAN_INFO_NON_HT_10 = 6, + FTM_PARAMS_CHAN_INFO_RESERVE7 = 7, + FTM_PARAMS_CHAN_INFO_NON_HT_20 = 8, /* excludes 2.4G, and High rate DSSS */ + FTM_PARAMS_CHAN_INFO_HT_MF_20 = 9, + FTM_PARAMS_CHAN_INFO_VHT_20 = 10, + FTM_PARAMS_CHAN_INFO_HT_MF_40 = 11, + FTM_PARAMS_CHAN_INFO_VHT_40 = 12, + FTM_PARAMS_CHAN_INFO_VHT_80 = 13, + FTM_PARAMS_CHAN_INFO_VHT_80_80 = 14, + FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS = 15, + FTM_PARAMS_CHAN_INFO_VHT_160 = 16, + /* Reserved from 17 - 30 */ + FTM_PARAMS_CHAN_INFO_DMG_2160 = 31, + /* Reserved from 32 - 63 */ + FTM_PARAMS_CHAN_INFO_MAX = 63 +}; + +/* tag_ID/length/value_buffer tuple */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 id; + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT ftm_vs_tlv_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie { + uint8 id; /* DOT11_MNG_VS_ID */ + uint8 len; /* length following */ + uint8 oui[3]; /* BRCM_PROP_OUI (or Customer) */ + uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */ + uint8 version; + ftm_vs_tlv_t tlvs[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t; + +/* ftm vs api version */ +#define 
BCM_FTM_VS_PARAMS_VERSION 0x01 + +/* ftm vendor specific information tlv types */ +enum { + FTM_VS_TLV_NONE = 0, + FTM_VS_TLV_REQ_PARAMS = 1, /* additional request params (in FTM_REQ) */ + FTM_VS_TLV_MEAS_INFO = 2, /* measurement information (in FTM_MEAS) */ + FTM_VS_TLV_SEC_PARAMS = 3, /* security parameters (in either) */ + FTM_VS_TLV_SEQ_PARAMS = 4, /* toast parameters (FTM_REQ, BRCM proprietary) */ + FTM_VS_TLV_MF_BUF = 5, /* multi frame buffer - may span ftm vs ie's */ + FTM_VS_TLV_TIMING_PARAMS = 6, /* timing adjustments */ + FTM_VS_TLV_MF_STATS_BUF = 7 /* multi frame statistics buffer */ + /* add additional types above */ +}; + +/* the following definitions are *DEPRECATED* and moved to implementation files. They + * are retained here because some branches prior to May 2016 use them + */ +#define FTM_TPK_LEN 16 +#define FTM_RI_RR_BUF_LEN 32 +#define FTM_TPK_RI_RR_LEN 13 +#define FTM_TPK_RI_RR_LEN_SECURE_2_0 28 +#define FTM_TPK_DIGEST_LEN 32 +#define FTM_TPK_BUFFER_LEN 128 +#define FTM_TPK_RI_PHY_LEN 7 +#define FTM_TPK_RR_PHY_LEN 7 +#define FTM_TPK_DATA_BUFFER_LEN 88 +#define FTM_TPK_LEN_SECURE_2_0 32 +#define FTM_TPK_RI_PHY_LEN_SECURE_2_0 14 +#define FTM_TPK_RR_PHY_LEN_SECURE_2_0 14 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_params { + uint8 id; /* DOT11_MNG_VS_ID */ + uint8 len; + uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ + uint8 bcm_vs_id; + ftm_vs_tlv_t ftm_tpk_ri_rr[1]; /* ftm_TPK_ri_rr place holder */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_params dot11_ftm_vs_tpk_ri_rr_params_t; +#define DOT11_FTM_VS_LEN (sizeof(dot11_ftm_vs_tpk_ri_rr_params_t) - TLV_HDR_LEN) +/* end *DEPRECATED* ftm definitions */ + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_sync_info { + uint8 id; /* Extended - 255 11mc D4.3 */ + uint8 len; + uint8 id_ext; + uint8 tsf_sync_info[4]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_sync_info dot11_ftm_sync_info_t; + +/* ftm tsf sync info ie len - includes id ext */ +#define DOT11_FTM_SYNC_INFO_IE_LEN 
(sizeof(dot11_ftm_sync_info_t) - TLV_HDR_LEN) + +#define DOT11_FTM_IS_SYNC_INFO_IE(_ie) (\ + DOT11_MNG_IE_ID_EXT_MATCH(_ie, DOT11_MNG_FTM_SYNC_INFO) && \ + (_ie)->len == DOT11_FTM_SYNC_INFO_IE_LEN) + +BWL_PRE_PACKED_STRUCT struct dot11_dh_param_ie { + uint8 id; /* OWE */ + uint8 len; + uint8 ext_id; /* EXT_MNG_OWE_DH_PARAM_ID */ + uint16 group; + uint8 pub_key[0]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_dh_param_ie dot11_dh_param_ie_t; + +#define DOT11_DH_EXTID_OFFSET (OFFSETOF(dot11_dh_param_ie_t, ext_id)) + +#define DOT11_OWE_DH_PARAM_IE(_ie) (\ + DOT11_MNG_IE_ID_EXT_MATCH(_ie, EXT_MNG_OWE_DH_PARAM_ID)) + +#define DOT11_MNG_OWE_IE_ID_EXT_INIT(_ie, _id, _len) do {\ + (_ie)->id = DOT11_MNG_ID_EXT_ID; \ + (_ie)->len = _len; \ + (_ie)->ext_id = _id; \ +} while (0) + +/* 802.11u interworking access network options */ +#define IW_ANT_MASK 0x0f +#define IW_INTERNET_MASK 0x10 +#define IW_ASRA_MASK 0x20 +#define IW_ESR_MASK 0x40 +#define IW_UESA_MASK 0x80 + +/* 802.11u interworking access network type */ +#define IW_ANT_PRIVATE_NETWORK 0 +#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST 1 +#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK 2 +#define IW_ANT_FREE_PUBLIC_NETWORK 3 +#define IW_ANT_PERSONAL_DEVICE_NETWORK 4 +#define IW_ANT_EMERGENCY_SERVICES_NETWORK 5 +#define IW_ANT_TEST_NETWORK 14 +#define IW_ANT_WILDCARD_NETWORK 15 + +#define IW_ANT_LEN 1 +#define IW_VENUE_LEN 2 +#define IW_HESSID_LEN 6 +#define IW_HESSID_OFF (IW_ANT_LEN + IW_VENUE_LEN) +#define IW_MAX_LEN (IW_ANT_LEN + IW_VENUE_LEN + IW_HESSID_LEN) + +/* 802.11u advertisement protocol */ +#define ADVP_ANQP_PROTOCOL_ID 0 +#define ADVP_MIH_PROTOCOL_ID 1 + +/* 802.11u advertisement protocol masks */ +#define ADVP_QRL_MASK 0x7f +#define ADVP_PAME_BI_MASK 0x80 + +/* 802.11u advertisement protocol values */ +#define ADVP_QRL_REQUEST 0x00 +#define ADVP_QRL_RESPONSE 0x7f +#define ADVP_PAME_BI_DEPENDENT 0x00 +#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK + +/* 802.11u ANQP information ID */ +#define 
ANQP_ID_QUERY_LIST 256 +#define ANQP_ID_CAPABILITY_LIST 257 +#define ANQP_ID_VENUE_NAME_INFO 258 +#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259 +#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO 260 +#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261 +#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO 262 +#define ANQP_ID_NAI_REALM_LIST 263 +#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264 +#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265 +#define ANQP_ID_AP_CIVIC_LOCATION 266 +#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267 +#define ANQP_ID_DOMAIN_NAME_LIST 268 +#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269 +#define ANQP_ID_EMERGENCY_NAI 271 +#define ANQP_ID_NEIGHBOR_REPORT 272 +#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797 + +/* 802.11u ANQP ID len */ +#define ANQP_INFORMATION_ID_LEN 2 + +/* 802.11u ANQP OUI */ +#define ANQP_OUI_SUBTYPE 9 + +/* 802.11u venue name */ +#define VENUE_LANGUAGE_CODE_SIZE 3 +#define VENUE_NAME_SIZE 255 + +/* 802.11u venue groups */ +#define VENUE_UNSPECIFIED 0 +#define VENUE_ASSEMBLY 1 +#define VENUE_BUSINESS 2 +#define VENUE_EDUCATIONAL 3 +#define VENUE_FACTORY 4 +#define VENUE_INSTITUTIONAL 5 +#define VENUE_MERCANTILE 6 +#define VENUE_RESIDENTIAL 7 +#define VENUE_STORAGE 8 +#define VENUE_UTILITY 9 +#define VENUE_VEHICULAR 10 +#define VENUE_OUTDOOR 11 + +/* 802.11u network authentication type indicator */ +#define NATI_UNSPECIFIED -1 +#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0 +#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1 +#define NATI_HTTP_HTTPS_REDIRECTION 2 +#define NATI_DNS_REDIRECTION 3 + +/* 802.11u IP address type availability - IPv6 */ +#define IPA_IPV6_SHIFT 0 +#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT) +#define IPA_IPV6_NOT_AVAILABLE 0x00 +#define IPA_IPV6_AVAILABLE 0x01 +#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02 + +/* 802.11u IP address type availability - IPv4 */ +#define IPA_IPV4_SHIFT 2 +#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT) +#define IPA_IPV4_NOT_AVAILABLE 0x00 +#define IPA_IPV4_PUBLIC 0x01 +#define IPA_IPV4_PORT_RESTRICT 0x02 
+#define IPA_IPV4_SINGLE_NAT 0x03 +#define IPA_IPV4_DOUBLE_NAT 0x04 +#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05 +#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06 +#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07 + +/* 802.11u NAI realm encoding */ +#define REALM_ENCODING_RFC4282 0 +#define REALM_ENCODING_UTF8 1 + +/* 802.11u IANA EAP method type numbers */ +#define REALM_EAP_TLS 13 +#define REALM_EAP_LEAP 17 +#define REALM_EAP_SIM 18 +#define REALM_EAP_TTLS 21 +#define REALM_EAP_AKA 23 +#define REALM_EAP_PEAP 25 +#define REALM_EAP_FAST 43 +#define REALM_EAP_PSK 47 +#define REALM_EAP_AKAP 50 +#define REALM_EAP_EXPANDED 254 + +/* 802.11u authentication ID */ +#define REALM_EXPANDED_EAP 1 +#define REALM_NON_EAP_INNER_AUTHENTICATION 2 +#define REALM_INNER_AUTHENTICATION_EAP 3 +#define REALM_EXPANDED_INNER_EAP 4 +#define REALM_CREDENTIAL 5 +#define REALM_TUNNELED_EAP_CREDENTIAL 6 +#define REALM_VENDOR_SPECIFIC_EAP 221 + +/* 802.11u non-EAP inner authentication type */ +#define REALM_RESERVED_AUTH 0 +#define REALM_PAP 1 +#define REALM_CHAP 2 +#define REALM_MSCHAP 3 +#define REALM_MSCHAPV2 4 + +/* 802.11u credential type */ +#define REALM_SIM 1 +#define REALM_USIM 2 +#define REALM_NFC 3 +#define REALM_HARDWARE_TOKEN 4 +#define REALM_SOFTOKEN 5 +#define REALM_CERTIFICATE 6 +#define REALM_USERNAME_PASSWORD 7 +#define REALM_SERVER_SIDE 8 +#define REALM_RESERVED_CRED 9 +#define REALM_VENDOR_SPECIFIC_CRED 10 + +/* 802.11u 3GPP PLMN */ +#define G3PP_GUD_VERSION 0 +#define G3PP_PLMN_LIST_IE 0 + +/* AP Location Public ID Info encoding */ +#define PUBLIC_ID_URI_FQDN_SE_ID 0 +/* URI/FQDN Descriptor field values */ +#define LOCATION_ENCODING_HELD 1 +#define LOCATION_ENCODING_SUPL 2 +#define URI_FQDN_SIZE 255 + +/** hotspot2.0 indication element (vendor specific) */ +BWL_PRE_PACKED_STRUCT struct hs20_ie { + uint8 oui[3]; + uint8 type; + uint8 config; +} BWL_POST_PACKED_STRUCT; +typedef struct hs20_ie hs20_ie_t; +#define HS20_IE_LEN 5 /* HS20 IE length */ + +/** IEEE 802.11 Annex E */ 
+typedef enum { + DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */ + DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */ + DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */ + DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */ + DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */ + DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */ + DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */ + DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */ + DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */ + DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */ + DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */ + DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */ + DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */ + DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */ + DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */ + DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */ +} dot11_op_class_t; + +/* QoS map */ +#define QOS_MAP_FIXED_LENGTH (8 * 2) /* DSCP ranges fixed with 8 entries */ + +#define BCM_AIBSS_IE_TYPE 56 + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _802_11_H_ */ diff --git a/bcmdhd.100.10.315.x/include/802.11e.h b/bcmdhd.100.10.315.x/include/802.11e.h new file mode 100644 index 0000000..73dec3c --- /dev/null +++ b/bcmdhd.100.10.315.x/include/802.11e.h @@ -0,0 +1,139 @@ +/* + * 802.11e protocol header file + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11e.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _802_11e_H_ +#define _802_11e_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. 
*/ +#include + +/* WME Traffic Specification (TSPEC) element */ +#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */ +#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */ + +#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */ +#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */ +#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */ +#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */ + +BWL_PRE_PACKED_STRUCT struct tsinfo { + uint8 octets[3]; +} BWL_POST_PACKED_STRUCT; + +typedef struct tsinfo tsinfo_t; + +/* 802.11e TSPEC IE */ +typedef BWL_PRE_PACKED_STRUCT struct tspec { + uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */ + uint8 type; /* WME_TYPE */ + uint8 subtype; /* WME_SUBTYPE_TSPEC */ + uint8 version; /* WME_VERSION */ + tsinfo_t tsinfo; /* TS Info bit field */ + uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */ + uint32 min_srv_interval; /* Minimum Service Interval (us) */ + uint32 max_srv_interval; /* Maximum Service Interval (us) */ + uint32 inactivity_interval; /* Inactivity Interval (us) */ + uint32 suspension_interval; /* Suspension Interval (us) */ + uint32 srv_start_time; /* Service Start Time (us) */ + uint32 min_data_rate; /* Minimum Data Rate (bps) */ + uint32 mean_data_rate; /* Mean Data Rate (bps) */ + uint32 peak_data_rate; /* Peak Data Rate (bps) */ + uint32 max_burst_size; /* Maximum Burst Size (bytes) */ + uint32 delay_bound; /* Delay Bound (us) */ + uint32 min_phy_rate; /* Minimum PHY Rate (bps) */ + uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */ + uint16 medium_time; /* Medium Time (32 us/s periods) */ +} BWL_POST_PACKED_STRUCT tspec_t; + +#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2-bytes of header */ + +/* ts_info */ +/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */ +#define TS_INFO_TID_SHIFT 1 /* TS info. 
TID shift */ +#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */ +#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */ +#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */ +#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */ +#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */ +#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */ +#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */ +#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */ +#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */ +#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */ +#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */ +/* TS info. user priority mask */ +#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT) + +/* Macro to get/set bit(s) field in TSINFO */ +#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT) +#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \ + TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT) +#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT) +#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \ + TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT) + +#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \ + ((id) << TS_INFO_TID_SHIFT)) +#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \ + ((prio) << TS_INFO_USER_PRIO_SHIFT)) + +/* 802.11e QBSS Load IE */ +#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */ +#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */ + +#define CAC_ADDTS_RESP_TIMEOUT 1000 /* default ADDTS response timeout in ms */ + /* DEFVAL dot11ADDTSResponseTimeout = 1s */ + +/* 802.11e ADDTS status code */ +#define 
DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */ +#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */ +#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */ +#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */ +#ifdef BCMCCX +#define CCX_STATUS_ASSOC_DENIED_UNKNOWN 0xc8 /* unspecified QoS related failure */ +#define CCX_STATUS_ASSOC_DENIED_AP_POLICY 0xc9 /* TSPEC refused due to AP policy */ +#define CCX_STATUS_ASSOC_DENIED_NO_BW 0xca /* Assoc denied due to AP insufficient BW */ +#define CCX_STATUS_ASSOC_DENIED_BAD_PARAM 0xcb /* one or more TSPEC with invalid parameter */ +#endif /* BCMCCX */ + +/* 802.11e DELTS status code */ +#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */ +#define DOT11E_STATUS_END_TS 37 /* END TS */ +#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */ +#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */ + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _802_11e_H_ */ diff --git a/bcmdhd.100.10.315.x/include/802.11s.h b/bcmdhd.100.10.315.x/include/802.11s.h new file mode 100644 index 0000000..96121d8 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/802.11s.h @@ -0,0 +1,334 @@ +/* + * Fundamental types and constants relating to 802.11s Mesh + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11s.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _802_11s_h_ +#define _802_11s_h_ + +/* This marks the start of a packed structure section. 
*/ +#include + +#define DOT11_MESH_FLAGS_AE_MASK 0x3 +#define DOT11_MESH_FLAGS_AE_SHIFT 0 + +#define DOT11_MESH_CONNECTED_AS_SET 7 +#define DOT11_MESH_NUMBER_PEERING_SET 1 +#define DOT11_MESH_MESH_GWSET 0 + +#define DOT11_MESH_ACTION_LINK_MET_REP 0 +#define DOT11_MESH_ACTION_PATH_SEL 1 +#define DOT11_MESH_ACTION_GATE_ANN 2 +#define DOT11_MESH_ACTION_CONG_CONT_NOTIF 3 +#define DOT11_MESH_ACTION_MCCA_SETUP_REQ 4 +#define DOT11_MESH_ACTION_MCCA_SETUP_REP 5 +#define DOT11_MESH_ACTION_MCCA_ADVT_REQ 6 +#define DOT11_MESH_ACTION_MCCA_ADVT 7 +#define DOT11_MESH_ACTION_MCCA_TEARDOWN 8 +#define DOT11_MESH_ACTION_TBTT_ADJ_REQ 9 +#define DOT11_MESH_ACTION_TBTT_ADJ_RESP 10 + +/* self-protected action field values: 7-57v24 */ +#define DOT11_SELFPROT_ACTION_MESH_PEER_OPEN 1 +#define DOT11_SELFPROT_ACTION_MESH_PEER_CONFM 2 +#define DOT11_SELFPROT_ACTION_MESH_PEER_CLOSE 3 +#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_INF 4 +#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_ACK 5 + +#define DOT11_MESH_AUTH_PROTO_NONE 0 +#define DOT11_MESH_AUTH_PROTO_SAE 1 +#define DOT11_MESH_AUTH_PROTO_8021X 2 +#define DOT11_MESH_AUTH_PROTO_VS 255 + +#define DOT11_MESH_PATHSEL_LEN 2 +#define DOT11_MESH_PERR_LEN1 2 /* Least PERR length fixed */ +#define DOT11_MESH_PERR_LEN2 13 /* Least PERR length variable */ +#define DOT11_MESH_PREP_LEN 31 /* Least PREP length */ +#define DOT11_MESH_PREQ_LEN 37 /* Least PREQ length */ + +#define DOT11_MESH_PATHSEL_PROTID_HWMP 1 +#define DOT11_MESH_PATHSEL_METRICID_ALM 1 /* Air link metric */ +#define DOT11_MESH_CONGESTCTRL_NONE 0 +#define DOT11_MESH_CONGESTCTRL_SP 1 +#define DOT11_MESH_SYNCMETHOD_NOFFSET 1 + +BWL_PRE_PACKED_STRUCT struct dot11_meshctrl_hdr { + uint8 flags; /* flag bits such as ae etc */ + uint8 ttl; /* time to live */ + uint32 seq; /* sequence control */ + struct ether_addr a5; /* optional address 5 */ + struct ether_addr a6; /* optional address 6 */ +} BWL_POST_PACKED_STRUCT; + +/* Mesh Path Selection Action Frame */ +BWL_PRE_PACKED_STRUCT struct 
dot11_mesh_pathsel { + uint8 category; + uint8 meshaction; + uint8 data[]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mesh_pathsel dot11_mesh_pathsel_t; + +/* Mesh PREQ IE */ +BWL_PRE_PACKED_STRUCT struct mesh_preq_ie { + uint8 id; + uint8 len; + uint8 flags; + uint8 hop_count; + uint8 ttl; + uint32 pathdis_id; + struct ether_addr originator_addr; + uint32 originator_seq; + union { + BWL_PRE_PACKED_STRUCT struct { + struct ether_addr target_ext_add; + uint32 lifetime; + uint32 metric; + uint8 target_count; + uint8 data[]; + } BWL_POST_PACKED_STRUCT oea; + + BWL_PRE_PACKED_STRUCT struct { + uint32 lifetime; + uint32 metric; + uint8 target_count; + uint8 data[]; + } BWL_POST_PACKED_STRUCT noea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_preq_ie mesh_preq_ie_t; + +/* Target info (part of Mesh PREQ IE) */ +BWL_PRE_PACKED_STRUCT struct mesh_targetinfo { + uint8 target_flag; + struct ether_addr target_addr; + uint32 target_seq; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_targetinfo mesh_targetinfo_t; + +/* Mesh PREP IE */ +BWL_PRE_PACKED_STRUCT struct mesh_prep_ie { + uint8 id; + uint8 len; + uint8 flags; + uint8 hop_count; + uint8 ttl; + struct ether_addr target_addr; + uint32 target_seq; + union { + BWL_PRE_PACKED_STRUCT struct { + struct ether_addr target_ext_add; + uint32 lifetime; + uint32 metric; + uint8 target_count; + struct ether_addr originator_addr; + uint32 originator_seq; + } BWL_POST_PACKED_STRUCT oea; + + BWL_PRE_PACKED_STRUCT struct { + uint32 lifetime; + uint32 metric; + uint8 target_count; + struct ether_addr originator_addr; + uint32 originator_seq; + } BWL_POST_PACKED_STRUCT noea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_prep_ie mesh_prep_ie_t; + +/* Mesh PERR IE */ +struct mesh_perr_ie { + uint8 id; + uint8 len; + uint8 ttl; + uint8 num_dest; + uint8 data[]; +}; +typedef struct mesh_perr_ie mesh_perr_ie_t; + +/* Destination info is part of PERR IE */ +BWL_PRE_PACKED_STRUCT struct mesh_perr_destinfo { + uint8 flags; 
+ struct ether_addr destination_addr; + uint32 dest_seq; + union { + BWL_PRE_PACKED_STRUCT struct { + struct ether_addr dest_ext_addr; + } BWL_POST_PACKED_STRUCT dea; + + BWL_PRE_PACKED_STRUCT struct { + /* 1 byte reason code to be populated manually in software */ + uint16 reason_code; + } BWL_POST_PACKED_STRUCT nodea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_perr_destinfo mesh_perr_destinfo_t; + +/* Mesh peering action frame hdr */ +BWL_PRE_PACKED_STRUCT struct mesh_peering_frmhdr { + uint8 category; + uint8 action; + union { + struct { + uint16 capability; + } open; + struct { + uint16 capability; + uint16 AID; + } confirm; + uint8 data[1]; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peering_frmhdr mesh_peering_frmhdr_t; + +/* Mesh peering mgmt IE */ +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_common { + uint16 mesh_peer_prot_id; + uint16 local_link_id; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_common mesh_peer_mgmt_ie_common_t; +#define MESH_PEER_MGMT_IE_OPEN_LEN (4) + +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_cfm { + mesh_peer_mgmt_ie_common_t common; + uint16 peer_link_id; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_cfm mesh_peer_mgmt_ie_cfm_t; +#define MESH_PEER_MGMT_IE_CONF_LEN (6) + +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_close { + mesh_peer_mgmt_ie_common_t common; + /* uint16 peer_link_id; + * simplicity: not supported, TODO for future + */ + uint16 reason_code; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_close mesh_peer_mgmt_ie_close_t; +#define MESH_PEER_MGMT_IE_CLOSE_LEN (6) + +struct mesh_config_ie { + uint8 activ_path_sel_prot_id; + uint8 activ_path_sel_metric_id; + uint8 cong_ctl_mode_id; + uint8 sync_method_id; + uint8 auth_prot_id; + uint8 mesh_formation_info; + uint8 mesh_cap; +}; +typedef struct mesh_config_ie mesh_config_ie_t; +#define MESH_CONFIG_IE_LEN (7) + +/* Mesh peering states */ +#define MESH_PEERING_IDLE 0 +#define MESH_PEERING_OPEN_SNT 1 
+#define MESH_PEERING_CNF_RCVD 2 +#define MESH_PEERING_OPEN_RCVD 3 +#define MESH_PEERING_ESTAB 4 +#define MESH_PEERING_HOLDING 5 +#define MESH_PEERING_LAST_STATE 6 +/* for debugging: mapping strings */ +#define MESH_PEERING_STATE_STRINGS \ + {"IDLE ", "OPNSNT", "CNFRCV", "OPNRCV", "ESTAB ", "HOLDNG"} + +#ifdef WLMESH +typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info { + /* mesh_peer_instance as given in the spec. Note that, peer address + * is stored in scb + */ + uint16 mesh_peer_prot_id; + uint16 local_link_id; + uint16 peer_link_id; + /* AID generated by *peer* to self & received in peer_confirm */ + uint16 peer_aid; + + /* TODO: no mention in spec? possibly used in PS case. Note that aid generated + * from self to peer is stored in scb. + */ + uint8 state; + /* TODO: struct mesh_peer_info *next; this field is required + * if multiple peerings per same src is allowed, which is + * true as per spec. + */ +} BWL_POST_PACKED_STRUCT mesh_peer_info_t; + +typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_ext { + mesh_peer_info_t peer_info; + uint16 local_aid; /* AID generated by *local* to peer */ + struct ether_addr ea; /* peer ea */ + uint32 entry_state; /* see MESH_PEER_ENTRY_STATE_ACTIVE etc; valid + * ONLY for internal peering requests + */ + int rssi; +} BWL_POST_PACKED_STRUCT mesh_peer_info_ext_t; + +/* #ifdef WLMESH */ +typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_dump { + uint32 buflen; + uint32 version; + uint32 count; /* number of results */ + mesh_peer_info_ext_t mpi_ext[1]; +} BWL_POST_PACKED_STRUCT mesh_peer_info_dump_t; +#define WL_MESH_PEER_RES_FIXED_SIZE (sizeof(mesh_peer_info_dump_t) - sizeof(mesh_peer_info_ext_t)) + +#endif /* WLMESH */ + +/* once an entry is added into mesh_peer_list, if peering is lost, it will +* get retried for peering, MAX_MESH_PEER_ENTRY_RETRIES times. 
afterwards, it
+* won't get retried and will be moved to MESH_PEER_ENTRY_STATE_TIMEDOUT state,
+* until user adds it again explicitly, when its entry_state is changed
+* to MESH_PEER_ENTRY_STATE_ACTIVE and tried again.
+*/
+#define MAX_MESH_SELF_PEER_ENTRY_RETRIES 3
+#define MESH_SELF_PEER_ENTRY_STATE_ACTIVE 1
+#define MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT 2
+
+/** Mesh Channel Switch Parameter IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_mcsp_body {
+ uint8 ttl; /* remaining number of hops allowed for this element. */
+ uint8 flags; /* attributes of this channel switch attempt */
+ uint8 reason; /* reason for the mesh channel switch */
+ uint16 precedence; /* random value in the range 0 to 65535 */
+} BWL_POST_PACKED_STRUCT;
+
+#define DOT11_MCSP_TTL_DEFAULT 1
+#define DOT11_MCSP_FLAG_TRANS_RESTRICT 0x1 /* no transmit except frames with mcsp */
+#define DOT11_MCSP_FLAG_INIT 0x2 /* initiates the channel switch attempt */
+#define DOT11_MCSP_FLAG_REASON 0x4 /* validity of reason code field */
+#define DOT11_MCSP_REASON_REGULATORY 0 /* meet regulatory requirements */
+#define DOT11_MCSP_REASON_UNSPECIFIED 1 /* unspecified reason */
+
+BWL_PRE_PACKED_STRUCT struct dot11_mesh_csp {
+ uint8 id; /* id DOT11_MNG_MESH_CSP_ID */
+ uint8 len; /* length of IE */
+ struct dot11_mcsp_body body; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mesh_csp dot11_mesh_csp_ie_t;
+#define DOT11_MESH_CSP_IE_LEN 5 /* length of mesh channel switch parameter IE body */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _802_11s_H_ */
diff --git a/bcmdhd.100.10.315.x/include/802.1d.h b/bcmdhd.100.10.315.x/include/802.1d.h
new file mode 100644
index 0000000..efc60b8
--- /dev/null
+++ b/bcmdhd.100.10.315.x/include/802.1d.h
@@ -0,0 +1,53 @@
+/*
+ * Fundamental types and constants relating to 802.1D
+ *
+ * Copyright (C) 1999-2018, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: 802.1d.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define PRIO_8021D_NONE 2 /* None = - */
+#define PRIO_8021D_BK 1 /* BK - Background */
+#define PRIO_8021D_BE 0 /* BE - Best-effort */
+#define PRIO_8021D_EE 3 /* EE - Excellent-effort */
+#define PRIO_8021D_CL 4 /* CL - Controlled Load */
+#define PRIO_8021D_VI 5 /* Vi - Video */
+#define PRIO_8021D_VO 6 /* Vo - Voice */
+#define PRIO_8021D_NC 7 /* NC - Network Control */
+#define MAXPRIO 7 /* 0-7 */
+#define NUMPRIO (MAXPRIO + 1)
+
+#define ALLPRIO -1 /* All priority */
+
+/* Converts prio to precedence since the numerical value of
+ * PRIO_8021D_BE (0) and PRIO_8021D_NONE (2) are swapped: XOR with 2
+ * exchanges 0 and 2 and leaves other values to the : branch. The macro
+ * argument is fully parenthesized so PRIO2PREC(x + 1) expands correctly.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? (((prio) ^ 2)) : (prio))
+
+#endif /* _802_1_D_ */
diff --git a/bcmdhd.100.10.315.x/include/802.3.h b/bcmdhd.100.10.315.x/include/802.3.h
new file mode 100644
index 0000000..b96b362
--- /dev/null
+++ b/bcmdhd.100.10.315.x/include/802.3.h
@@ -0,0 +1,55 @@
+/*
+ * Fundamental constants relating to 802.3
+ *
+ * Copyright (C) 1999-2018, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: 802.3.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _802_3_h_
+#define _802_3_h_
+
+/* This marks the start of a packed structure section.
 */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN 6 /* 802.3 SNAP header length */
+#define DOT3_OUI_LEN 3 /* 802.3 oui length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */
+ uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */
+ uint16 length; /* frame length incl header */
+ uint8 dsap; /* always 0xAA */
+ uint8 ssap; /* always 0xAA */
+ uint8 ctl; /* always 0x03 */
+ uint8 oui[DOT3_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00
+ * Bridge-Tunnel: 0x00 0x00 0xF8
+ */
+ uint16 type; /* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _802_3_h_ */
diff --git a/bcmdhd.100.10.315.x/include/aidmp.h b/bcmdhd.100.10.315.x/include/aidmp.h
new file mode 100644
index 0000000..aae2178
--- /dev/null
+++ b/bcmdhd.100.10.315.x/include/aidmp.h
@@ -0,0 +1,429 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2018, Broadcom.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: aidmp.h 617751 2016-02-08 09:04:22Z $ + */ + +#ifndef _AIDMP_H +#define _AIDMP_H + +/* Manufacturer Ids */ +#define MFGID_ARM 0x43b +#define MFGID_BRCM 0x4bf +#define MFGID_MIPS 0x4a7 + +/* Component Classes */ +#define CC_SIM 0 +#define CC_EROM 1 +#define CC_CORESIGHT 9 +#define CC_VERIF 0xb +#define CC_OPTIMO 0xd +#define CC_GEN 0xe +#define CC_PRIMECELL 0xf + +/* Enumeration ROM registers */ +#define ER_EROMENTRY 0x000 +#define ER_REMAPCONTROL 0xe00 +#define ER_REMAPSELECT 0xe04 +#define ER_MASTERSELECT 0xe10 +#define ER_ITCR 0xf00 +#define ER_ITIP 0xf04 + +/* Erom entries */ +#define ER_TAG 0xe +#define ER_TAG1 0x6 +#define ER_VALID 1 +#define ER_CI 0 +#define ER_MP 2 +#define ER_ADD 4 +#define ER_END 0xe +#define ER_BAD 0xffffffff +#define ER_SZ_MAX 4096 /* 4KB */ + +/* EROM CompIdentA */ +#define CIA_MFG_MASK 0xfff00000 +#define CIA_MFG_SHIFT 20 +#define CIA_CID_MASK 0x000fff00 +#define CIA_CID_SHIFT 8 +#define CIA_CCL_MASK 0x000000f0 +#define CIA_CCL_SHIFT 4 + +/* EROM CompIdentB */ +#define CIB_REV_MASK 0xff000000 +#define CIB_REV_SHIFT 24 +#define CIB_NSW_MASK 0x00f80000 +#define CIB_NSW_SHIFT 19 +#define CIB_NMW_MASK 0x0007c000 +#define CIB_NMW_SHIFT 14 +#define CIB_NSP_MASK 0x00003e00 +#define CIB_NSP_SHIFT 9 +#define CIB_NMP_MASK 0x000001f0 +#define CIB_NMP_SHIFT 4 + +/* EROM MasterPortDesc */ +#define MPD_MUI_MASK 0x0000ff00 +#define MPD_MUI_SHIFT 8 +#define MPD_MP_MASK 0x000000f0 +#define MPD_MP_SHIFT 4 + +/* EROM AddrDesc */ +#define AD_ADDR_MASK 0xfffff000 +#define AD_SP_MASK 0x00000f00 +#define AD_SP_SHIFT 8 +#define AD_ST_MASK 0x000000c0 +#define AD_ST_SHIFT 6 +#define AD_ST_SLAVE 0x00000000 +#define AD_ST_BRIDGE 0x00000040 +#define AD_ST_SWRAP 0x00000080 +#define AD_ST_MWRAP 0x000000c0 
+#define AD_SZ_MASK 0x00000030 +#define AD_SZ_SHIFT 4 +#define AD_SZ_4K 0x00000000 +#define AD_SZ_8K 0x00000010 +#define AD_SZ_16K 0x00000020 +#define AD_SZ_SZD 0x00000030 +#define AD_AG32 0x00000008 +#define AD_ADDR_ALIGN 0x00000fff +#define AD_SZ_BASE 0x00001000 /* 4KB */ + +/* EROM SizeDesc */ +#define SD_SZ_MASK 0xfffff000 +#define SD_SG32 0x00000008 +#define SD_SZ_ALIGN 0x00000fff + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +typedef volatile struct _aidmp { + uint32 oobselina30; /* 0x000 */ + uint32 oobselina74; /* 0x004 */ + uint32 PAD[6]; + uint32 oobselinb30; /* 0x020 */ + uint32 oobselinb74; /* 0x024 */ + uint32 PAD[6]; + uint32 oobselinc30; /* 0x040 */ + uint32 oobselinc74; /* 0x044 */ + uint32 PAD[6]; + uint32 oobselind30; /* 0x060 */ + uint32 oobselind74; /* 0x064 */ + uint32 PAD[38]; + uint32 oobselouta30; /* 0x100 */ + uint32 oobselouta74; /* 0x104 */ + uint32 PAD[6]; + uint32 oobseloutb30; /* 0x120 */ + uint32 oobseloutb74; /* 0x124 */ + uint32 PAD[6]; + uint32 oobseloutc30; /* 0x140 */ + uint32 oobseloutc74; /* 0x144 */ + uint32 PAD[6]; + uint32 oobseloutd30; /* 0x160 */ + uint32 oobseloutd74; /* 0x164 */ + uint32 PAD[38]; + uint32 oobsynca; /* 0x200 */ + uint32 oobseloutaen; /* 0x204 */ + uint32 PAD[6]; + uint32 oobsyncb; /* 0x220 */ + uint32 oobseloutben; /* 0x224 */ + uint32 PAD[6]; + uint32 oobsyncc; /* 0x240 */ + uint32 oobseloutcen; /* 0x244 */ + uint32 PAD[6]; + uint32 oobsyncd; /* 0x260 */ + uint32 oobseloutden; /* 0x264 */ + uint32 PAD[38]; + uint32 oobaextwidth; /* 0x300 */ + uint32 oobainwidth; /* 0x304 */ + uint32 oobaoutwidth; /* 0x308 */ + uint32 PAD[5]; + uint32 oobbextwidth; /* 0x320 */ + uint32 oobbinwidth; /* 0x324 */ + uint32 oobboutwidth; /* 0x328 */ + uint32 PAD[5]; + uint32 oobcextwidth; /* 0x340 */ + uint32 oobcinwidth; /* 0x344 */ + uint32 oobcoutwidth; /* 0x348 */ + uint32 PAD[5]; + uint32 oobdextwidth; /* 0x360 */ + uint32 oobdinwidth; /* 0x364 */ + uint32 oobdoutwidth; /* 0x368 */ + uint32 PAD[37]; + 
uint32 ioctrlset; /* 0x400 */ + uint32 ioctrlclear; /* 0x404 */ + uint32 ioctrl; /* 0x408 */ + uint32 PAD[61]; + uint32 iostatus; /* 0x500 */ + uint32 PAD[127]; + uint32 ioctrlwidth; /* 0x700 */ + uint32 iostatuswidth; /* 0x704 */ + uint32 PAD[62]; + uint32 resetctrl; /* 0x800 */ + uint32 resetstatus; /* 0x804 */ + uint32 resetreadid; /* 0x808 */ + uint32 resetwriteid; /* 0x80c */ + uint32 PAD[60]; + uint32 errlogctrl; /* 0x900 */ + uint32 errlogdone; /* 0x904 */ + uint32 errlogstatus; /* 0x908 */ + uint32 errlogaddrlo; /* 0x90c */ + uint32 errlogaddrhi; /* 0x910 */ + uint32 errlogid; /* 0x914 */ + uint32 errloguser; /* 0x918 */ + uint32 errlogflags; /* 0x91c */ + uint32 PAD[56]; + uint32 intstatus; /* 0xa00 */ + uint32 PAD[255]; + uint32 config; /* 0xe00 */ + uint32 PAD[63]; + uint32 itcr; /* 0xf00 */ + uint32 PAD[3]; + uint32 itipooba; /* 0xf10 */ + uint32 itipoobb; /* 0xf14 */ + uint32 itipoobc; /* 0xf18 */ + uint32 itipoobd; /* 0xf1c */ + uint32 PAD[4]; + uint32 itipoobaout; /* 0xf30 */ + uint32 itipoobbout; /* 0xf34 */ + uint32 itipoobcout; /* 0xf38 */ + uint32 itipoobdout; /* 0xf3c */ + uint32 PAD[4]; + uint32 itopooba; /* 0xf50 */ + uint32 itopoobb; /* 0xf54 */ + uint32 itopoobc; /* 0xf58 */ + uint32 itopoobd; /* 0xf5c */ + uint32 PAD[4]; + uint32 itopoobain; /* 0xf70 */ + uint32 itopoobbin; /* 0xf74 */ + uint32 itopoobcin; /* 0xf78 */ + uint32 itopoobdin; /* 0xf7c */ + uint32 PAD[4]; + uint32 itopreset; /* 0xf90 */ + uint32 PAD[15]; + uint32 peripherialid4; /* 0xfd0 */ + uint32 peripherialid5; /* 0xfd4 */ + uint32 peripherialid6; /* 0xfd8 */ + uint32 peripherialid7; /* 0xfdc */ + uint32 peripherialid0; /* 0xfe0 */ + uint32 peripherialid1; /* 0xfe4 */ + uint32 peripherialid2; /* 0xfe8 */ + uint32 peripherialid3; /* 0xfec */ + uint32 componentid0; /* 0xff0 */ + uint32 componentid1; /* 0xff4 */ + uint32 componentid2; /* 0xff8 */ + uint32 componentid3; /* 0xffc */ +} aidmp_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +/* Out-of-band Router 
registers */ +#define OOB_BUSCONFIG 0x020 +#define OOB_STATUSA 0x100 +#define OOB_STATUSB 0x104 +#define OOB_STATUSC 0x108 +#define OOB_STATUSD 0x10c +#define OOB_ENABLEA0 0x200 +#define OOB_ENABLEA1 0x204 +#define OOB_ENABLEA2 0x208 +#define OOB_ENABLEA3 0x20c +#define OOB_ENABLEB0 0x280 +#define OOB_ENABLEB1 0x284 +#define OOB_ENABLEB2 0x288 +#define OOB_ENABLEB3 0x28c +#define OOB_ENABLEC0 0x300 +#define OOB_ENABLEC1 0x304 +#define OOB_ENABLEC2 0x308 +#define OOB_ENABLEC3 0x30c +#define OOB_ENABLED0 0x380 +#define OOB_ENABLED1 0x384 +#define OOB_ENABLED2 0x388 +#define OOB_ENABLED3 0x38c +#define OOB_ITCR 0xf00 +#define OOB_ITIPOOBA 0xf10 +#define OOB_ITIPOOBB 0xf14 +#define OOB_ITIPOOBC 0xf18 +#define OOB_ITIPOOBD 0xf1c +#define OOB_ITOPOOBA 0xf30 +#define OOB_ITOPOOBB 0xf34 +#define OOB_ITOPOOBC 0xf38 +#define OOB_ITOPOOBD 0xf3c + +/* DMP wrapper registers */ +#define AI_OOBSELINA30 0x000 +#define AI_OOBSELINA74 0x004 +#define AI_OOBSELINB30 0x020 +#define AI_OOBSELINB74 0x024 +#define AI_OOBSELINC30 0x040 +#define AI_OOBSELINC74 0x044 +#define AI_OOBSELIND30 0x060 +#define AI_OOBSELIND74 0x064 +#define AI_OOBSELOUTA30 0x100 +#define AI_OOBSELOUTA74 0x104 +#define AI_OOBSELOUTB30 0x120 +#define AI_OOBSELOUTB74 0x124 +#define AI_OOBSELOUTC30 0x140 +#define AI_OOBSELOUTC74 0x144 +#define AI_OOBSELOUTD30 0x160 +#define AI_OOBSELOUTD74 0x164 +#define AI_OOBSYNCA 0x200 +#define AI_OOBSELOUTAEN 0x204 +#define AI_OOBSYNCB 0x220 +#define AI_OOBSELOUTBEN 0x224 +#define AI_OOBSYNCC 0x240 +#define AI_OOBSELOUTCEN 0x244 +#define AI_OOBSYNCD 0x260 +#define AI_OOBSELOUTDEN 0x264 +#define AI_OOBAEXTWIDTH 0x300 +#define AI_OOBAINWIDTH 0x304 +#define AI_OOBAOUTWIDTH 0x308 +#define AI_OOBBEXTWIDTH 0x320 +#define AI_OOBBINWIDTH 0x324 +#define AI_OOBBOUTWIDTH 0x328 +#define AI_OOBCEXTWIDTH 0x340 +#define AI_OOBCINWIDTH 0x344 +#define AI_OOBCOUTWIDTH 0x348 +#define AI_OOBDEXTWIDTH 0x360 +#define AI_OOBDINWIDTH 0x364 +#define AI_OOBDOUTWIDTH 0x368 + +#define AI_IOCTRLSET 0x400 
+#define AI_IOCTRLCLEAR 0x404 +#define AI_IOCTRL 0x408 +#define AI_IOSTATUS 0x500 +#define AI_RESETCTRL 0x800 +#define AI_RESETSTATUS 0x804 + +#define AI_IOCTRLWIDTH 0x700 +#define AI_IOSTATUSWIDTH 0x704 + +#define AI_RESETREADID 0x808 +#define AI_RESETWRITEID 0x80c +#define AI_ERRLOGCTRL 0x900 +#define AI_ERRLOGDONE 0x904 +#define AI_ERRLOGSTATUS 0x908 +#define AI_ERRLOGADDRLO 0x90c +#define AI_ERRLOGADDRHI 0x910 +#define AI_ERRLOGID 0x914 +#define AI_ERRLOGUSER 0x918 +#define AI_ERRLOGFLAGS 0x91c +#define AI_INTSTATUS 0xa00 +#define AI_CONFIG 0xe00 +#define AI_ITCR 0xf00 +#define AI_ITIPOOBA 0xf10 +#define AI_ITIPOOBB 0xf14 +#define AI_ITIPOOBC 0xf18 +#define AI_ITIPOOBD 0xf1c +#define AI_ITIPOOBAOUT 0xf30 +#define AI_ITIPOOBBOUT 0xf34 +#define AI_ITIPOOBCOUT 0xf38 +#define AI_ITIPOOBDOUT 0xf3c +#define AI_ITOPOOBA 0xf50 +#define AI_ITOPOOBB 0xf54 +#define AI_ITOPOOBC 0xf58 +#define AI_ITOPOOBD 0xf5c +#define AI_ITOPOOBAIN 0xf70 +#define AI_ITOPOOBBIN 0xf74 +#define AI_ITOPOOBCIN 0xf78 +#define AI_ITOPOOBDIN 0xf7c +#define AI_ITOPRESET 0xf90 +#define AI_PERIPHERIALID4 0xfd0 +#define AI_PERIPHERIALID5 0xfd4 +#define AI_PERIPHERIALID6 0xfd8 +#define AI_PERIPHERIALID7 0xfdc +#define AI_PERIPHERIALID0 0xfe0 +#define AI_PERIPHERIALID1 0xfe4 +#define AI_PERIPHERIALID2 0xfe8 +#define AI_PERIPHERIALID3 0xfec +#define AI_COMPONENTID0 0xff0 +#define AI_COMPONENTID1 0xff4 +#define AI_COMPONENTID2 0xff8 +#define AI_COMPONENTID3 0xffc + +/* resetctrl */ +#define AIRC_RESET 1 + +/* errlogctrl */ +#define AIELC_TO_EXP_MASK 0x0000001f0 /* backplane timeout exponent */ +#define AIELC_TO_EXP_SHIFT 4 +#define AIELC_TO_ENAB_SHIFT 9 /* backplane timeout enable */ + +/* errlogdone */ +#define AIELD_ERRDONE_MASK 0x3 + +/* errlogstatus */ +#define AIELS_SLAVE_ERR 0x1 +#define AIELS_TIMEOUT 0x2 +#define AIELS_DECODE 0x3 +#define AIELS_TIMEOUT_MASK 0x3 + +/* errorlog status bit map, for SW use */ +#define AXI_WRAP_STS_NONE (0) +#define AXI_WRAP_STS_TIMEOUT (1<<0) +#define 
AXI_WRAP_STS_SLAVE_ERR (1<<1) +#define AXI_WRAP_STS_DECODE_ERR (1<<2) +#define AXI_WRAP_STS_PCI_RD_ERR (1<<3) +#define AXI_WRAP_STS_WRAP_RD_ERR (1<<4) +#define AXI_WRAP_STS_SET_CORE_FAIL (1<<5) + +/* errlogFrags */ +#define AXI_ERRLOG_FLAGS_WRITE_REQ (1<<24) + +/* config */ +#define AICFG_OOB 0x00000020 +#define AICFG_IOS 0x00000010 +#define AICFG_IOC 0x00000008 +#define AICFG_TO 0x00000004 +#define AICFG_ERRL 0x00000002 +#define AICFG_RST 0x00000001 + +/* bit defines for AI_OOBSELOUTB74 reg */ +#define OOB_SEL_OUTEN_B_5 15 +#define OOB_SEL_OUTEN_B_6 23 + +/* AI_OOBSEL for A/B/C/D, 0-7 */ +#define AI_OOBSEL_MASK 0x1F +#define AI_OOBSEL_0_SHIFT 0 +#define AI_OOBSEL_1_SHIFT 8 +#define AI_OOBSEL_2_SHIFT 16 +#define AI_OOBSEL_3_SHIFT 24 +#define AI_OOBSEL_4_SHIFT 0 +#define AI_OOBSEL_5_SHIFT 8 +#define AI_OOBSEL_6_SHIFT 16 +#define AI_OOBSEL_7_SHIFT 24 +#define AI_IOCTRL_ENABLE_D11_PME (1 << 14) + +/* bit Specific for AI_OOBSELOUTB30 */ +#define OOB_B_ALP_REQUEST 0 +#define OOB_B_HT_REQUEST 1 +#define OOB_B_ILP_REQUEST 2 +#define OOB_B_ALP_AVAIL_REQUEST 3 +#define OOB_B_HT_AVAIL_REQUEST 4 + +/* mask for interrupts from each core to wrapper */ +#define AI_OOBSELINA74_CORE_MASK 0x80808080 +#define AI_OOBSELINA30_CORE_MASK 0x80808080 + +/* axi id mask in the error log id */ +#define AI_ERRLOGID_AXI_ID_MASK 0x07 + +#endif /* _AIDMP_H */ diff --git a/bcmdhd.100.10.315.x/include/bcm_cfg.h b/bcmdhd.100.10.315.x/include/bcm_cfg.h new file mode 100644 index 0000000..f77dace --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcm_cfg.h @@ -0,0 +1,32 @@ +/* + * BCM common config options + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_cfg.h 672943 2016-11-30 08:54:06Z $ + */ + +#ifndef _bcm_cfg_h_ +#define _bcm_cfg_h_ +#endif /* _bcm_cfg_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcm_mpool_pub.h b/bcmdhd.100.10.315.x/include/bcm_mpool_pub.h new file mode 100644 index 0000000..90a14e2 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcm_mpool_pub.h @@ -0,0 +1,350 @@ +/* + * Memory pools library, Public interface + * + * API Overview + * + * This package provides a memory allocation subsystem based on pools of + * homogenous objects. + * + * Instrumentation is available for reporting memory utilization both + * on a per-data-structure basis and system wide. + * + * There are two main types defined in this API. + * + * pool manager: A singleton object that acts as a factory for + * pool allocators. 
It also is used for global + * instrumentation, such as reporting all blocks + * in use across all data structures. The pool manager + * creates and provides individual memory pools + * upon request to application code. + * + * memory pool: An object for allocating homogenous memory blocks. + * + * Global identifiers in this module use the following prefixes: + * bcm_mpm_* Memory pool manager + * bcm_mp_* Memory pool + * + * There are two main types of memory pools: + * + * prealloc: The contiguous memory block of objects can either be supplied + * by the client or malloc'ed by the memory manager. The objects are + * allocated out of a block of memory and freed back to the block. + * + * heap: The memory pool allocator uses the heap (malloc/free) for memory. + * In this case, the pool allocator is just providing statistics + * and instrumentation on top of the heap, without modifying the heap + * allocation implementation. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_mpool_pub.h 535090 2015-02-17 04:49:01Z $ + */ + +#ifndef _BCM_MPOOL_PUB_H +#define _BCM_MPOOL_PUB_H 1 + +#include /* needed for uint16 */ + +/* +************************************************************************** +* +* Type definitions, handles +* +************************************************************************** +*/ + +/* Forward declaration of OSL handle. */ +struct osl_info; + +/* Forward declaration of string buffer. */ +struct bcmstrbuf; + +/* + * Opaque type definition for the pool manager handle. This object is used for global + * memory pool operations such as obtaining a new pool, deleting a pool, iterating and + * instrumentation/debugging. + */ +struct bcm_mpm_mgr; +typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h; + +/* + * Opaque type definition for an instance of a pool. This handle is used for allocating + * and freeing memory through the pool, as well as management/instrumentation on this + * specific pool. + */ +struct bcm_mp_pool; +typedef struct bcm_mp_pool *bcm_mp_pool_h; + +/* + * To make instrumentation more readable, every memory + * pool must have a readable name. Pool names are up to + * 8 bytes including '\0' termination. (7 printable characters.) + */ +#define BCM_MP_NAMELEN 8 + +/* + * Type definition for pool statistics. + */ +typedef struct bcm_mp_stats { + char name[BCM_MP_NAMELEN]; /* Name of this pool. */ + unsigned int objsz; /* Object size allocated in this pool */ + uint16 nobj; /* Total number of objects in this pool */ + uint16 num_alloc; /* Number of objects currently allocated */ + uint16 high_water; /* Max number of allocated objects. */ + uint16 failed_alloc; /* Failed allocations. 
*/ +} bcm_mp_stats_t; + +/* +************************************************************************** +* +* API Routines on the pool manager. +* +************************************************************************** +*/ + +/* + * bcm_mpm_init() - initialize the whole memory pool system. + * + * Parameters: + * osh: INPUT Operating system handle. Needed for heap memory allocation. + * max_pools: INPUT Maximum number of mempools supported. + * mgr: OUTPUT The handle is written with the new pools manager object/handle. + * + * Returns: + * BCME_OK Object initialized successfully. May be used. + * BCME_NOMEM Initialization failed due to no memory. Object must not be used. + */ +int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp); + +/* + * bcm_mpm_deinit() - de-initialize the whole memory pool system. + * + * Parameters: + * mgr: INPUT Pointer to pool manager handle. + * + * Returns: + * BCME_OK Memory pool manager successfully de-initialized. + * other Indicated error occured during de-initialization. + */ +int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp); + +/* + * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The + * pool uses a contiguous block of pre-alloced + * memory. The memory block may either be provided + * by the client or dynamically allocated by the + * pool manager. + * + * Parameters: + * mgr: INPUT The handle to the pool manager + * obj_sz: INPUT Size of objects that will be allocated by the new pool + * Must be >= sizeof(void *). + * nobj: INPUT Maximum number of concurrently existing objects to support + * memstart INPUT Pointer to the memory to use, or NULL to malloc() + * memsize INPUT Number of bytes referenced from memstart (for error checking). + * Must be 0 if 'memstart' is NULL. + * poolname INPUT For instrumentation, the name of the pool + * newp: OUTPUT The handle for the new pool, if creation is successful + * + * Returns: + * BCME_OK Pool created ok. 
+ * other Pool not created due to indicated error. newpoolp set to NULL. + * + * + */ +int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr, + unsigned int obj_sz, + int nobj, + void *memstart, + unsigned int memsize, + const char poolname[BCM_MP_NAMELEN], + bcm_mp_pool_h *newp); + +/* + * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after + * all memory objects have been freed back to the pool. + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * pool: INPUT The handle of the pool to delete + * + * Returns: + * BCME_OK Pool deleted ok. + * other Pool not deleted due to indicated error. + * + */ +int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); + +/* + * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory + * pool allocator uses the heap (malloc/free) for memory. + * In this case, the pool allocator is just providing + * statistics and instrumentation on top of the heap, + * without modifying the heap allocation implementation. + * + * Parameters: + * mgr: INPUT The handle to the pool manager + * obj_sz: INPUT Size of objects that will be allocated by the new pool + * poolname INPUT For instrumentation, the name of the pool + * newp: OUTPUT The handle for the new pool, if creation is successful + * + * Returns: + * BCME_OK Pool created ok. + * other Pool not created due to indicated error. newpoolp set to NULL. + * + * + */ +int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz, + const char poolname[BCM_MP_NAMELEN], + bcm_mp_pool_h *newp); + +/* + * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after + * all memory objects have been freed back to the pool. + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * pool: INPUT The handle of the pool to delete + * + * Returns: + * BCME_OK Pool deleted ok. + * other Pool not deleted due to indicated error. 
+ * + */ +int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); + +/* + * bcm_mpm_stats() - Return stats for all pools + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * stats: OUTPUT Array of pool statistics. + * nentries: MOD Max elements in 'stats' array on INPUT. Actual number + * of array elements copied to 'stats' on OUTPUT. + * + * Returns: + * BCME_OK Ok + * other Error getting stats. + * + */ +int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries); + +/* + * bcm_mpm_dump() - Display statistics on all pools + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * b: OUTPUT Output buffer. + * + * Returns: + * BCME_OK Ok + * other Error during dump. + * + */ +int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b); + +/* + * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to + * compensate for alignment requirements of the objects. + * This function provides the padded object size. If clients + * pre-allocate a memory slab for a memory pool, the + * padded object size should be used by the client to allocate + * the memory slab (in order to provide sufficent space for + * the maximum number of objects). + * + * Parameters: + * mgr: INPUT The handle to the pools manager. + * obj_sz: INPUT Input object size. + * padded_obj_sz: OUTPUT Padded object size. + * + * Returns: + * BCME_OK Ok + * BCME_BADARG Bad arguments. + * + */ +int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz); + +/* +*************************************************************************** +* +* API Routines on a specific pool. +* +*************************************************************************** +*/ + +/* + * bcm_mp_alloc() - Allocate a memory pool object. + * + * Parameters: + * pool: INPUT The handle to the pool. + * + * Returns: + * A pointer to the new object. NULL on error. 
+ * + */ +void* bcm_mp_alloc(bcm_mp_pool_h pool); + +/* + * bcm_mp_free() - Free a memory pool object. + * + * Parameters: + * pool: INPUT The handle to the pool. + * objp: INPUT A pointer to the object to free. + * + * Returns: + * BCME_OK Ok + * other Error during free. + * + */ +int bcm_mp_free(bcm_mp_pool_h pool, void *objp); + +/* + * bcm_mp_stats() - Return stats for this pool + * + * Parameters: + * pool: INPUT The handle to the pool + * stats: OUTPUT Pool statistics + * + * Returns: + * BCME_OK Ok + * other Error getting statistics. + * + */ +void bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats); + +/* + * bcm_mp_dump() - Dump a pool + * + * Parameters: + * pool: INPUT The handle to the pool + * b OUTPUT Output buffer + * + * Returns: + * BCME_OK Ok + * other Error during dump. + * + */ +int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b); + +#endif /* _BCM_MPOOL_PUB_H */ diff --git a/bcmdhd.100.10.315.x/include/bcm_ring.h b/bcmdhd.100.10.315.x/include/bcm_ring.h new file mode 100644 index 0000000..3736175 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcm_ring.h @@ -0,0 +1,613 @@ +/* + * bcm_ring.h : Ring context abstraction + * The ring context tracks the WRITE and READ indices where elements may be + * produced and consumed respectively. All elements in the ring need to be + * fixed size. + * + * NOTE: A ring of size N, may only hold N-1 elements. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_ring.h 700321 2017-05-18 16:09:07Z $ + */ +#ifndef __bcm_ring_included__ +#define __bcm_ring_included__ +/* + * API Notes: + * + * Ring manipulation API allows for: + * Pending operations: Often before some work can be completed, it may be + * desired that several resources are available, e.g. space for production in + * a ring. Approaches such as, #1) reserve resources one by one and return them + * if another required resource is not available, or #2) employ a two pass + * algorithm of first testing whether all resources are available, have a + * an impact on performance critical code. The approach taken here is more akin + * to approach #2, where a test for resource availability essentially also + * provides the index for production in an un-committed state. + * The same approach is taken for the consumer side. 
+ * + * - Pending production: Fetch the next index where a ring element may be + * produced. The caller may not commit the WRITE of the element. + * - Pending consumption: Fetch the next index where a ring element may be + * consumed. The caller may not commit the READ of the element. + * + * Producer side API: + * - bcm_ring_is_full : Test whether ring is full + * - bcm_ring_prod : Fetch index where an element may be produced (commit) + * - bcm_ring_prod_pend: Fetch index where an element may be produced (pending) + * - bcm_ring_prod_done: Commit a previous pending produce fetch + * - bcm_ring_prod_avail: Fetch total number free slots eligible for production + * + * Consumer side API: + * - bcm_ring_is_empty : Test whether ring is empty + * - bcm_ring_cons : Fetch index where an element may be consumed (commit) + * - bcm_ring_cons_pend: Fetch index where an element may be consumed (pending) + * - bcm_ring_cons_done: Commit a previous pending consume fetch + * - bcm_ring_cons_avail: Fetch total number elements eligible for consumption + * + * - bcm_ring_sync_read: Sync read offset in peer ring, from local ring + * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring + * + * +---------------------------------------------------------------------------- + * + * Design Notes: + * Following items are not tracked in a ring context (design decision) + * - width of a ring element. + * - depth of the ring. + * - base of the buffer, where the elements are stored. + * - count of number of free slots in the ring + * + * Implementation Notes: + * - When BCM_RING_DEBUG is enabled, need explicit bcm_ring_init(). + * - BCM_RING_EMPTY and BCM_RING_FULL are (-1) + * + * +---------------------------------------------------------------------------- + * + * Usage Notes: + * An application may incarnate a ring of some fixed sized elements, by defining + * - a ring data buffer to store the ring elements. 
+ * - depth of the ring (max number of elements managed by ring context). + * Preferrably, depth may be represented as a constant. + * - width of a ring element: to be used in pointer arithmetic with the ring's + * data buffer base and an index to fetch the ring element. + * + * Use bcm_workq_t to instantiate a pair of workq constructs, one for the + * producer and the other for the consumer, both pointing to the same circular + * buffer. The producer may operate on it's own local workq and flush the write + * index to the consumer. Likewise the consumer may use its local workq and + * flush the read index to the producer. This way we do not repeatedly access + * the peer's context. The two peers may reside on different CPU cores with a + * private L1 data cache. + * +---------------------------------------------------------------------------- + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcm_ring.h 700321 2017-05-18 16:09:07Z $ + * + * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- + * vim: set ts=4 noet sw=4 tw=80: + * + * +---------------------------------------------------------------------------- + */ + +#ifdef ____cacheline_aligned +#define __ring_aligned ____cacheline_aligned +#else +#define __ring_aligned +#endif // endif + +/* Conditional compile for debug */ +/* #define BCM_RING_DEBUG */ + +#define BCM_RING_EMPTY (-1) +#define BCM_RING_FULL (-1) +#define BCM_RING_NULL ((bcm_ring_t *)NULL) + +#if defined(BCM_RING_DEBUG) +#define RING_ASSERT(exp) ASSERT(exp) +#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \ + ((ring)->self == (ring))) +#else /* ! BCM_RING_DEBUG */ +#define RING_ASSERT(exp) do {} while (0) +#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL) +#endif /* ! BCM_RING_DEBUG */ + +#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0) + +/* + * +---------------------------------------------------------------------------- + * Ring Context + * +---------------------------------------------------------------------------- + */ +typedef struct bcm_ring { /* Ring context */ +#if defined(BCM_RING_DEBUG) + struct bcm_ring *self; /* ptr to self for IS VALID test */ +#endif /* BCM_RING_DEBUG */ + int write __ring_aligned; /* WRITE index in a circular ring */ + int read __ring_aligned; /* READ index in a circular ring */ +} bcm_ring_t; + +static INLINE void bcm_ring_init(bcm_ring_t *ring); +static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from); +static INLINE bool bcm_ring_is_empty(bcm_ring_t *ring); + +static INLINE int __bcm_ring_next_write(bcm_ring_t *ring, const int ring_size); + +static INLINE bool __bcm_ring_full(bcm_ring_t *ring, int next_write); +static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write); +static INLINE int bcm_ring_prod_pend(bcm_ring_t *ring, int 
*pend_write, + const int ring_size); +static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read); +static INLINE int bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, + const int ring_size); +static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self); +static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self); + +static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE void bcm_ring_cons_all(bcm_ring_t *ring); + +/** + * bcm_ring_init - initialize a ring context. + * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_init(bcm_ring_t *ring) +{ + ASSERT(ring != (bcm_ring_t *)NULL); +#if defined(BCM_RING_DEBUG) + ring->self = ring; +#endif /* BCM_RING_DEBUG */ + ring->write = 0; + ring->read = 0; +} + +/** + * bcm_ring_copy - copy construct a ring + * @to: pointer to the new ring context + * @from: pointer to orig ring context + */ +static INLINE void +bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from) +{ + bcm_ring_init(to); + + to->write = from->write; + to->read = from->read; +} + +/** + * bcm_ring_is_empty - "Boolean" test whether ring is empty. + * @ring: pointer to a ring context + * + * PS. does not return BCM_RING_EMPTY value. + */ +static INLINE bool +bcm_ring_is_empty(bcm_ring_t *ring) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + return (ring->read == ring->write); +} + +/** + * __bcm_ring_next_write - determine the index where the next write may occur + * (with wrap-around). + * @ring: pointer to a ring context + * @ring_size: size of the ring + * + * PRIVATE INTERNAL USE ONLY. 
+ */ +static INLINE int +__bcm_ring_next_write(bcm_ring_t *ring, const int ring_size) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + return ((ring->write + 1) % ring_size); +} + +/** + * __bcm_ring_full - support function for ring full test. + * @ring: pointer to a ring context + * @next_write: next location in ring where an element is to be produced + * + * PRIVATE INTERNAL USE ONLY. + */ +static INLINE bool +__bcm_ring_full(bcm_ring_t *ring, int next_write) +{ + return (next_write == ring->read); +} + +/** + * bcm_ring_is_full - "Boolean" test whether a ring is full. + * @ring: pointer to a ring context + * @ring_size: size of the ring + * + * PS. does not return BCM_RING_FULL value. + */ +static INLINE bool +bcm_ring_is_full(bcm_ring_t *ring, const int ring_size) +{ + int next_write; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + next_write = __bcm_ring_next_write(ring, ring_size); + return __bcm_ring_full(ring, next_write); +} + +/** + * bcm_ring_prod_done - commit a previously pending index where production + * was requested. + * @ring: pointer to a ring context + * @write: index into ring upto where production was done. + * +---------------------------------------------------------------------------- + */ +static INLINE void +bcm_ring_prod_done(bcm_ring_t *ring, int write) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + ring->write = write; +} + +/** + * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be + * produced. 
+ * @ring: pointer to a ring context + * @pend_write: next index, after the returned index + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, const int ring_size) +{ + int rtn; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + *pend_write = __bcm_ring_next_write(ring, ring_size); + if (__bcm_ring_full(ring, *pend_write)) { + *pend_write = BCM_RING_FULL; + rtn = BCM_RING_FULL; + } else { + /* production is not committed, caller needs to explicitly commit */ + rtn = ring->write; + } + return rtn; +} + +/** + * bcm_ring_prod - Fetch and "commit" the next index where a ring element may + * be produced. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod(bcm_ring_t *ring, const int ring_size) +{ + int next_write, prod_write; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + + next_write = __bcm_ring_next_write(ring, ring_size); + if (__bcm_ring_full(ring, next_write)) { + prod_write = BCM_RING_FULL; + } else { + prod_write = ring->write; + bcm_ring_prod_done(ring, next_write); /* "commit" production */ + } + return prod_write; +} + +/** + * bcm_ring_cons_done - commit a previously pending read + * @ring: pointer to a ring context + * @read: index upto which elements have been consumed. + */ +static INLINE void +bcm_ring_cons_done(bcm_ring_t *ring, int read) +{ + RING_ASSERT(BCM_RING_IS_VALID(ring)); + ring->read = read; +} + +/** + * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring + * element may be consumed. + * @ring: pointer to a ring context + * @pend_read: index into ring upto which elements may be consumed. 
+ * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, const int ring_size) +{ + int rtn; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (bcm_ring_is_empty(ring)) { + *pend_read = BCM_RING_EMPTY; + rtn = BCM_RING_EMPTY; + } else { + *pend_read = (ring->read + 1) % ring_size; + /* consumption is not committed, caller needs to explicitly commit */ + rtn = ring->read; + } + return rtn; +} + +/** + * bcm_ring_cons - fetch and "commit" the next index where a ring element may + * be consumed. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons(bcm_ring_t *ring, const int ring_size) +{ + int cons_read; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (bcm_ring_is_empty(ring)) { + cons_read = BCM_RING_EMPTY; + } else { + cons_read = ring->read; + ring->read = (ring->read + 1) % ring_size; /* read is committed */ + } + return cons_read; +} + +/** + * bcm_ring_sync_read - on consumption, update peer's read index. + * @peer: pointer to peer's producer ring context + * @self: pointer to consumer's ring context + */ +static INLINE void +bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->read = self->read; /* flush read update to peer producer */ +} + +/** + * bcm_ring_sync_write - on production, update peer's write index. + * @peer: pointer to peer's consumer ring context + * @self: pointer to producer's ring context + */ +static INLINE void +bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self) +{ + RING_ASSERT(BCM_RING_IS_VALID(peer)); + RING_ASSERT(BCM_RING_IS_VALID(self)); + peer->write = self->write; /* flush write update to peer consumer */ +} + +/** + * bcm_ring_prod_avail - fetch total number of available empty slots in the + * ring for production. 
+ * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size) +{ + int prod_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->write >= ring->read) { + prod_avail = (ring_size - (ring->write - ring->read) - 1); + } else { + prod_avail = (ring->read - (ring->write + 1)); + } + ASSERT(prod_avail < ring_size); + return prod_avail; +} + +/** + * bcm_ring_cons_avail - fetch total number of available elements for consumption. + * @ring: pointer to a ring context + * @ring_size: size of the ring + */ +static INLINE int +bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size) +{ + int cons_avail; + RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size)); + if (ring->read == ring->write) { + cons_avail = 0; + } else if (ring->read > ring->write) { + cons_avail = ((ring_size - ring->read) + ring->write); + } else { + cons_avail = ring->write - ring->read; + } + ASSERT(cons_avail < ring_size); + return cons_avail; +} + +/** + * bcm_ring_cons_all - set ring in state where all elements are consumed. + * @ring: pointer to a ring context + */ +static INLINE void +bcm_ring_cons_all(bcm_ring_t *ring) +{ + ring->read = ring->write; +} + +/** + * Work Queue + * A work Queue is composed of a ring of work items, of a specified depth. + * It HAS-A bcm_ring object, comprising of a RD and WR offset, to implement a + * producer/consumer circular ring. + */ + +struct bcm_workq { + bcm_ring_t ring; /* Ring context abstraction */ + struct bcm_workq *peer; /* Peer workq context */ + void *buffer; /* Buffer storage for work items in workQ */ + int ring_size; /* Depth of workQ */ +} __ring_aligned; + +typedef struct bcm_workq bcm_workq_t; + +/* #define BCM_WORKQ_DEBUG */ +#if defined(BCM_WORKQ_DEBUG) +#define WORKQ_ASSERT(exp) ASSERT(exp) +#else /* ! BCM_WORKQ_DEBUG */ +#define WORKQ_ASSERT(exp) do {} while (0) +#endif /* ! 
BCM_WORKQ_DEBUG */ + +#define WORKQ_AUDIT(workq) \ + WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \ + WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size); + +#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL) + +#define WORKQ_PEER(workq) ((workq)->peer) +#define WORKQ_RING(workq) (&((workq)->ring)) +#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring)) + +#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \ + WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \ + WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \ + ((__elem_type *)((__workq)->buffer)) + (__index); \ +}) + +static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer, + void *buffer, int ring_size); + +static INLINE bool bcm_workq_is_empty(bcm_workq_t *workq_prod); + +static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod); +static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons); + +static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod); +static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons); + +/** + * bcm_workq_init - initialize a workq + * @workq: pointer to a workq context + * @buffer: pointer to a pre-allocated circular buffer to serve as a ring + * @ring_size: size of the ring in terms of max number of elements. 
+ */ +static INLINE void +bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer, + void *buffer, int ring_size) +{ + ASSERT(workq != BCM_WORKQ_NULL); + ASSERT(workq_peer != BCM_WORKQ_NULL); + ASSERT(buffer != NULL); + ASSERT(ring_size > 0); + + WORKQ_PEER(workq) = workq_peer; + WORKQ_PEER(workq_peer) = workq; + + bcm_ring_init(WORKQ_RING(workq)); + bcm_ring_init(WORKQ_RING(workq_peer)); + + workq->buffer = workq_peer->buffer = buffer; + workq->ring_size = workq_peer->ring_size = ring_size; +} + +/** + * bcm_workq_empty - test whether there is work + * @workq_prod: producer's workq + */ +static INLINE bool +bcm_workq_is_empty(bcm_workq_t *workq_prod) +{ + return bcm_ring_is_empty(WORKQ_RING(workq_prod)); +} + +/** + * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring + * @workq_prod: producer's workq whose write index must be synced to peer + */ +static INLINE void +bcm_workq_prod_sync(bcm_workq_t *workq_prod) +{ + WORKQ_AUDIT(workq_prod); + + /* cons::write <--- prod::write */ + bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod)); +} + +/** + * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring + * @workq_cons: consumer's workq whose read index must be synced to peer + */ +static INLINE void +bcm_workq_cons_sync(bcm_workq_t *workq_cons) +{ + WORKQ_AUDIT(workq_cons); + + /* prod::read <--- cons::read */ + bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons)); +} + +/** + * bcm_workq_prod_refresh - Fetch the updated consumer's read index + * @workq_prod: producer's workq whose read index must be refreshed from peer + */ +static INLINE void +bcm_workq_prod_refresh(bcm_workq_t *workq_prod) +{ + WORKQ_AUDIT(workq_prod); + + /* prod::read <--- cons::read */ + bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod)); +} + +/** + * bcm_workq_cons_refresh - Fetch the updated producer's write index + * @workq_cons: consumer's workq whose write index must be 
refreshed from peer + */ +static INLINE void +bcm_workq_cons_refresh(bcm_workq_t *workq_cons) +{ + WORKQ_AUDIT(workq_cons); + + /* cons::write <--- prod::write */ + bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons)); +} + +#endif /* ! __bcm_ring_included__ */ diff --git a/bcmdhd.100.10.315.x/include/bcmbloom.h b/bcmdhd.100.10.315.x/include/bcmbloom.h new file mode 100644 index 0000000..62e8b01 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmbloom.h @@ -0,0 +1,79 @@ +/* + * Bloom filter support + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmbloom.h 714397 2017-08-04 08:24:38Z $ + */ + +#ifndef _bcmbloom_h_ +#define _bcmbloom_h_ + +#include <typedefs.h> +#ifdef BCMDRIVER +#include <osl.h> +#else +#include <stddef.h> /* For size_t */ +#endif // endif + +struct bcm_bloom_filter; +typedef struct bcm_bloom_filter bcm_bloom_filter_t; + +typedef void* (*bcm_bloom_alloc_t)(void *ctx, uint size); +typedef void (*bcm_bloom_free_t)(void *ctx, void *buf, uint size); +typedef uint (*bcm_bloom_hash_t)(void* ctx, uint idx, const uint8 *tag, uint len); + +/* create/allocate a bloom filter. filter size can be 0 for validate only filters */ +int bcm_bloom_create(bcm_bloom_alloc_t alloc_cb, + bcm_bloom_free_t free_cb, void *callback_ctx, uint max_hash, + uint filter_size /* bytes */, bcm_bloom_filter_t **bloom); + +/* destroy bloom filter */ +int bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb); + +/* add a hash function to filter, return an index */ +int bcm_bloom_add_hash(bcm_bloom_filter_t *filter, bcm_bloom_hash_t hash, uint *idx); + +/* remove the hash function at index from filter */ +int bcm_bloom_remove_hash(bcm_bloom_filter_t *filter, uint idx); + +/* check if given tag is member of the filter. If buf is NULL and/or buf_len is 0 + * then use the internal state. BCME_OK if member, BCME_NOTFOUND if not, + * or other error (e.g. BADARG) + */ +bool bcm_bloom_is_member(bcm_bloom_filter_t *filter, + const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len); + +/* add a member to the filter. invalid for validate_only filters */ +int bcm_bloom_add_member(bcm_bloom_filter_t *filter, const uint8 *tag, uint tag_len); + +/* no support for remove member */ + +/* get the filter data from state. 
BCME_BUFTOOSHORT w/ required length in buf_len + * if supplied size is insufficient + */ +int bcm_bloom_get_filter_data(bcm_bloom_filter_t *filter, + uint buf_size, uint8 *buf, uint *buf_len); + +#endif /* _bcmbloom_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmcdc.h b/bcmdhd.100.10.315.x/include/bcmcdc.h new file mode 100644 index 0000000..f642142 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmcdc.h @@ -0,0 +1,121 @@ +/* + * CDC network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmcdc.h 700076 2017-05-17 14:42:22Z $ + */ +#ifndef _bcmcdc_h_ +#define _bcmcdc_h_ +#include + +typedef struct cdc_ioctl { + uint32 cmd; /* ioctl command value */ + uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */ + uint32 flags; /* flag defns given below */ + uint32 status; /* status code returned from the device */ +} cdc_ioctl_t; + +/* Max valid buffer size that can be sent to the dongle */ +#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN + +/* len field is divided into input and output buffer lengths */ +#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */ + /* excluding IOCTL header */ +#define CDCL_IOC_OUTLEN_SHIFT 0 +#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */ +#define CDCL_IOC_INLEN_SHIFT 16 + +/* CDC flag definitions */ +#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */ +#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */ +#define CDCF_IOC_OVL_IDX_MASK 0x3c /* overlay region index mask */ +#define CDCF_IOC_OVL_RSV 0x40 /* 1=reserve this overlay region */ +#define CDCF_IOC_OVL 0x80 /* 1=this ioctl corresponds to an overlay */ +#define CDCF_IOC_ACTION_MASK 0xfe /* SET/GET, OVL_IDX, OVL_RSV, OVL mask */ +#define CDCF_IOC_ACTION_SHIFT 1 /* SET/GET, OVL_IDX, OVL_RSV, OVL shift */ +#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */ +#define CDCF_IOC_IF_SHIFT 12 +#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */ +#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */ + +#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT) +#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT) + +#define CDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)) +#define CDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT))) + +/* + * BDC header + * + * The 
BDC header is used on data packets to convey priority across USB. + */ + +struct bdc_header { + uint8 flags; /* Flags */ + uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 USB flow control info */ + uint8 flags2; + uint8 dataOffset; /* Offset from end of BDC header to packet data, in + * 4-byte words. Leaves room for optional headers. + */ +}; + +#define BDC_HEADER_LEN 4 + +/* flags field bitmap */ +#define BDC_FLAG_EXEMPT 0x03 /* EXT_STA: encryption exemption (host -> dongle?) */ +#define BDC_FLAG_80211_PKT 0x01 /* Packet is in 802.11 format (dongle -> host) */ +#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */ +#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums: host->device */ +#define BDC_FLAG_EVENT_MSG 0x08 /* Payload contains an event msg: device->host */ +#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ +#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ + +/* priority field bitmap */ +#define BDC_PRIORITY_MASK 0x07 +#define BDC_PRIORITY_FC_MASK 0xf0 /* flow control info mask */ +#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */ + +/* flags2 field bitmap */ +#define BDC_FLAG2_IF_MASK 0x0f /* interface index (host <-> dongle) */ +#define BDC_FLAG2_IF_SHIFT 0 +#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */ + /* FLOW CONTROL info only */ + +/* version numbers */ +#define BDC_PROTO_VER_1 1 /* Old Protocol version */ +#define BDC_PROTO_VER 2 /* Protocol version */ + +/* flags2.if field access macros */ +#define BDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT)) +#define BDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT))) + +#endif /* _bcmcdc_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmdefs.h b/bcmdhd.100.10.315.x/include/bcmdefs.h new file mode 100644 index 0000000..a29c46e --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmdefs.h @@ -0,0 +1,600 @@ +/* + * 
Misc system wide definitions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmdefs.h 700870 2017-05-22 19:05:22Z $ + */ + +#ifndef _bcmdefs_h_ +#define _bcmdefs_h_ + +/* + * One doesn't need to include this file explicitly, gets included automatically if + * typedefs.h is included. + */ + +/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function + * arguments or local variables. + */ +#define BCM_REFERENCE(data) ((void)(data)) + +/* Allow for suppressing unused variable warnings. */ +#ifdef __GNUC__ +#define UNUSED_VAR __attribute__ ((unused)) +#else +#define UNUSED_VAR +#endif // endif + +/* GNU GCC 4.6+ supports selectively turning off a warning. + * Define these diagnostic macros to help suppress cast-qual warning + * until all the work can be done to fix the casting issues. 
+ */ +#if defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#define GCC_DIAGNOSTIC_POP() \ + _Pragma("GCC diagnostic pop") +#else +#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() +#define GCC_DIAGNOSTIC_POP() +#endif /* Diagnostic macros not defined */ + +/* Compile-time assert can be used in place of ASSERT if the expression evaluates + * to a constant at compile time. + */ +#define STATIC_ASSERT(expr) { \ + /* Make sure the expression is constant. */ \ + typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \ + /* Make sure the expression is true. */ \ + typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \ +} + +/* Reclaiming text and data : + * The following macros specify special linker sections that can be reclaimed + * after a system is considered 'up'. + * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN, + * as in most cases, the attach function calls the detach function to clean up on error). + */ +#if defined(BCM_RECLAIM) + +extern bool bcm_reclaimed; +extern bool bcm_attach_part_reclaimed; +extern bool bcm_preattach_part_reclaimed; +extern bool bcm_postattach_part_reclaimed; + +#define RECLAIMED() (bcm_reclaimed) +#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed) +#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed) +#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed) + +#if defined(BCM_RECLAIM_ATTACH_FN_DATA) +#define _data __attribute__ ((__section__ (".dataini2." #_data))) _data +#define _fn __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn + +/* Relocate attach symbols to save-restore region to increase pre-reclaim heap size. */ +#define BCM_SRM_ATTACH_DATA(_data) __attribute__ ((__section__ (".datasrm." 
#_data))) _data +#define BCM_SRM_ATTACH_FN(_fn) __attribute__ ((__section__ (".textsrm." #_fn), noinline)) _fn + +#ifndef PREATTACH_NORECLAIM +#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini3." #_data))) _data +#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini3." #_fn), noinline)) _fn +#else +#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini2." #_data))) _data +#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn +#endif /* PREATTACH_NORECLAIM */ +#define BCMPOSTATTACHDATA(_data) __attribute__ ((__section__ (".dataini5." #_data))) _data +#define BCMPOSTATTACHFN(_fn) __attribute__ ((__section__ (".textini5." #_fn), noinline)) _fn +#else /* BCM_RECLAIM_ATTACH_FN_DATA */ +#define _data _data +#define _fn _fn +#define BCMPREATTACHDATA(_data) _data +#define BCMPREATTACHFN(_fn) _fn +#define BCMPOSTATTACHDATA(_data) _data +#define BCMPOSTATTACHFN(_fn) _fn +#endif /* BCM_RECLAIM_ATTACH_FN_DATA */ + +#ifdef BCMDBG_SR +/* + * Don't reclaim so we can compare SR ASM + */ +#define BCMPREATTACHDATASR(_data) _data +#define BCMPREATTACHFNSR(_fn) _fn +#define BCMATTACHDATASR(_data) _data +#define BCMATTACHFNSR(_fn) _fn +#else +#define BCMPREATTACHDATASR(_data) BCMPREATTACHDATA(_data) +#define BCMPREATTACHFNSR(_fn) BCMPREATTACHFN(_fn) +#define BCMATTACHDATASR(_data) _data +#define BCMATTACHFNSR(_fn) _fn +#endif // endif + +#if defined(BCM_RECLAIM_INIT_FN_DATA) +#define _data __attribute__ ((__section__ (".dataini1." #_data))) _data +#define _fn __attribute__ ((__section__ (".textini1." 
#_fn), noinline)) _fn +#define CONST +#else /* BCM_RECLAIM_INIT_FN_DATA */ +#define _data _data +#define _fn _fn +#ifndef CONST +#define CONST const +#endif // endif +#endif /* BCM_RECLAIM_INIT_FN_DATA */ + +/* Non-manufacture or internal attach function/dat */ +#define BCMNMIATTACHFN(_fn) _fn +#define BCMNMIATTACHDATA(_data) _data + +#if defined(BCM_CISDUMP_NO_RECLAIM) +#define BCMCISDUMPATTACHFN(_fn) _fn +#define BCMCISDUMPATTACHDATA(_data) _data +#else +#define BCMCISDUMPATTACHFN(_fn) BCMNMIATTACHFN(_fn) +#define BCMCISDUMPATTACHDATA(_data) BCMNMIATTACHDATA(_data) +#endif // endif + +/* SROM with OTP support */ +#if defined(BCMOTPSROM) +#define BCMSROMATTACHFN(_fn) _fn +#define BCMSROMATTACHDATA(_data) _data +#else +#define BCMSROMATTACHFN(_fn) BCMNMIATTACHFN(_fn) +#define BCMSROMATTACHDATA(_data) BCMNMIATTACHFN(_data) +#endif /* BCMOTPSROM */ + +#if defined(BCM_CISDUMP_NO_RECLAIM) +#define BCMSROMCISDUMPATTACHFN(_fn) _fn +#define BCMSROMCISDUMPATTACHDATA(_data) _data +#else +#define BCMSROMCISDUMPATTACHFN(_fn) BCMSROMATTACHFN(_fn) +#define BCMSROMCISDUMPATTACHDATA(_data) BCMSROMATTACHDATA(_data) +#endif /* BCM_CISDUMP_NO_RECLAIM */ + +#ifdef BCMNODOWN +#define _fn _fn +#else +#define _fn _fn +#endif // endif + +#else /* BCM_RECLAIM */ + +#define bcm_reclaimed (1) +#define bcm_attach_part_reclaimed (1) +#define bcm_preattach_part_reclaimed (1) +#define bcm_postattach_part_reclaimed (1) +#define _data _data +#define _fn _fn +#define BCM_SRM_ATTACH_DATA(_data) _data +#define BCM_SRM_ATTACH_FN(_fn) _fn +#define BCMPREATTACHDATA(_data) _data +#define BCMPREATTACHFN(_fn) _fn +#define BCMPOSTATTACHDATA(_data) _data +#define BCMPOSTATTACHFN(_fn) _fn +#define _data _data +#define _fn _fn +#define _fn _fn +#define BCMNMIATTACHFN(_fn) _fn +#define BCMNMIATTACHDATA(_data) _data +#define BCMSROMATTACHFN(_fn) _fn +#define BCMSROMATTACHDATA(_data) _data +#define BCMPREATTACHFNSR(_fn) _fn +#define BCMPREATTACHDATASR(_data) _data +#define BCMATTACHFNSR(_fn) _fn +#define 
BCMATTACHDATASR(_data) _data +#define BCMSROMATTACHFN(_fn) _fn +#define BCMSROMATTACHDATA(_data) _data +#define BCMCISDUMPATTACHFN(_fn) _fn +#define BCMCISDUMPATTACHDATA(_data) _data +#define BCMSROMCISDUMPATTACHFN(_fn) _fn +#define BCMSROMCISDUMPATTACHDATA(_data) _data +#define CONST const + +#define RECLAIMED() (bcm_reclaimed) +#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed) +#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed) +#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed) + +#endif /* BCM_RECLAIM */ + +#define BCMUCODEDATA(_data) _data + +#if defined(BCM_DMA_CT) && !defined(BCM_DMA_CT_DISABLED) +#define BCMUCODEFN(_fn) _fn +#else +#define BCMUCODEFN(_fn) _fn +#endif /* BCM_DMA_CT */ + +#if !defined STB +#undef BCM47XX_CA9 +#endif /* STB */ + +/* BCMFASTPATH Related Macro defines +*/ +#ifndef BCMFASTPATH +#if defined(STB) +#define BCMFASTPATH __attribute__ ((__section__ (".text.fastpath"))) +#define BCMFASTPATH_HOST __attribute__ ((__section__ (".text.fastpath_host"))) +#else /* mips || BCM47XX_CA9 || STB */ +#define BCMFASTPATH +#define BCMFASTPATH_HOST +#endif // endif +#endif /* BCMFASTPATH */ + +/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from + * ROM). This should eliminate the need to manually specify these functions in the ROM config file. + * It should only be used in special cases where the function must be in RAM for *all* ROM-based + * chips. + */ + #define BCMRAMFN(_fn) _fn + +/* Use BCMSPECSYM() macro to tag symbols going to a special output section in the binary. */ +#define BCMSPECSYM(_sym) __attribute__ ((__section__ (".special." 
#_sym))) _sym + +#define STATIC static + +/* Bus types */ +#define SI_BUS 0 /* SOC Interconnect */ +#define PCI_BUS 1 /* PCI target */ +#define PCMCIA_BUS 2 /* PCMCIA target */ +#define SDIO_BUS 3 /* SDIO target */ +#define JTAG_BUS 4 /* JTAG */ +#define USB_BUS 5 /* USB (does not support R/W REG) */ +#define SPI_BUS 6 /* gSPI target */ +#define RPC_BUS 7 /* RPC target */ + +/* Allows size optimization for single-bus image */ +#ifdef BCMBUSTYPE +#define BUSTYPE(bus) (BCMBUSTYPE) +#else +#define BUSTYPE(bus) (bus) +#endif // endif + +#ifdef BCMBUSCORETYPE +#define BUSCORETYPE(ct) (BCMBUSCORETYPE) +#else +#define BUSCORETYPE(ct) (ct) +#endif // endif + +/* Allows size optimization for single-backplane image */ +#ifdef BCMCHIPTYPE +#define CHIPTYPE(bus) (BCMCHIPTYPE) +#else +#define CHIPTYPE(bus) (bus) +#endif // endif + +/* Allows size optimization for SPROM support */ +#if defined(BCMSPROMBUS) +#define SPROMBUS (BCMSPROMBUS) +#elif defined(SI_PCMCIA_SROM) +#define SPROMBUS (PCMCIA_BUS) +#else +#define SPROMBUS (PCI_BUS) +#endif // endif + +/* Allows size optimization for single-chip image */ +#ifdef BCMCHIPID +#define CHIPID(chip) (BCMCHIPID) +#else +#define CHIPID(chip) (chip) +#endif // endif + +#ifdef BCMCHIPREV +#define CHIPREV(rev) (BCMCHIPREV) +#else +#define CHIPREV(rev) (rev) +#endif // endif + +#ifdef BCMPCIEREV +#define PCIECOREREV(rev) (BCMPCIEREV) +#else +#define PCIECOREREV(rev) (rev) +#endif // endif + +#ifdef BCMPMUREV +#define PMUREV(rev) (BCMPMUREV) +#else +#define PMUREV(rev) (rev) +#endif // endif + +#ifdef BCMCCREV +#define CCREV(rev) (BCMCCREV) +#else +#define CCREV(rev) (rev) +#endif // endif + +#ifdef BCMGCIREV +#define GCIREV(rev) (BCMGCIREV) +#else +#define GCIREV(rev) (rev) +#endif // endif + +#ifdef BCMCR4REV +#define CR4REV (BCMCR4REV) +#endif // endif + +/* Defines for DMA Address Width - Shared between OSL and HNDDMA */ +#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */ +#define DMADDR_MASK_30 0xc0000000 /* Address mask for 
30-bits */ +#define DMADDR_MASK_26 0xFC000000 /* Address maks for 26-bits */ +#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */ + +#define DMADDRWIDTH_26 26 /* 26-bit addressing capability */ +#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */ +#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */ +#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */ +#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */ + +typedef struct { + uint32 loaddr; + uint32 hiaddr; +} dma64addr_t; + +#define PHYSADDR64HI(_pa) ((_pa).hiaddr) +#define PHYSADDR64HISET(_pa, _val) \ + do { \ + (_pa).hiaddr = (_val); \ + } while (0) +#define PHYSADDR64LO(_pa) ((_pa).loaddr) +#define PHYSADDR64LOSET(_pa, _val) \ + do { \ + (_pa).loaddr = (_val); \ + } while (0) + +#ifdef BCMDMA64OSL +typedef dma64addr_t dmaaddr_t; +#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa) +#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val) +#define PHYSADDRLO(_pa) PHYSADDR64LO(_pa) +#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val) +#define PHYSADDRTOULONG(_pa, _ulong) \ + do { \ + _ulong = ((unsigned long long)(_pa).hiaddr << 32) | ((_pa).loaddr); \ + } while (0) + +#else +typedef unsigned long dmaaddr_t; +#define PHYSADDRHI(_pa) (0) +#define PHYSADDRHISET(_pa, _val) +#define PHYSADDRLO(_pa) ((_pa)) +#define PHYSADDRLOSET(_pa, _val) \ + do { \ + (_pa) = (_val); \ + } while (0) +#endif /* BCMDMA64OSL */ +#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0) + +/* One physical DMA segment */ +typedef struct { + dmaaddr_t addr; + uint32 length; +} hnddma_seg_t; + +#define MAX_DMA_SEGS 8 + +typedef struct { + void *oshdmah; /* Opaque handle for OSL to store its information */ + uint origsize; /* Size of the virtual packet */ + uint nsegs; + hnddma_seg_t segs[MAX_DMA_SEGS]; +} hnddma_seg_map_t; + +/* packet headroom necessary to accommodate the largest header in the system, (i.e TXOFF). 
+ * By doing, we avoid the need to allocate an extra buffer for the header when bridging to WL. + * There is a compile time check in wlc.c which ensure that this value is at least as big + * as TXOFF. This value is used in dma_rxfill (hnddma.c). + */ + +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY) +/* add 40 bytes to allow for extra RPC header and info */ +#define BCMEXTRAHDROOM 260 +#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ +#if defined(STB) +#define BCMEXTRAHDROOM 224 +#else +#define BCMEXTRAHDROOM 204 +#endif // endif +#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef SDALIGN +#define SDALIGN 32 +#endif // endif + +/* Headroom required for dongle-to-host communication. Packets allocated + * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should + * leave this much room in front for low-level message headers which may + * be needed to get across the dongle bus to the host. (These messages + * don't go over the network, so room for the full WL header above would + * be a waste.). +*/ +#define BCMDONGLEHDRSZ 12 +#define BCMDONGLEPADSZ 16 + +#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ) + +#if defined(NO_BCMDBG_ASSERT) +# undef BCMDBG_ASSERT +# undef BCMASSERT_LOG +#endif // endif + +#if defined(BCMASSERT_LOG) +#define BCMASSERT_SUPPORT +#endif // endif + +/* Macros for doing definition and get/set of bitfields + * Usage example, e.g. a three-bit field (bits 4-6): + * #define _M BITFIELD_MASK(3) + * #define _S 4 + * ... 
+ * regval = R_REG(osh, ®s->regfoo); + * field = GFIELD(regval, ); + * regval = SFIELD(regval, , 1); + * W_REG(osh, ®s->regfoo, regval); + */ +#define BITFIELD_MASK(width) \ + (((unsigned)1 << (width)) - 1) +#define GFIELD(val, field) \ + (((val) >> field ## _S) & field ## _M) +#define SFIELD(val, field, bits) \ + (((val) & (~(field ## _M << field ## _S))) | \ + ((unsigned)(bits) << field ## _S)) + +/* define BCMSMALL to remove misc features for memory-constrained environments */ +#ifdef BCMSMALL +#undef BCMSPACE +#define bcmspace FALSE /* if (bcmspace) code is discarded */ +#else +#define BCMSPACE +#define bcmspace TRUE /* if (bcmspace) code is retained */ +#endif // endif + +/* Max. nvram variable table size */ +#ifndef MAXSZ_NVRAM_VARS +#ifdef LARGE_NVRAM_MAXSZ +#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2) +#else +#define LARGE_NVRAM_MAXSZ 8192 +#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2) +#endif /* LARGE_NVRAM_MAXSZ */ +#endif /* !MAXSZ_NVRAM_VARS */ + +/* ROM_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also + * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles). 
+ */ + +#ifdef BCMLFRAG /* BCMLFRAG support enab macros */ + extern bool _bcmlfrag; + #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMLFRAG_ENAB() (_bcmlfrag) + #elif defined(BCMLFRAG_DISABLED) + #define BCMLFRAG_ENAB() (0) + #else + #define BCMLFRAG_ENAB() (1) + #endif +#else + #define BCMLFRAG_ENAB() (0) +#endif /* BCMLFRAG_ENAB */ + +#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */ +extern bool _pciedevenab; + #if defined(ROM_ENAB_RUNTIME_CHECK) + #define BCMPCIEDEV_ENAB() (_pciedevenab) + #elif defined(BCMPCIEDEV_ENABLED) + #define BCMPCIEDEV_ENAB() 1 + #else + #define BCMPCIEDEV_ENAB() 0 + #endif +#else + #define BCMPCIEDEV_ENAB() 0 +#endif /* BCMPCIEDEV */ + +#ifdef BCMRESVFRAGPOOL /* BCMRESVFRAGPOOL support enab macros */ +extern bool _resvfragpool_enab; + #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMRESVFRAGPOOL_ENAB() (_resvfragpool_enab) + #elif defined(BCMRESVFRAGPOOL_ENABLED) + #define BCMRESVFRAGPOOL_ENAB() 1 + #else + #define BCMRESVFRAGPOOL_ENAB() 0 + #endif +#else + #define BCMRESVFRAGPOOL_ENAB() 0 +#endif /* BCMPCIEDEV */ + + #define BCMSDIODEV_ENAB() 0 + +/* Max size for reclaimable NVRAM array */ +#ifdef DL_NVRAM +#define NVRAM_ARRAY_MAXSIZE DL_NVRAM +#else +#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS +#endif /* DL_NVRAM */ + +extern uint32 gFWID; + +#ifdef BCMFRWDPOOLREORG /* BCMFRWDPOOLREORG support enab macros */ + extern bool _bcmfrwdpoolreorg; + #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMFRWDPOOLREORG_ENAB() (_bcmfrwdpoolreorg) + #elif defined(BCMFRWDPOOLREORG_DISABLED) + #define BCMFRWDPOOLREORG_ENAB() (0) + #else + #define BCMFRWDPOOLREORG_ENAB() (1) + #endif +#else + #define BCMFRWDPOOLREORG_ENAB() (0) +#endif /* BCMFRWDPOOLREORG */ + +#ifdef BCMPOOLRECLAIM /* BCMPOOLRECLAIM support enab macros */ + extern bool _bcmpoolreclaim; + #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define BCMPOOLRECLAIM_ENAB() (_bcmpoolreclaim) + #elif 
defined(BCMPOOLRECLAIM_DISABLED) + #define BCMPOOLRECLAIM_ENAB() (0) + #else + #define BCMPOOLRECLAIM_ENAB() (1) + #endif +#else + #define BCMPOOLRECLAIM_ENAB() (0) +#endif /* BCMPOOLRECLAIM */ + +/* Chip related low power flags (lpflags) */ + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif // endif + +#ifndef FRAG_HEADROOM +#define FRAG_HEADROOM 224 /* In absence of SFD, use default headroom of 224 */ +#endif // endif + +#define MODULE_DETACH(var, detach_func)\ + if (var) { \ + detach_func(var); \ + (var) = NULL; \ + } +#define MODULE_DETACH_2(var1, var2, detach_func) detach_func(var1, var2) +#define MODULE_DETACH_TYPECASTED(var, detach_func) detach_func(var) + +/* When building ROML image use runtime conditional to cause the compiler + * to compile everything but not to complain "defined but not used" + * as #ifdef would cause at the callsites. + * In the end functions called under if (0) {} will not be linked + * into the final binary if they're not called from other places either. + */ +#define BCM_ATTACH_REF_DECL() +#define BCM_ATTACH_REF() (1) + +/* Const in ROM else normal data in RAM */ +#if defined(ROM_ENAB_RUNTIME_CHECK) + #define ROMCONST CONST +#else + #define ROMCONST +#endif // endif + +#endif /* _bcmdefs_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmdevs.h b/bcmdhd.100.10.315.x/include/bcmdevs.h new file mode 100644 index 0000000..30b2a87 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmdevs.h @@ -0,0 +1,936 @@ +/* + * Broadcom device-specific manifest constants. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmdevs.h 760196 2018-04-30 10:57:59Z $ + */ + +#ifndef _BCMDEVS_H +#define _BCMDEVS_H + +/* PCI vendor IDs */ +#define VENDOR_EPIGRAM 0xfeda +#define VENDOR_BROADCOM 0x14e4 +#define VENDOR_3COM 0x10b7 +#define VENDOR_NETGEAR 0x1385 +#define VENDOR_DIAMOND 0x1092 +#define VENDOR_INTEL 0x8086 +#define VENDOR_DELL 0x1028 +#define VENDOR_HP 0x103c +#define VENDOR_HP_COMPAQ 0x0e11 +#define VENDOR_APPLE 0x106b +#define VENDOR_SI_IMAGE 0x1095 /* Silicon Image, used by Arasan SDIO Host */ +#define VENDOR_BUFFALO 0x1154 /* Buffalo vendor id */ +#define VENDOR_TI 0x104c /* Texas Instruments */ +#define VENDOR_RICOH 0x1180 /* Ricoh */ +#define VENDOR_JMICRON 0x197b + +/* PCMCIA vendor IDs */ +#define VENDOR_BROADCOM_PCMCIA 0x02d0 + +/* SDIO vendor IDs */ +#define VENDOR_BROADCOM_SDIO 0x00BF + +/* DONGLE VID/PIDs */ +#define BCM_DNGL_VID 0x0a5c +#define BCM_DNGL_BL_PID_4328 0xbd12 +#define BCM_DNGL_BL_PID_4322 0xbd13 +#define BCM_DNGL_BL_PID_4319 0xbd16 +#define BCM_DNGL_BL_PID_43236 0xbd17 +#define BCM_DNGL_BL_PID_4332 0xbd18 +#define BCM_DNGL_BL_PID_4360 0xbd1d +#define BCM_DNGL_BL_PID_43143 0xbd1e +#define BCM_DNGL_BL_PID_43242 0xbd1f +#define BCM_DNGL_BL_PID_4335 0xbd20 +#define BCM_DNGL_BL_PID_4350 0xbd23 +#define BCM_DNGL_BL_PID_4345 0xbd24 +#define BCM_DNGL_BL_PID_4349 0xbd25 +#define BCM_DNGL_BL_PID_4354 0xbd26 +#define BCM_DNGL_BL_PID_43569 0xbd27 +#define BCM_DNGL_BL_PID_4373 0xbd29 + +#define BCM_DNGL_BDC_PID 0x0bdc +#define BCM_DNGL_JTAG_PID 0x4a44 + +#ifdef DEPRECATED +#define BCM_DNGL_BL_PID_43239 0xbd1b +#define BCM_DNGL_BL_PID_4324 0xbd1c +#define BCM_DNGL_BL_PID_43242 0xbd1f +#define BCM_DNGL_BL_PID_43909 0xbd28 +#endif // endif + +/* PCI Device IDs */ +#ifdef DEPRECATED /* These products have been deprecated */ +#define BCM4210_DEVICE_ID 0x1072 /* never used */ +#define BCM4230_DEVICE_ID 0x1086 /* never used */ +#define BCM4401_ENET_ID 0x170c /* 4401b0 production enet cards */ +#define BCM3352_DEVICE_ID 0x3352 /* bcm3352 device id 
*/ +#define BCM3360_DEVICE_ID 0x3360 /* bcm3360 device id */ +#define BCM4211_DEVICE_ID 0x4211 +#define BCM4231_DEVICE_ID 0x4231 +#define BCM4303_D11B_ID 0x4303 /* 4303 802.11b */ +#define BCM4311_D11G_ID 0x4311 /* 4311 802.11b/g id */ +#define BCM4311_D11DUAL_ID 0x4312 /* 4311 802.11a/b/g id */ +#define BCM4311_D11A_ID 0x4313 /* 4311 802.11a id */ +#define BCM4328_D11DUAL_ID 0x4314 /* 4328/4312 802.11a/g id */ +#define BCM4328_D11G_ID 0x4315 /* 4328/4312 802.11g id */ +#define BCM4328_D11A_ID 0x4316 /* 4328/4312 802.11a id */ +#define BCM4318_D11A_ID 0x431a /* 4318 802.11a id */ +#define BCM4325_D11DUAL_ID 0x431b /* 4325 802.11a/g id */ +#define BCM4325_D11G_ID 0x431c /* 4325 802.11g id */ +#define BCM4325_D11A_ID 0x431d /* 4325 802.11a id */ +#define BCM4306_UART_ID 0x4322 /* 4306 uart */ +#define BCM4306_V90_ID 0x4323 /* 4306 v90 codec */ +#define BCM4306_D11G_ID2 0x4325 /* BCM4306_D11G_ID; INF w/loose binding war */ +#define BCM4321_D11N_ID 0x4328 /* 4321 802.11n dualband id */ +#define BCM4321_D11N2G_ID 0x4329 /* 4321 802.11n 2.4Ghz band id */ +#define BCM4321_D11N5G_ID 0x432a /* 4321 802.11n 5Ghz band id */ +#define BCM4322_D11N_ID 0x432b /* 4322 802.11n dualband device */ +#define BCM4322_D11N2G_ID 0x432c /* 4322 802.11n 2.4GHz device */ +#define BCM4322_D11N5G_ID 0x432d /* 4322 802.11n 5GHz device */ +#define BCM4329_D11N_ID 0x432e /* 4329 802.11n dualband device */ +#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */ +#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */ +#define BCM4314_D11N2G_ID 0x4364 /* 4314 802.11n 2.4G device */ +#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */ +#define BCM4315_D11DUAL_ID 0x4334 /* 4315 802.11a/g id */ +#define BCM4315_D11G_ID 0x4335 /* 4315 802.11g id */ +#define BCM4315_D11A_ID 0x4336 /* 4315 802.11a id */ +#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */ +#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */ +#define BCM4319_D11N5G_ID 0x4339 /* 4319 
802.11n 5G device */ +#define BCM43221_D11N2G_ID 0x4341 /* 43221 802.11n 2.4GHz device */ +#define BCM43222_D11N_ID 0x4350 /* 43222 802.11n dualband device */ +#define BCM43222_D11N2G_ID 0x4351 /* 43222 802.11n 2.4GHz device */ +#define BCM43222_D11N5G_ID 0x4352 /* 43222 802.11n 5GHz device */ +#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */ +#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */ +#define BCM43228_D11N5G_ID 0x435a /* 43228 802.11n 5GHz device */ +#define BCM43231_D11N2G_ID 0x4340 /* 43231 802.11n 2.4GHz device */ +#define BCM43237_D11N_ID 0x4355 /* 43237 802.11n dualband device */ +#define BCM43237_D11N5G_ID 0x4356 /* 43237 802.11n 5GHz device */ +#define BCM43239_D11N_ID 0x4370 /* 43239 802.11n dualband device */ +#define BCM4324_D11N_ID 0x4374 /* 4324 802.11n dualband device */ +#define BCM43242_D11N_ID 0x4367 /* 43242 802.11n dualband device */ +#define BCM43242_D11N2G_ID 0x4368 /* 43242 802.11n 2.4G device */ +#define BCM43242_D11N5G_ID 0x4369 /* 43242 802.11n 5G device */ +#define BCM4330_D11N_ID 0x4360 /* 4330 802.11n dualband device */ +#define BCM4330_D11N2G_ID 0x4361 /* 4330 802.11n 2.4G device */ +#define BCM4330_D11N5G_ID 0x4362 /* 4330 802.11n 5G device */ +#define BCM4334_D11N_ID 0x4380 /* 4334 802.11n dualband device */ +#define BCM4334_D11N2G_ID 0x4381 /* 4334 802.11n 2.4G device */ +#define BCM4334_D11N5G_ID 0x4382 /* 4334 802.11n 5G device */ +#define BCM43342_D11N_ID 0x4383 /* 43342 802.11n dualband device */ +#define BCM43342_D11N2G_ID 0x4384 /* 43342 802.11n 2.4G device */ +#define BCM43342_D11N5G_ID 0x4385 /* 43342 802.11n 5G device */ +#define BCM43341_D11N_ID 0x4386 /* 43341 802.11n dualband device */ +#define BCM43341_D11N2G_ID 0x4387 /* 43341 802.11n 2.4G device */ +#define BCM43341_D11N5G_ID 0x4388 /* 43341 802.11n 5G device */ +#define BCM4336_D11N_ID 0x4343 /* 4336 802.11n 2.4GHz device */ +#define BCM43362_D11N_ID 0x4363 /* 43362 802.11n 2.4GHz device */ +#define BCM43421_D11N_ID 0xA99D 
/* 43421 802.11n dualband device */ +#define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */ +#define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */ +#define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */ +#endif /* DEPRECATED */ +/* DEPRECATED but used */ +#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */ +#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */ +#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */ +#define BCM43142_D11N2G_ID 0x4365 /* 43142 802.11n 2.4G device */ +#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ +#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */ +#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */ +#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ +#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */ +#define BCM43227_D11N2G_ID 0x4358 /* 43228 802.11n 2.4GHz device */ +#define BCM43228_D11N_ID 0x4359 /* 43228 802.11n DualBand device */ +#define BCM4331_D11N_ID 0x4331 /* 4331 802.11n dualband id */ +#define BCM4331_D11N2G_ID 0x4332 /* 4331 802.11n 2.4Ghz band id */ +#define BCM4331_D11N5G_ID 0x4333 /* 4331 802.11n 5Ghz band id */ +/* DEPRECATED */ + +#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ +#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ +#define BCM43236_D11N5G_ID 0x4348 /* 43236 802.11n 5GHz device */ +#define BCM6362_D11N_ID 0x435f /* 6362 802.11n dualband device */ +#define BCM6362_D11N2G_ID 0x433f /* 6362 802.11n 2.4Ghz band id */ +#define BCM6362_D11N5G_ID 0x434f /* 6362 802.11n 5Ghz band id */ +#define BCM43217_D11N2G_ID 0x43a9 /* 43217 802.11n 2.4GHz device */ +#define BCM43131_D11N2G_ID 0x43aa /* 43131 802.11n 2.4GHz device */ +#define BCM4360_D11AC_ID 0x43a0 +#define BCM4360_D11AC2G_ID 0x43a1 +#define BCM4360_D11AC5G_ID 0x43a2 +#define BCM4345_D11AC_ID 0x43ab /* 4345 802.11ac dualband device */ +#define BCM4345_D11AC2G_ID 0x43ac /* 4345 802.11ac 2.4G device */ 
+#define BCM4345_D11AC5G_ID 0x43ad /* 4345 802.11ac 5G device */ +#define BCM43455_D11AC_ID 0x43e3 /* 43455 802.11ac dualband device */ +#define BCM43455_D11AC2G_ID 0x43e4 /* 43455 802.11ac 2.4G device */ +#define BCM43455_D11AC5G_ID 0x43e5 /* 43455 802.11ac 5G device */ +#define BCM4335_D11AC_ID 0x43ae +#define BCM4335_D11AC2G_ID 0x43af +#define BCM4335_D11AC5G_ID 0x43b0 +#define BCM4352_D11AC_ID 0x43b1 /* 4352 802.11ac dualband device */ +#define BCM4352_D11AC2G_ID 0x43b2 /* 4352 802.11ac 2.4G device */ +#define BCM4352_D11AC5G_ID 0x43b3 /* 4352 802.11ac 5G device */ +#define BCM43602_D11AC_ID 0x43ba /* ac dualband PCI devid SPROM programmed */ +#define BCM43602_D11AC2G_ID 0x43bb /* 43602 802.11ac 2.4G device */ +#define BCM43602_D11AC5G_ID 0x43bc /* 43602 802.11ac 5G device */ +#define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */ +#define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */ +#define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */ +#define BCM53573_D11AC_ID 0x43b4 /* 53573 802.11ac dualband device */ +#define BCM53573_D11AC2G_ID 0x43b5 /* 53573 802.11ac 2.4G device */ +#define BCM53573_D11AC5G_ID 0x43b6 /* 53573 802.11ac 5G device */ +#define BCM47189_D11AC_ID 0x43c6 /* 47189 802.11ac dualband device */ +#define BCM47189_D11AC2G_ID 0x43c7 /* 47189 802.11ac 2.4G device */ +#define BCM47189_D11AC5G_ID 0x43c8 /* 47189 802.11ac 5G device */ +#define BCM4355_D11AC_ID 0x43dc /* 4355 802.11ac dualband device */ +#define BCM4355_D11AC2G_ID 0x43fc /* 4355 802.11ac 2.4G device */ +#define BCM4355_D11AC5G_ID 0x43fd /* 4355 802.11ac 5G device */ +#define BCM4359_D11AC_ID 0x43ef /* 4359 802.11ac dualband device */ +#define BCM4359_D11AC2G_ID 0x43fe /* 4359 802.11ac 2.4G device */ +#define BCM4359_D11AC5G_ID 0x43ff /* 4359 802.11ac 5G device */ +#define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */ +#define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */ +#define BCM43596_D11AC5G_ID 0x4417 /* 43596 
802.11ac 5G device */ +#define BCM43597_D11AC_ID 0x441c /* 43597 802.11ac dualband device */ +#define BCM43597_D11AC2G_ID 0x441d /* 43597 802.11ac 2.4G device */ +#define BCM43597_D11AC5G_ID 0x441e /* 43597 802.11ac 5G device */ +#define BCM43012_D11N_ID 0xA804 /* 43012 802.11n dualband device */ +#define BCM43012_D11N2G_ID 0xA805 /* 43012 802.11n 2.4G device */ +#define BCM43012_D11N5G_ID 0xA806 /* 43012 802.11n 5G device */ +#define BCM43014_D11N_ID 0x4495 /* 43014 802.11n dualband device */ +#define BCM43014_D11N2G_ID 0x4496 /* 43014 802.11n 2.4G device */ +#define BCM43014_D11N5G_ID 0x4497 /* 43014 802.11n 5G device */ + +/* PCI Subsystem ID */ +#define BCM94313HMGBL_SSID_VEN1 0x0608 +#define BCM94313HMG_SSID_VEN1 0x0609 +#define BCM943142HM_SSID_VEN1 0x0611 + +#define BCM4350_D11AC_ID 0x43a3 +#define BCM4350_D11AC2G_ID 0x43a4 +#define BCM4350_D11AC5G_ID 0x43a5 + +#define BCM43556_D11AC_ID 0x43b7 +#define BCM43556_D11AC2G_ID 0x43b8 +#define BCM43556_D11AC5G_ID 0x43b9 + +#define BCM43558_D11AC_ID 0x43c0 +#define BCM43558_D11AC2G_ID 0x43c1 +#define BCM43558_D11AC5G_ID 0x43c2 + +#define BCM43566_D11AC_ID 0x43d3 +#define BCM43566_D11AC2G_ID 0x43d4 +#define BCM43566_D11AC5G_ID 0x43d5 + +#define BCM43568_D11AC_ID 0x43d6 +#define BCM43568_D11AC2G_ID 0x43d7 +#define BCM43568_D11AC5G_ID 0x43d8 + +#define BCM43569_D11AC_ID 0x43d9 +#define BCM43569_D11AC2G_ID 0x43da +#define BCM43569_D11AC5G_ID 0x43db + +#define BCM43570_D11AC_ID 0x43d9 +#define BCM43570_D11AC2G_ID 0x43da +#define BCM43570_D11AC5G_ID 0x43db + +#define BCM4354_D11AC_ID 0x43df /* 4354 802.11ac dualband device */ +#define BCM4354_D11AC2G_ID 0x43e0 /* 4354 802.11ac 2.4G device */ +#define BCM4354_D11AC5G_ID 0x43e1 /* 4354 802.11ac 5G device */ +#define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */ +#define BCM43018_D11N2G_ID 0x441b /* 43018 802.11n 2.4G device */ + +#define BCM4347_D11AC_ID 0x440a /* 4347 802.11ac dualband device */ +#define BCM4347_D11AC2G_ID 0x440b /* 4347 802.11ac 2.4G device 
*/ +#define BCM4347_D11AC5G_ID 0x440c /* 4347 802.11ac 5G device */ + +#define BCM4361_D11AC_ID 0x441f /* 4361 802.11ac dualband device */ +#define BCM4361_D11AC2G_ID 0x4420 /* 4361 802.11ac 2.4G device */ +#define BCM4361_D11AC5G_ID 0x4421 /* 4361 802.11ac 5G device */ + +#define BCM4362_D11AX_ID 0x4490 /* 4362 802.11ax dualband device */ +#define BCM4362_D11AX2G_ID 0x4491 /* 4362 802.11ax 2.4G device */ +#define BCM4362_D11AX5G_ID 0x4492 /* 4362 802.11ax 5G device */ +#define BCM43751_D11AX_ID 0x4490 /* 43751 802.11ax dualband device */ +#define BCM43751_D11AX2G_ID 0x4491 /* 43751 802.11ax 2.4G device */ +#define BCM43751_D11AX5G_ID 0x4492 /* 43751 802.11ax 5G device */ + +#define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */ +#define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */ +#define BCM4364_D11AC5G_ID 0x446b /* 4364 802.11ac 5G device */ + +#define BCM4365_D11AC_ID 0x43ca +#define BCM4365_D11AC2G_ID 0x43cb +#define BCM4365_D11AC5G_ID 0x43cc + +#define BCM4366_D11AC_ID 0x43c3 +#define BCM4366_D11AC2G_ID 0x43c4 +#define BCM4366_D11AC5G_ID 0x43c5 + +/* TBD change below values */ +#define BCM4369_D11AX_ID 0x4470 /* 4369 802.11ax dualband device */ +#define BCM4369_D11AX2G_ID 0x4471 /* 4369 802.11ax 2.4G device */ +#define BCM4369_D11AX5G_ID 0x4472 /* 4369 802.11ax 5G device */ + +#define BCM4375_D11AX_ID 0x4475 /* 4375 802.11ax dualband device */ +#define BCM4375_D11AX2G_ID 0x4476 /* 4375 802.11ax 2.4G device */ +#define BCM4375_D11AX5G_ID 0x4477 /* 4375 802.11ax 5G device */ + +#define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */ +#define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */ +#define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */ + +#define BCM4358_D11AC_ID 0x43e9 /* 4358 802.11ac dualband device */ +#define BCM4358_D11AC2G_ID 0x43ea /* 4358 802.11ac 2.4G device */ +#define BCM4358_D11AC5G_ID 0x43eb /* 4358 802.11ac 5G device */ + +#define BCM4356_D11AC_ID 0x43ec /* 4356 802.11ac dualband 
device */ +#define BCM4356_D11AC2G_ID 0x43ed /* 4356 802.11ac 2.4G device */ +#define BCM4356_D11AC5G_ID 0x43ee /* 4356 802.11ac 5G device */ + +#define BCM4371_D11AC_ID 0x440d /* 4371 802.11ac dualband device */ +#define BCM4371_D11AC2G_ID 0x440e /* 4371 802.11ac 2.4G device */ +#define BCM4371_D11AC5G_ID 0x440f /* 4371 802.11ac 5G device */ +#define BCM7271_D11AC_ID 0x4410 /* 7271 802.11ac dualband device */ +#define BCM7271_D11AC2G_ID 0x4411 /* 7271 802.11ac 2.4G device */ +#define BCM7271_D11AC5G_ID 0x4412 /* 7271 802.11ac 5G device */ + +#define BCM4373_D11AC_ID 0x4418 /* 4373 802.11ac dualband device */ +#define BCM4373_D11AC2G_ID 0x4419 /* 4373 802.11ac 2.4G device */ +#define BCM4373_D11AC5G_ID 0x441a /* 4373 802.11ac 5G device */ + +#define BCMGPRS_UART_ID 0x4333 /* Uart id used by 4306/gprs card */ +#define BCMGPRS2_UART_ID 0x4344 /* Uart id used by 4306/gprs card */ +#define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */ +#define BCM_JTAGM_ID 0x43f1 /* BCM jtagm device id */ +#define SDIOH_FPGA_ID 0x43f2 /* sdio host fpga */ +#define BCM_SDIOH_ID 0x43f3 /* BCM sdio host id */ +#define SDIOD_FPGA_ID 0x43f4 /* sdio device fpga */ +#define SPIH_FPGA_ID 0x43f5 /* PCI SPI Host Controller FPGA */ +#define BCM_SPIH_ID 0x43f6 /* Synopsis SPI Host Controller */ +#define MIMO_FPGA_ID 0x43f8 /* FPGA mimo minimacphy device id */ +#define BCM_JTAGM2_ID 0x43f9 /* BCM alternate jtagm device id */ +#define SDHCI_FPGA_ID 0x43fa /* Standard SDIO Host Controller FPGA */ +#define BCM4402_ENET_ID 0x4402 /* 4402 enet */ +#define BCM4402_V90_ID 0x4403 /* 4402 v90 codec */ +#define BCM4410_DEVICE_ID 0x4410 /* bcm44xx family pci iline */ +#define BCM4412_DEVICE_ID 0x4412 /* bcm44xx family pci enet */ +#define BCM4430_DEVICE_ID 0x4430 /* bcm44xx family cardbus iline */ +#define BCM4432_DEVICE_ID 0x4432 /* bcm44xx family cardbus enet */ +#define BCM4704_ENET_ID 0x4706 /* 4704 enet (Use 47XX_ENET_ID instead!) 
*/ +#define BCM4710_DEVICE_ID 0x4710 /* 4710 primary function 0 */ +#define BCM47XX_AUDIO_ID 0x4711 /* 47xx audio codec */ +#define BCM47XX_V90_ID 0x4712 /* 47xx v90 codec */ +#define BCM47XX_ENET_ID 0x4713 /* 47xx enet */ +#define BCM47XX_EXT_ID 0x4714 /* 47xx external i/f */ +#define BCM47XX_GMAC_ID 0x4715 /* 47xx Unimac based GbE */ +#define BCM47XX_USBH_ID 0x4716 /* 47xx usb host */ +#define BCM47XX_USBD_ID 0x4717 /* 47xx usb device */ +#define BCM47XX_IPSEC_ID 0x4718 /* 47xx ipsec */ +#define BCM47XX_ROBO_ID 0x4719 /* 47xx/53xx roboswitch core */ +#define BCM47XX_USB20H_ID 0x471a /* 47xx usb 2.0 host */ +#define BCM47XX_USB20D_ID 0x471b /* 47xx usb 2.0 device */ +#define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */ +#define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */ +#define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */ +#ifdef DEPRECATED /* These products have been deprecated */ +#define BCM4712_MIPS_ID 0x4720 /* 4712 base devid */ +#define BCM4716_DEVICE_ID 0x4722 /* 4716 base devid */ +#endif /* DEPRECATED */ +#define BCM47XX_USB30H_ID 0x472a /* 47xx usb 3.0 host */ +#define BCM47XX_USB30D_ID 0x472b /* 47xx usb 3.0 device */ +#define BCM47XX_USBHUB_ID 0x472c /* 47xx usb hub */ +#define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */ +#define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */ +#define EPI41210_DEVICE_ID 0xa0fa /* bcm4210 */ +#define EPI41230_DEVICE_ID 0xa10e /* bcm4230 */ +#define JINVANI_SDIOH_ID 0x4743 /* Jinvani SDIO Gold Host */ +#define BCM27XX_SDIOH_ID 0x2702 /* BCM27xx Standard SDIO Host */ +#define PCIXX21_FLASHMEDIA_ID 0x803b /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH_ID 0x803c /* TI PCI xx21 Standard Host Controller */ +#define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */ +#define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */ + +#define BCM43452_D11AC_ID 0x47ab /* 43452 802.11ac dualband device */ +#define 
BCM43452_D11AC2G_ID 0x47ac /* 43452 802.11ac 2.4G device */ +#define BCM43452_D11AC5G_ID 0x47ad /* 43452 802.11ac 5G device */ + +/* Chip IDs */ +#ifdef DEPRECATED /* These products have been deprecated */ +#define BCM4306_CHIP_ID 0x4306 /* 4306 chipcommon chipid */ +#define BCM4311_CHIP_ID 0x4311 /* 4311 PCIe 802.11a/b/g */ +#define BCM43111_CHIP_ID 43111 /* 43111 chipcommon chipid (OTP chipid) */ +#define BCM43112_CHIP_ID 43112 /* 43112 chipcommon chipid (OTP chipid) */ +#define BCM4312_CHIP_ID 0x4312 /* 4312 chipcommon chipid */ +#define BCM4314_CHIP_ID 0x4314 /* 4314 chipcommon chipid */ +#define BCM43142_CHIP_ID 43142 /* 43142 chipcommon chipid */ +#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */ +#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ +#define BCM4315_CHIP_ID 0x4315 /* 4315 chip id */ +#define BCM4318_CHIP_ID 0x4318 /* 4318 chipcommon chipid */ +#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ +#define BCM4320_CHIP_ID 0x4320 /* 4320 chipcommon chipid */ +#define BCM4321_CHIP_ID 0x4321 /* 4321 chipcommon chipid */ +#define BCM4322_CHIP_ID 0x4322 /* 4322 chipcommon chipid */ +#define BCM43221_CHIP_ID 43221 /* 43221 chipcommon chipid (OTP chipid) */ +#define BCM43222_CHIP_ID 43222 /* 43222 chipcommon chipid */ +#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ +#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ +#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */ +#define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */ +#define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */ +#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */ +#define BCM43237_CHIP_ID 43237 /* 43237 chipcommon chipid */ +#define BCM43239_CHIP_ID 43239 /* 43239 chipcommon chipid */ +#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */ +#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */ +#define BCM43243_CHIP_ID 43243 /* 43243 chipcommon chipid */ +#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id 
*/ +#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */ +#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */ +#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */ +#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */ +#define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */ +#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */ +#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */ +#define BCM43342_CHIP_ID 43342 /* 43342 chipcommon chipid */ +#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */ +#define BCM43420_CHIP_ID 43420 /* 43420 chipcommon chipid (OTP, RBBU) */ +#define BCM43421_CHIP_ID 43421 /* 43224 chipcommon chipid (OTP, RBBU) */ +#define BCM43431_CHIP_ID 43431 /* 4331 chipcommon chipid (OTP, RBBU) */ +#define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */ +#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */ +#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */ +#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */ +#endif /* DEPRECATED */ + +/* DEPRECATED but still referenced in components - start */ +#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */ +#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */ +/* DEPRECATED but still referenced in components - end */ + +#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */ +#define BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */ +#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */ +#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ +#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */ +#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */ +#define BCM43428_CHIP_ID 43428 /* 43228 chipcommon chipid (OTP, RBBU) */ +#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */ +#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */ +#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */ +#define BCM43465_CHIP_ID 43465 /* 
4366 chipcommon chipid (OTP, RBBU) */ +#define BCM43525_CHIP_ID 43525 /* 4365 chipcommon chipid (OTP, RBBU) */ +#define BCM47452_CHIP_ID 47452 /* 53573 chipcommon chipid (OTP, RBBU) */ +#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */ +#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */ +#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */ +#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */ +#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */ +#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */ +#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */ +#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */ +#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */ +#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */ +#define BCM43526_CHIP_ID 0xAA06 +#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */ +#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */ +#define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */ +#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */ +#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */ +#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */ +#define BCM43556_CHIP_ID 0xAA24 /* 43556 chipcommon chipid */ +#define BCM43558_CHIP_ID 0xAA26 /* 43558 chipcommon chipid */ +#define BCM43562_CHIP_ID 0xAA2A /* 43562 chipcommon chipid */ +#define BCM43566_CHIP_ID 0xAA2E /* 43566 chipcommon chipid */ +#define BCM43567_CHIP_ID 0xAA2F /* 43567 chipcommon chipid */ +#define BCM43568_CHIP_ID 0xAA30 /* 43568 chipcommon chipid */ +#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */ +#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */ +#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */ +#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */ +#define BCM43014_CHIP_ID 0xA806 /* 43014 chipcommon chipid */ +#define BCM4369_CHIP_ID 0x4369 /* 4369 chipcommon chipid */ + +#define BCM4350_CHIP(chipid) ((CHIPID(chipid) 
== BCM4350_CHIP_ID) || \ + (CHIPID(chipid) == BCM4354_CHIP_ID) || \ + (CHIPID(chipid) == BCM43556_CHIP_ID) || \ + (CHIPID(chipid) == BCM43558_CHIP_ID) || \ + (CHIPID(chipid) == BCM43566_CHIP_ID) || \ + (CHIPID(chipid) == BCM43567_CHIP_ID) || \ + (CHIPID(chipid) == BCM43568_CHIP_ID) || \ + (CHIPID(chipid) == BCM43569_CHIP_ID) || \ + (CHIPID(chipid) == BCM43570_CHIP_ID) || \ + (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */ + +#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */ +#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */ +#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */ +#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */ +#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */ + +#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \ + CHIPID(chipid) == BCM43454_CHIP_ID || \ + CHIPID(chipid) == BCM43455_CHIP_ID || \ + CHIPID(chipid) == BCM43457_CHIP_ID || \ + CHIPID(chipid) == BCM43458_CHIP_ID) + +#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \ + case BCM43454_CHIP_ID: /* fallthrough */ \ + case BCM43455_CHIP_ID: /* fallthrough */ \ + case BCM43457_CHIP_ID: /* fallthrough */ \ + case BCM43458_CHIP_ID + +#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */ +#define BCM43018_CHIP_ID 43018 /* 43018 chipcommon chipid */ +#define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon chipid */ +#define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */ +#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */ +#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \ + (CHIPID(chipid) == BCM4355_CHIP_ID) || \ + (CHIPID(chipid) == BCM4359_CHIP_ID)) + +#define BCM4355_CHIP(chipid) (CHIPID(chipid) == BCM4355_CHIP_ID) + +#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \ + case BCM4355_CHIP_ID: \ + case BCM4359_CHIP_ID +#define BCM43596_CHIP_ID 43596 /* 43596 chipcommon chipid */ + +#define BCM4347_CHIP_ID 0x4347 /* 4347 chipcommon chipid */ +#define BCM4357_CHIP_ID 0x4357 
/* 4357 chipcommon chipid */ +#define BCM4361_CHIP_ID 0x4361 /* 4361 chipcommon chipid */ +#define BCM4369_CHIP_ID 0x4369 /* 4369/ chipcommon chipid */ +#define BCM4375_CHIP_ID 0x4375 /* 4375/ chipcommon chipid */ +#define BCM4377_CHIP_ID 0x4377 /* 4377/ chipcommon chipid */ +#define BCM4362_CHIP_ID 0x4362 /* 4362 chipcommon chipid */ +#define BCM43751_CHIP_ID 0xAAE7 /* 43751 chipcommon chipid */ + +#define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \ + (CHIPID(chipid) == BCM4357_CHIP_ID) || \ + (CHIPID(chipid) == BCM4361_CHIP_ID)) +#define BCM4347_CHIP_GRPID BCM4347_CHIP_ID: \ + case BCM4357_CHIP_ID: \ + case BCM4361_CHIP_ID + +#define BCM4369_CHIP(chipid) ((CHIPID(chipid) == BCM4369_CHIP_ID) || \ + (CHIPID(chipid) == BCM4377_CHIP_ID)) +#define BCM4369_CHIP_GRPID BCM4369_CHIP_ID: \ + case BCM4377_CHIP_ID + +#define BCM4362_CHIP(chipid) (CHIPID(chipid) == BCM4362_CHIP_ID) +#define BCM4362_CHIP_GRPID BCM4362_CHIP_ID + +#define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */ +#define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */ +#define BCM43664_CHIP_ID 43664 /* 4366E chipcommon chipid */ +#define BCM43666_CHIP_ID 43666 /* 4365E chipcommon chipid */ +#define BCM4365_CHIP(chipid) ((CHIPID(chipid) == BCM4365_CHIP_ID) || \ + (CHIPID(chipid) == BCM4366_CHIP_ID) || \ + (CHIPID(chipid) == BCM43664_CHIP_ID) || \ + (CHIPID(chipid) == BCM43666_CHIP_ID)) +#define CASE_BCM4365_CHIP case BCM4365_CHIP_ID: /* fallthrough */ \ + case BCM4366_CHIP_ID: /* fallthrough */ \ + case BCM43664_CHIP_ID: /* fallthrough */ \ + case BCM43666_CHIP_ID + +#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */ +#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */ +#define BCM43522_CHIP_ID 0xaa02 /* 43522 chipcommon chipid */ +#define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \ + (CHIPID(chipid) == BCM43462_CHIP_ID) || \ + (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */ +#define BCM43012_CHIP(chipid) (CHIPID(chipid) == 
BCM43012_CHIP_ID) +#define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \ + case BCM43462_CHIP_ID: /* fallthrough */ \ + case BCM43522_CHIP_ID + +#define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */ +#define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */ +#define BCM4707_CHIP_ID 53010 /* 4707 chipcommon chipid */ +#define BCM47094_CHIP_ID 53030 /* 47094 chipcommon chipid */ +#define BCM53018_CHIP_ID 53018 /* 53018 chipcommon chipid */ +#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || \ + ((chipid) == BCM53018_CHIP_ID) || \ + ((chipid) == BCM47094_CHIP_ID)) +#define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */ +#define BCM4785_CHIP_ID 0x4785 /* 4785 chipcommon chipid */ +#define BCM5350_CHIP_ID 0x5350 /* 5350 chipcommon chipid */ +#define BCM5352_CHIP_ID 0x5352 /* 5352 chipcommon chipid */ +#define BCM5365_CHIP_ID 0x5365 /* 5365 chipcommon chipid */ +#define BCM53573_CHIP_ID 53573 /* 53573 chipcommon chipid */ +#define BCM53574_CHIP_ID 53574 /* 53574 chipcommon chipid */ +#define BCM53573_CHIP(chipid) ((CHIPID(chipid) == BCM53573_CHIP_ID) || \ + (CHIPID(chipid) == BCM53574_CHIP_ID) || \ + (CHIPID(chipid) == BCM47452_CHIP_ID)) +#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID : \ + case BCM53574_CHIP_ID : \ + case BCM47452_CHIP_ID +#define BCM53573_DEVICE(devid) (((devid) == BCM53573_D11AC_ID) || \ + ((devid) == BCM53573_D11AC2G_ID) || \ + ((devid) == BCM53573_D11AC5G_ID) || \ + ((devid) == BCM47189_D11AC_ID) || \ + ((devid) == BCM47189_D11AC2G_ID) || \ + ((devid) == BCM47189_D11AC5G_ID)) + +#define BCM7271_CHIP_ID 0x05c9 /* 7271 chipcommon chipid */ +#define BCM7271_CHIP(chipid) ((CHIPID(chipid) == BCM7271_CHIP_ID)) + +#define BCM4373_CHIP_ID 0x4373 /* 4373 chipcommon chipid */ + +/* Package IDs */ +#ifdef DEPRECATED /* These products have been deprecated */ +#define BCM4303_PKG_ID 2 /* 4303 package id */ +#define BCM4309_PKG_ID 1 /* 4309 package id */ +#define BCM4712LARGE_PKG_ID 0 /* 340pin 4712 package id */ +#define BCM4712SMALL_PKG_ID 1 /* 
200pin 4712 package id */ +#define BCM4712MID_PKG_ID 2 /* 225pin 4712 package id */ +#define BCM4328USBD11G_PKG_ID 2 /* 4328 802.11g USB package id */ +#define BCM4328USBDUAL_PKG_ID 3 /* 4328 802.11a/g USB package id */ +#define BCM4328SDIOD11G_PKG_ID 4 /* 4328 802.11g SDIO package id */ +#define BCM4328SDIODUAL_PKG_ID 5 /* 4328 802.11a/g SDIO package id */ +#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */ +#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */ +#define BCM5354E_PKG_ID 1 /* 5354E package id */ +#define BCM4716_PKG_ID 8 /* 4716 package id */ +#define BCM4717_PKG_ID 9 /* 4717 package id */ +#define BCM4718_PKG_ID 10 /* 4718 package id */ +#define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */ +#define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */ +#define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */ +#endif /* DEPRECATED */ +#define BCM47189_PKG_ID 1 /* 47189 package id */ +#define BCM53573_PKG_ID 0 /* 53573 package id */ + +#define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */ +#define HDLSIM_PKG_ID 14 /* HDL simulator package id */ +#define HWSIM_PKG_ID 15 /* Hardware simulator package id */ + +#define BCM4707_PKG_ID 1 /* 4707 package id */ +#define BCM4708_PKG_ID 2 /* 4708 package id */ +#define BCM4709_PKG_ID 0 /* 4709 package id */ + +#define PCIXX21_FLASHMEDIA0_ID 0x8033 /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH0_ID 0x8034 /* TI PCI xx21 Standard Host Controller */ + +#define BCM4335_WLCSP_PKG_ID (0x0) /* WLCSP Module/Mobile SDIO/HSIC. */ +#define BCM4335_FCBGA_PKG_ID (0x1) /* FCBGA PC/Embedded/Media PCIE/SDIO */ +#define BCM4335_WLBGA_PKG_ID (0x2) /* WLBGA COB/Mobile SDIO/HSIC. */ +#define BCM4335_FCBGAD_PKG_ID (0x3) /* FCBGA Debug Debug/Dev All if's. */ +#define BCM4335_PKG_MASK (0x3) +#define BCM43602_12x12_PKG_ID (0x1) /* 12x12 pins package, used for e.g. 
router designs */ + +/* boardflags */ +#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */ +#define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */ +#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio radio disable indication */ +#define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */ +#define BFL_DIS_256QAM 0x00000008 +#define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */ +#define BFL_TSSIAVG 0x00000010 /* TSSI averaging for ACPHY chips */ +#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */ +#define BFL_CCKHIPWR 0x00000040 /* Can do high-power CCK transmission */ +#define BFL_ENETADM 0x00000080 /* Board has ADMtek switch */ +#define BFL_ENETVLAN 0x00000100 /* Board has VLAN capability */ +#define BFL_LTECOEX 0x00000200 /* LTE Coex enabled */ +#define BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ +#define BFL_FEM 0x00000800 /* Board supports the Front End Module */ +#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_HGPA 0x00002000 /* Board has a high gain PA */ +#define BFL_BTC2WIRE_ALTGPIO 0x00004000 /* Board's BTC 2wire is in the alternate gpios */ +#define BFL_ALTIQ 0x00008000 /* Alternate I/Q settings */ +#define BFL_NOPA 0x00010000 /* Board has no PA */ +#define BFL_RSSIINV 0x00020000 /* Board's RSSI uses positive slope(not TSSI) */ +#define BFL_PAREF 0x00040000 /* Board uses the PARef LDO */ +#define BFL_3TSWITCH 0x00080000 /* Board uses a triple throw switch shared with BT */ +#define BFL_PHASESHIFT 0x00100000 /* Board can support phase shifter */ +#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */ +#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */ +#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */ +#define BFL_CCKFAVOREVM 0x01000000 /* Favor CCK EVM over spectral mask */ 
+#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */ +#define BFL_LNLDO2_2P5 0x04000000 /* Select 2.5V as LNLDO2 output voltage */ +#define BFL_FASTPWR 0x08000000 +#define BFL_UCPWRCTL_MININDX 0x08000000 /* Enforce min power index to avoid FEM damage */ +#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_TRSW_1by2 0x20000000 /* Board has 2 TRSW's in 1by2 designs */ +#define BFL_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL_LO_TRSW_R_5GHz 0x40000000 /* In 5G do not throw TRSW to T for clipLO gain */ +#define BFL_ELNA_GAINDEF 0x80000000 /* Backoff InitGain based on elna_2g/5g field + * when this flag is set + */ +#define BFL_EXTLNA_TX 0x20000000 /* Temp boardflag to indicate to */ + +/* boardflags2 */ +#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */ +#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */ +#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */ +#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */ +#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */ +#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */ +#define BFL2_WLCX_ATLAS 0x00000040 /* Board flag to initialize ECI for WLCX on FL-ATLAS */ +#define BFL2_BTC3WIRE 0x00000080 /* Board support legacy 3 wire or 4 wire */ +#define BFL2_BTCLEGACY 0x00000080 /* Board support legacy 3/4 wire, to replace + * BFL2_BTC3WIRE + */ +#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */ +#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */ +#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */ +#define BFL2_TRISTATE_LED 0x00000800 /* Tri-state the LED */ +#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 
only */ +#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */ +#define BFL2_BPHY_ALL_TXCORES 0x00004000 /* Transmit bphy frames using all tx cores */ +#define BFL2_FCC_BANDEDGE_WAR 0x00008000 /* Activates WAR to improve FCC bandedge performance */ +#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000 /* Reducing DAC Spurs */ +#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */ +#define BFL2_REDUCED_PA_TURNONTIME 0x00010000 /* Flag to reduce PA turn on Time */ +#define BFL2_IPALVLSHIFT_3P3 0x00020000 +#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */ +#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio on */ + /* Most drivers will turn it off without this flag */ + /* to save power. */ + +#define BFL2_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ELNACTRL_TRSW_2G 0x00400000 /* AZW4329: 2G gmode_elna_gain controls TR Switch */ +#define BFL2_BT_SHARE_ANT0 0x00800000 /* share core0 antenna with BT */ +#define BFL2_TEMPSENSE_HIGHER 0x01000000 /* The tempsense threshold can sustain higher value + * than programmed. The exact delta is decided by + * driver per chip/boardtype. This can be used + * when tempsense qualification happens after shipment + */ +#define BFL2_BTC3WIREONLY 0x02000000 /* standard 3 wire btc only. 
4 wire not supported */ +#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */ +#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */ + /* ucode control of eLNA during Tx */ +#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */ +#define BFL2_DYNAMIC_VMID 0x10000000 /* boardflag to enable dynamic Vmid idle TSSI CAL */ +#define BFL2_LNA1BYPFORTR2G 0x40000000 /* acphy, enable lna1 bypass for clip gain, 2g */ +#define BFL2_LNA1BYPFORTR5G 0x80000000 /* acphy, enable lna1 bypass for clip gain, 5g */ + +/* SROM 11 - 11ac boardflag definitions */ +#define BFL_SROM11_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_SROM11_WLAN_BT_SH_XTL 0x00000002 /* bluetooth and wlan share same crystal */ +#define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */ +#define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15 +#define BFL_SROM11_PRECAL_TX_IDX 0x00040000 /* Dedicated TX IQLOCAL IDX values */ + /* per subband, as derived from 43602A1 MCH5 */ +#define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL 0x00020000 /* Keep ext. 
PA's on in TX IQLO CAL */ + +/* boardflags3 */ +#define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */ +#define BFL3_RCAL_WAR 0x00000008 /* acphy, rcal war active on this board (4335a0) */ +#define BFL3_TXGAINTBLID 0x00000070 /* acphy, txgain table id */ +#define BFL3_TXGAINTBLID_SHIFT 0x4 /* acphy, txgain table id shift bit */ +#define BFL3_TSSI_DIV_WAR 0x00000080 /* acphy, Separate paparam for 20/40/80 */ +#define BFL3_TSSI_DIV_WAR_SHIFT 0x7 /* acphy, Separate paparam for 20/40/80 shift bit */ +#define BFL3_FEMTBL_FROM_NVRAM 0x00000100 /* acphy, femctrl table is read from nvram */ +#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8 /* acphy, femctrl table is read from nvram */ +#define BFL3_AGC_CFG_2G 0x00000200 /* acphy, gain control configuration for 2G */ +#define BFL3_AGC_CFG_5G 0x00000400 /* acphy, gain control configuration for 5G */ +#define BFL3_PPR_BIT_EXT 0x00000800 /* acphy, bit position for 1bit extension for ppr */ +#define BFL3_PPR_BIT_EXT_SHIFT 11 /* acphy, bit shift for 1bit extension for ppr */ +#define BFL3_BBPLL_SPR_MODE_DIS 0x00001000 /* acphy, disables bbpll spur modes */ +#define BFL3_RCAL_OTP_VAL_EN 0x00002000 /* acphy, to read rcal_trim value from otp */ +#define BFL3_2GTXGAINTBL_BLANK 0x00004000 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK 0x00008000 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_PHASETRACK_MAX_ALPHABETA 0x00010000 /* acphy, to max out alpha,beta to 511 */ +#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16 /* acphy, to max out alpha,beta to 511 */ +/* acphy, to use backed off gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN 0x00060000 +/* acphy, to use backed off gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17 +#define BFL3_5G_SPUR_WAR 
0x00080000 /* acphy, enable spur WAR in 5G band */ +#define BFL3_1X1_RSDB_ANT 0x01000000 /* to find if 2-ant RSDB board or 1-ant RSDB board */ +#define BFL3_1X1_RSDB_ANT_SHIFT 24 + +/* acphy: lpmode2g and lpmode_5g related boardflags */ +#define BFL3_ACPHY_LPMODE_2G 0x00300000 /* bits 20:21 for lpmode_2g choice */ +#define BFL3_ACPHY_LPMODE_2G_SHIFT 20 + +#define BFL3_ACPHY_LPMODE_5G 0x00C00000 /* bits 22:23 for lpmode_5g choice */ +#define BFL3_ACPHY_LPMODE_5G_SHIFT 22 + +#define BFL3_EXT_LPO_ISCLOCK 0x02000000 /* External LPO is clock, not x-tal */ +#define BFL3_FORCE_INT_LPO_SEL 0x04000000 /* Force internal lpo */ +#define BFL3_FORCE_EXT_LPO_SEL 0x08000000 /* Force external lpo */ + +#define BFL3_EN_BRCM_IMPBF 0x10000000 /* acphy, Allow BRCM Implicit TxBF */ +#define BFL3_AVVMID_FROM_NVRAM 0x40000000 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM 0x80000000 /* Read Vlin En from NVRAM */ + +#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */ + +/* boardflags4 for SROM12/SROM13 */ +#define BFL4_SROM12_4dBPAD (1 << 0) /* To distinguish between normal and 4dB pad board */ +#define BFL4_SROM12_2G_DETTYPE (1 << 1) /* Determine power detector type for 2G */ +#define BFL4_SROM12_5G_DETTYPE (1 << 2) /* Determine power detector type for 5G */ +#define BFL4_SROM13_DETTYPE_EN (1 << 3) /* using pa_dettype from SROM13 flags */ +#define BFL4_SROM13_CCK_SPUR_EN (1 << 4) /* using cck spur reduction setting in 4366 */ +#define BFL4_SROM13_1P5V_CBUCK (1 << 7) /* using 1.5V cbuck board in 4366 */ +#define BFL4_SROM13_EN_SW_TXRXCHAIN_MASK (1 << 8) /* Enable/disable bit for sw chain mask */ + +#define BFL4_4364_HARPOON 0x0100 /* Harpoon module 4364 */ +#define BFL4_4364_GODZILLA 0x0200 /* Godzilla module 4364 */ +#define BFL4_BTCOEX_OVER_SECI 0x00000400 /* Enable btcoex over gci seci */ + +/* papd params */ +#define PAPD_TX_ATTN_2G 0xFF +#define PAPD_TX_ATTN_5G 0xFF00 +#define 
PAPD_TX_ATTN_5G_SHIFT 8 +#define PAPD_RX_ATTN_2G 0xFF +#define PAPD_RX_ATTN_5G 0xFF00 +#define PAPD_RX_ATTN_5G_SHIFT 8 +#define PAPD_CAL_IDX_2G 0xFF +#define PAPD_CAL_IDX_5G 0xFF00 +#define PAPD_CAL_IDX_5G_SHIFT 8 +#define PAPD_BBMULT_2G 0xFF +#define PAPD_BBMULT_5G 0xFF00 +#define PAPD_BBMULT_5G_SHIFT 8 +#define TIA_GAIN_MODE_2G 0xFF +#define TIA_GAIN_MODE_5G 0xFF00 +#define TIA_GAIN_MODE_5G_SHIFT 8 +#define PAPD_EPS_OFFSET_2G 0xFFFF +#define PAPD_EPS_OFFSET_5G 0xFFFF0000 +#define PAPD_EPS_OFFSET_5G_SHIFT 16 +#define PAPD_CALREF_DB_2G 0xFF +#define PAPD_CALREF_DB_5G 0xFF00 +#define PAPD_CALREF_DB_5G_SHIFT 8 + +/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */ +#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */ +#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */ +#define BOARD_GPIO_BTCMOD_IN 0x010 /* bit 4 is the alternate BT Coexistence Input */ +#define BOARD_GPIO_BTCMOD_OUT 0x020 /* bit 5 is the alternate BT Coexistence Out */ +#define BOARD_GPIO_BTC_IN 0x080 /* bit 7 is BT Coexistence Input */ +#define BOARD_GPIO_BTC_OUT 0x100 /* bit 8 is BT Coexistence Out */ +#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */ +#define BOARD_GPIO_12 0x1000 /* gpio 12 */ +#define BOARD_GPIO_13 0x2000 /* gpio 13 */ +#define BOARD_GPIO_BTC4_IN 0x0800 /* gpio 11, coex4, in */ +#define BOARD_GPIO_BTC4_BT 0x2000 /* gpio 12, coex4, bt active */ +#define BOARD_GPIO_BTC4_STAT 0x4000 /* gpio 14, coex4, status */ +#define BOARD_GPIO_BTC4_WLAN 0x8000 /* gpio 15, coex4, wlan active */ +#define BOARD_GPIO_1_WLAN_PWR 0x02 /* throttle WLAN power on X21 board */ +#define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */ +#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */ +#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */ +#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */ + +#define 
GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */ + +#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */ +#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */ + +/* power control defines */ +#define PLL_DELAY 150 /* us pll on delay */ +#define FREF_DELAY 200 /* us fref change delay */ +#define MIN_SLOW_CLK 32 /* us Slow clock period */ +#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */ + +/* 43012 wlbga Board */ +#define BCM943012WLREF_SSID 0x07d7 + +/* 43012 fcbga Board */ +#define BCM943012FCREF_SSID 0x07d4 + +/* 43602 Boards, unclear yet what boards will be created. */ +#define BCM943602RSVD1_SSID 0x06a5 +#define BCM943602RSVD2_SSID 0x06a6 +#define BCM943602X87 0X0133 +#define BCM943602X87P2 0X0152 +#define BCM943602X87P3 0X0153 +#define BCM943602X238 0X0132 +#define BCM943602X238D 0X014A +#define BCM943602X238DP2 0X0155 +#define BCM943602X238DP3 0X0156 +#define BCM943602X100 0x0761 +#define BCM943602X100GS 0x0157 +#define BCM943602X100P2 0x015A + +/* # of GPIO pins */ +#define GPIO_NUMPINS 32 + +/* These values are used by dhd host driver. 
*/ +#define RDL_RAM_BASE_4319 0x60000000 +#define RDL_RAM_BASE_4329 0x60000000 +#define RDL_RAM_SIZE_4319 0x48000 +#define RDL_RAM_SIZE_4329 0x48000 +#define RDL_RAM_SIZE_43236 0x70000 +#define RDL_RAM_BASE_43236 0x60000000 +#define RDL_RAM_SIZE_4328 0x60000 +#define RDL_RAM_BASE_4328 0x80000000 +#define RDL_RAM_SIZE_4322 0x60000 +#define RDL_RAM_BASE_4322 0x60000000 +#define RDL_RAM_SIZE_4360 0xA0000 +#define RDL_RAM_BASE_4360 0x60000000 +#define RDL_RAM_SIZE_43242 0x90000 +#define RDL_RAM_BASE_43242 0x60000000 +#define RDL_RAM_SIZE_43143 0x70000 +#define RDL_RAM_BASE_43143 0x60000000 +#define RDL_RAM_SIZE_4350 0xC0000 +#define RDL_RAM_BASE_4350 0x180800 + +/* generic defs for nvram "muxenab" bits +* Note: these differ for 4335a0. refer bcmchipc.h for specific mux options. +*/ +#define MUXENAB_UART 0x00000001 +#define MUXENAB_GPIO 0x00000002 +#define MUXENAB_ERCX 0x00000004 /* External Radio BT coex */ +#define MUXENAB_JTAG 0x00000008 +#define MUXENAB_HOST_WAKE 0x00000010 /* configure GPIO for SDIO host_wake */ +#define MUXENAB_I2S_EN 0x00000020 +#define MUXENAB_I2S_MASTER 0x00000040 +#define MUXENAB_I2S_FULL 0x00000080 +#define MUXENAB_SFLASH 0x00000100 +#define MUXENAB_RFSWCTRL0 0x00000200 +#define MUXENAB_RFSWCTRL1 0x00000400 +#define MUXENAB_RFSWCTRL2 0x00000800 +#define MUXENAB_SECI 0x00001000 +#define MUXENAB_BT_LEGACY 0x00002000 +#define MUXENAB_HOST_WAKE1 0x00004000 /* configure alternative GPIO for SDIO host_wake */ + +/* Boot flags */ +#define FLASH_KERNEL_NFLASH 0x00000001 +#define FLASH_BOOT_NFLASH 0x00000002 + +#endif /* _BCMDEVS_H */ diff --git a/bcmdhd.100.10.315.x/include/bcmdhcp.h b/bcmdhd.100.10.315.x/include/bcmdhcp.h new file mode 100644 index 0000000..aad986c --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmdhcp.h @@ -0,0 +1,92 @@ +/* + * Fundamental constants relating to DHCP Protocol + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmdhcp.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _bcmdhcp_h_ +#define _bcmdhcp_h_ + +/* DHCP params */ +#define DHCP_TYPE_OFFSET 0 /* DHCP type (request|reply) offset */ +#define DHCP_TID_OFFSET 4 /* DHCP transaction id offset */ +#define DHCP_FLAGS_OFFSET 10 /* DHCP flags offset */ +#define DHCP_CIADDR_OFFSET 12 /* DHCP client IP address offset */ +#define DHCP_YIADDR_OFFSET 16 /* DHCP your IP address offset */ +#define DHCP_GIADDR_OFFSET 24 /* DHCP relay agent IP address offset */ +#define DHCP_CHADDR_OFFSET 28 /* DHCP client h/w address offset */ +#define DHCP_OPT_OFFSET 236 /* DHCP options offset */ + +#define DHCP_OPT_MSGTYPE 53 /* DHCP message type */ +#define DHCP_OPT_MSGTYPE_REQ 3 +#define DHCP_OPT_MSGTYPE_ACK 5 /* DHCP message type - ACK */ + +#define DHCP_OPT_CODE_OFFSET 0 /* Option identifier */ +#define DHCP_OPT_LEN_OFFSET 1 /* Option data length */ +#define DHCP_OPT_DATA_OFFSET 2 /* Option data */ + +#define DHCP_OPT_CODE_CLIENTID 61 /* Option identifier */ + +#define DHCP_TYPE_REQUEST 1 /* DHCP request (discover|request) */ +#define DHCP_TYPE_REPLY 2 /* DHCP reply (offer|ack) */ + +#define DHCP_PORT_SERVER 67 /* DHCP server UDP port */ +#define DHCP_PORT_CLIENT 68 /* DHCP client UDP port */ + +#define DHCP_FLAG_BCAST 0x8000 /* DHCP broadcast flag */ + +#define DHCP_FLAGS_LEN 2 /* DHCP flags field length */ + +#define DHCP6_TYPE_SOLICIT 1 /* DHCP6 solicit */ +#define DHCP6_TYPE_ADVERTISE 2 /* DHCP6 advertise */ +#define DHCP6_TYPE_REQUEST 3 /* DHCP6 request */ +#define DHCP6_TYPE_CONFIRM 4 /* DHCP6 confirm */ +#define DHCP6_TYPE_RENEW 5 /* DHCP6 renew */ +#define DHCP6_TYPE_REBIND 6 /* DHCP6 rebind */ +#define DHCP6_TYPE_REPLY 7 /* DHCP6 reply */ +#define DHCP6_TYPE_RELEASE 8 /* DHCP6 release */ +#define DHCP6_TYPE_DECLINE 9 /* DHCP6 decline */ +#define DHCP6_TYPE_RECONFIGURE 10 /* DHCP6 reconfigure */ +#define DHCP6_TYPE_INFOREQ 11 /* DHCP6 information request */ +#define DHCP6_TYPE_RELAYFWD 12 /* DHCP6 relay forward 
*/ +#define DHCP6_TYPE_RELAYREPLY 13 /* DHCP6 relay reply */ + +#define DHCP6_TYPE_OFFSET 0 /* DHCP6 type offset */ + +#define DHCP6_MSG_OPT_OFFSET 4 /* Offset of options in client server messages */ +#define DHCP6_RELAY_OPT_OFFSET 34 /* Offset of options in relay messages */ + +#define DHCP6_OPT_CODE_OFFSET 0 /* Option identifier */ +#define DHCP6_OPT_LEN_OFFSET 2 /* Option data length */ +#define DHCP6_OPT_DATA_OFFSET 4 /* Option data */ + +#define DHCP6_OPT_CODE_CLIENTID 1 /* DHCP6 CLIENTID option */ +#define DHCP6_OPT_CODE_SERVERID 2 /* DHCP6 SERVERID option */ + +#define DHCP6_PORT_SERVER 547 /* DHCP6 server UDP port */ +#define DHCP6_PORT_CLIENT 546 /* DHCP6 client UDP port */ + +#endif /* #ifndef _bcmdhcp_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmendian.h b/bcmdhd.100.10.315.x/include/bcmendian.h new file mode 100644 index 0000000..c9a95ae --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmendian.h @@ -0,0 +1,416 @@ +/* + * Byte order utilities + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmendian.h 633810 2016-04-25 16:46:55Z $ + * + * This file by default provides proper behavior on little-endian architectures. + * On big-endian architectures, IL_BIGENDIAN should be defined. + */ + +#ifndef _BCMENDIAN_H_ +#define _BCMENDIAN_H_ + +#include + +/* Reverse the bytes in a 16-bit value */ +#define BCMSWAP16(val) \ + ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \ + (((uint16)(val) & (uint16)0xff00U) >> 8))) + +/* Reverse the bytes in a 32-bit value */ +#define BCMSWAP32(val) \ + ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \ + (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \ + (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \ + (((uint32)(val) & (uint32)0xff000000U) >> 24))) + +/* Reverse the two 16-bit halves of a 32-bit value */ +#define BCMSWAP32BY16(val) \ + ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \ + (((uint32)(val) & (uint32)0xffff0000U) >> 16))) + +/* Reverse the bytes in a 64-bit value */ +#define BCMSWAP64(val) \ + ((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \ + (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \ + (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \ + (((uint64)(val) & 0x00000000ff000000ULL) << 8) | \ + (((uint64)(val) & 0x000000ff00000000ULL) >> 8) | \ + (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \ + (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \ + (((uint64)(val) & 0xff00000000000000ULL) >> 56))) + +/* Reverse the two 32-bit halves of a 64-bit value */ +#define BCMSWAP64BY32(val) \ + ((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \ + (((uint64)(val) & 0xffffffff00000000ULL) >> 32))) + +/* Byte swapping macros + * Host <=> Network (Big Endian) for 16- and 32-bit values + * Host <=> 
Little-Endian for 16- and 32-bit values + */ +#ifndef hton16 +#define HTON16(i) BCMSWAP16(i) +#define hton16(i) bcmswap16(i) +#define HTON32(i) BCMSWAP32(i) +#define hton32(i) bcmswap32(i) +#define NTOH16(i) BCMSWAP16(i) +#define ntoh16(i) bcmswap16(i) +#define NTOH32(i) BCMSWAP32(i) +#define ntoh32(i) bcmswap32(i) +#define LTOH16(i) (i) +#define ltoh16(i) (i) +#define LTOH32(i) (i) +#define ltoh32(i) (i) +#define HTOL16(i) (i) +#define htol16(i) (i) +#define HTOL32(i) (i) +#define htol32(i) (i) +#define HTOL64(i) (i) +#define htol64(i) (i) +#endif /* hton16 */ + +#define ltoh16_buf(buf, i) +#define htol16_buf(buf, i) +#define ltoh32_buf(buf, i) +#define htol32_buf(buf, i) +#define ltoh64_buf(buf, i) +#define htol64_buf(buf, i) + +/* Unaligned loads and stores in host byte order */ +#define load32_ua(a) ltoh32_ua(a) +#define store32_ua(a, v) htol32_ua_store(v, a) +#define load16_ua(a) ltoh16_ua(a) +#define store16_ua(a, v) htol16_ua_store(v, a) +#define load64_ua(a) ltoh64_ua(a) +#define store64_ua(a, v) htol64_ua_store(v, a) + +#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8)) +#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) +#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1]) +#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) + +#define _LTOH64_UA(cp) ((uint64)(cp)[0] | ((uint64)(cp)[1] << 8) | \ + ((uint64)(cp)[2] << 16) | ((uint64)(cp)[3] << 24) | \ + ((uint64)(cp)[4] << 32) | ((uint64)(cp)[5] << 40) | \ + ((uint64)(cp)[6] << 48) | ((uint64)(cp)[7] << 56)) + +#define _NTOH64_UA(cp) ((uint64)(cp)[7] | ((uint64)(cp)[6] << 8) | \ + ((uint64)(cp)[5] << 16) | ((uint64)(cp)[4] << 24) | \ + ((uint64)(cp)[3] << 32) | ((uint64)(cp)[2] << 40) | \ + ((uint64)(cp)[1] << 48) | ((uint64)(cp)[0] << 56)) + +#define ltoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? 
_LTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#define ntoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#ifdef __GNUC__ + +/* GNU macro versions avoid referencing the argument multiple times, while also + * avoiding the -fno-inline used in ROM builds. + */ + +#define bcmswap16(val) ({ \ + uint16 _val = (val); \ + BCMSWAP16(_val); \ +}) + +#define bcmswap32(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32(_val); \ +}) + +#define bcmswap64(val) ({ \ + uint64 _val = (val); \ + BCMSWAP64(_val); \ +}) + +#define bcmswap32by16(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32BY16(_val); \ +}) + +#define bcmswap16_buf(buf, len) ({ \ + uint16 *_buf = (uint16 *)(buf); \ + uint _wds = (len) / 2; \ + while (_wds--) { \ + *_buf = bcmswap16(*_buf); \ + _buf++; \ + } \ +}) + +#define bcmswap32_buf(buf, len) ({ \ + uint32 *_buf = (uint32 *)(buf); \ + uint _wds = (len) / 4; \ + while (_wds--) { \ + *_buf = bcmswap32(*_buf); \ + _buf++; \ + } \ +}) + +#define bcmswap64_buf(buf, len) ({ \ + uint64 *_buf = (uint64 *)(buf); \ + uint _wds = (len) / 8; \ + while (_wds--) { \ + *_buf = bcmswap64(*_buf); \ + _buf++; \ + } \ +}) + +#define htol16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = _val >> 8; \ +}) + +#define htol32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = (_val >> 8) & 0xff; \ + _bytes[2] = (_val >> 16) & 0xff; \ + _bytes[3] = _val >> 24; \ +}) + +#define htol64_ua_store(val, bytes) ({ \ + uint64 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + int i; \ + for (i = 0; i < (int)sizeof(_val); ++i) { \ + *_bytes++ = _val & 0xff; \ + _val >>= 8; \ + } \ +}) + +#define hton16_ua_store(val, bytes) ({ \ + uint16 
_val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 8; \ + _bytes[1] = _val & 0xff; \ +}) + +#define hton32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 24; \ + _bytes[1] = (_val >> 16) & 0xff; \ + _bytes[2] = (_val >> 8) & 0xff; \ + _bytes[3] = _val & 0xff; \ +}) + +#define ltoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH16_UA(_bytes); \ +}) + +#define ltoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH32_UA(_bytes); \ +}) + +#define ltoh64_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH64_UA(_bytes); \ +}) + +#define ntoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH16_UA(_bytes); \ +}) + +#define ntoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH32_UA(_bytes); \ +}) + +#define ntoh64_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH64_UA(_bytes); \ +}) + +#else /* !__GNUC__ */ + +/* Inline versions avoid referencing the argument multiple times */ +static INLINE uint16 +bcmswap16(uint16 val) +{ + return BCMSWAP16(val); +} + +static INLINE uint32 +bcmswap32(uint32 val) +{ + return BCMSWAP32(val); +} + +static INLINE uint64 +bcmswap64(uint64 val) +{ + return BCMSWAP64(val); +} + +static INLINE uint32 +bcmswap32by16(uint32 val) +{ + return BCMSWAP32BY16(val); +} + +/* Reverse pairs of bytes in a buffer (not for high-performance use) */ +/* buf - start of buffer of shorts to swap */ +/* len - byte length of buffer */ +static INLINE void +bcmswap16_buf(uint16 *buf, uint len) +{ + len = len / 2; + + while (len--) { + *buf = bcmswap16(*buf); + buf++; + } +} + +/* + * Store 16-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = val >> 8; +} + +/* + * Store 32-bit value to unaligned little-endian byte array. 
+ */ +static INLINE void +htol32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = (val >> 8) & 0xff; + bytes[2] = (val >> 16) & 0xff; + bytes[3] = val >> 24; +} + +/* + * Store 64-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol64_ua_store(uint64 val, uint8 *bytes) +{ + int i; + for (i = 0; i < sizeof(val); ++i) { + *bytes++ = (uint8)(val & 0xff); + val >>= 8; + } +} + +/* + * Store 16-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val >> 8; + bytes[1] = val & 0xff; +} + +/* + * Store 32-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val >> 24; + bytes[1] = (val >> 16) & 0xff; + bytes[2] = (val >> 8) & 0xff; + bytes[3] = val & 0xff; +} + +/* + * Load 16-bit value from unaligned little-endian byte array. + */ +static INLINE uint16 +ltoh16_ua(const void *bytes) +{ + return _LTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned little-endian byte array. + */ +static INLINE uint32 +ltoh32_ua(const void *bytes) +{ + return _LTOH32_UA((const uint8 *)bytes); +} + +/* + * Load 64-bit value from unaligned little-endian byte array. + */ +static INLINE uint64 +ltoh64_ua(const void *bytes) +{ + return _LTOH64_UA((const uint8 *)bytes); +} + +/* + * Load 16-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint16 +ntoh16_ua(const void *bytes) +{ + return _NTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint32 +ntoh32_ua(const void *bytes) +{ + return _NTOH32_UA((const uint8 *)bytes); +} + +/* + * Load 64-bit value from unaligned big-(network-)endian byte array. 
+ */ +static INLINE uint64 +ntoh64_ua(const void *bytes) +{ + return _NTOH64_UA((const uint8 *)bytes); +} + +#endif /* !__GNUC__ */ +#endif /* !_BCMENDIAN_H_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmeth.h b/bcmdhd.100.10.315.x/include/bcmeth.h new file mode 100644 index 0000000..761ea44 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmeth.h @@ -0,0 +1,115 @@ +/* + * Broadcom Ethernettype protocol definitions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmeth.h 701825 2017-05-26 16:45:27Z $ + */ + +/* + * Broadcom Ethernet protocol defines + */ + +#ifndef _BCMETH_H_ +#define _BCMETH_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. 
*/ +#include + +/* ETHER_TYPE_BRCM is defined in ethernet.h */ + +/* + * Following the 2byte BRCM ether_type is a 16bit BRCM subtype field + * in one of two formats: (only subtypes 32768-65535 are in use now) + * + * subtypes 0-32767: + * 8 bit subtype (0-127) + * 8 bit length in bytes (0-255) + * + * subtypes 32768-65535: + * 16 bit big-endian subtype + * 16 bit big-endian length in bytes (0-65535) + * + * length is the number of additional bytes beyond the 4 or 6 byte header + * + * Reserved values: + * 0 reserved + * 5-15 reserved for iLine protocol assignments + * 17-126 reserved, assignable + * 127 reserved + * 32768 reserved + * 32769-65534 reserved, assignable + * 65535 reserved + */ + +/* + * While adding the subtypes and their specific processing code make sure + * bcmeth_bcm_hdr_t is the first data structure in the user specific data structure definition + */ + +#define BCMILCP_SUBTYPE_RATE 1 +#define BCMILCP_SUBTYPE_LINK 2 +#define BCMILCP_SUBTYPE_CSA 3 +#define BCMILCP_SUBTYPE_LARQ 4 +#define BCMILCP_SUBTYPE_VENDOR 5 +#define BCMILCP_SUBTYPE_FLH 17 + +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 +#define BCMILCP_SUBTYPE_CERT 32770 +#define BCMILCP_SUBTYPE_SES 32771 + +#define BCMILCP_BCM_SUBTYPE_RESERVED 0 +#define BCMILCP_BCM_SUBTYPE_EVENT 1 +#define BCMILCP_BCM_SUBTYPE_SES 2 +/* + * The EAPOL type is not used anymore. 
Instead EAPOL messages are now embedded + * within BCMILCP_BCM_SUBTYPE_EVENT type messages + */ +/* #define BCMILCP_BCM_SUBTYPE_EAPOL 3 */ +#define BCMILCP_BCM_SUBTYPE_DPT 4 +#define BCMILCP_BCM_SUBTYPE_DNGLEVENT 5 + +#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8 +#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0 +#define BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD 2 + +/* These fields are stored in network order */ +typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr +{ + uint16 subtype; /* Vendor specific..32769 */ + uint16 length; + uint8 version; /* Version is 0 */ + uint8 oui[3]; /* Broadcom OUI */ + /* user specific Data */ + uint16 usr_subtype; +} BWL_POST_PACKED_STRUCT bcmeth_hdr_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _BCMETH_H_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmevent.h b/bcmdhd.100.10.315.x/include/bcmevent.h new file mode 100644 index 0000000..c26093b --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmevent.h @@ -0,0 +1,1188 @@ +/* + * Broadcom Event protocol definitions + * + * Dependencies: bcmeth.h + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmevent.h 768756 2018-06-21 05:44:28Z $ + * + */ + +/* + * Broadcom Ethernet Events protocol defines + * + */ + +#ifndef _BCMEVENT_H_ +#define _BCMEVENT_H_ + +#include +/* #include -- TODO: req., excluded to overwhelming coupling (break up ethernet.h) */ +#include +#if defined(DNGL_EVENT_SUPPORT) +#include +#endif // endif + +/* This marks the start of a packed structure section. */ +#include + +#define BCM_EVENT_MSG_VERSION 2 /* wl_event_msg_t struct version */ +#define BCM_MSG_IFNAME_MAX 16 /* max length of interface name */ + +/* flags */ +#define WLC_EVENT_MSG_LINK 0x01 /* link is up */ +#define WLC_EVENT_MSG_FLUSHTXQ 0x02 /* flush tx queue on MIC error */ +#define WLC_EVENT_MSG_GROUP 0x04 /* group MIC error */ +#define WLC_EVENT_MSG_UNKBSS 0x08 /* unknown source bsscfg */ +#define WLC_EVENT_MSG_UNKIF 0x10 /* unknown source OS i/f */ + +/* these fields are stored in network order */ + +/* version 1 */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; /* see flags below */ + uint32 event_type; /* Message (see below) */ + uint32 status; /* Status code (see below) */ + uint32 reason; /* Reason code (if applicable) */ + uint32 auth_type; /* WLC_E_AUTH */ + uint32 datalen; /* data buf */ + struct ether_addr addr; /* Station address (if applicable) */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ +} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t; + +/* the current version */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; /* see flags below */ + uint32 event_type; /* Message (see below) */ + uint32 status; /* Status code (see below) */ + uint32 reason; /* Reason code (if applicable) */ + uint32 auth_type; /* WLC_E_AUTH */ + uint32 
datalen; /* data buf */ + struct ether_addr addr; /* Station address (if applicable) */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ + uint8 ifidx; /* destination OS i/f index */ + uint8 bsscfgidx; /* source bsscfg index */ +} BWL_POST_PACKED_STRUCT wl_event_msg_t; + +/* used by driver msgs */ +typedef BWL_PRE_PACKED_STRUCT struct bcm_event { + struct ether_header eth; + bcmeth_hdr_t bcm_hdr; + wl_event_msg_t event; + /* data portion follows */ +} BWL_POST_PACKED_STRUCT bcm_event_t; + +/* + * used by host event + * note: if additional event types are added, it should go with is_wlc_event_frame() as well. + */ +typedef union bcm_event_msg_u { + wl_event_msg_t event; +#if defined(DNGL_EVENT_SUPPORT) + bcm_dngl_event_msg_t dngl_event; +#endif // endif + + /* add new event here */ +} bcm_event_msg_u_t; + +#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header)) + +/* Event messages */ +#define WLC_E_SET_SSID 0 /* indicates status of set SSID */ +#define WLC_E_JOIN 1 /* differentiates join IBSS from found (WLC_E_START) IBSS */ +#define WLC_E_START 2 /* STA founded an IBSS or AP started a BSS */ +#define WLC_E_AUTH 3 /* 802.11 AUTH request */ +#define WLC_E_AUTH_IND 4 /* 802.11 AUTH indication */ +#define WLC_E_DEAUTH 5 /* 802.11 DEAUTH request */ +#define WLC_E_DEAUTH_IND 6 /* 802.11 DEAUTH indication */ +#define WLC_E_ASSOC 7 /* 802.11 ASSOC request */ +#define WLC_E_ASSOC_IND 8 /* 802.11 ASSOC indication */ +#define WLC_E_REASSOC 9 /* 802.11 REASSOC request */ +#define WLC_E_REASSOC_IND 10 /* 802.11 REASSOC indication */ +#define WLC_E_DISASSOC 11 /* 802.11 DISASSOC request */ +#define WLC_E_DISASSOC_IND 12 /* 802.11 DISASSOC indication */ +#define WLC_E_QUIET_START 13 /* 802.11h Quiet period started */ +#define WLC_E_QUIET_END 14 /* 802.11h Quiet period ended */ +#define WLC_E_BEACON_RX 15 /* BEACONS received/lost indication */ +#define WLC_E_LINK 16 /* generic link indication */ +#define 
WLC_E_MIC_ERROR 17 /* TKIP MIC error occurred */ +#define WLC_E_NDIS_LINK 18 /* NDIS style link indication */ +#define WLC_E_ROAM 19 /* roam complete: indicate status & reason */ +#define WLC_E_TXFAIL 20 /* change in dot11FailedCount (txfail) */ +#define WLC_E_PMKID_CACHE 21 /* WPA2 pmkid cache indication */ +#define WLC_E_RETROGRADE_TSF 22 /* current AP's TSF value went backward */ +#define WLC_E_PRUNE 23 /* AP was pruned from join list for reason */ +#define WLC_E_AUTOAUTH 24 /* report AutoAuth table entry match for join attempt */ +#define WLC_E_EAPOL_MSG 25 /* Event encapsulating an EAPOL message */ +#define WLC_E_SCAN_COMPLETE 26 /* Scan results are ready or scan was aborted */ +#define WLC_E_ADDTS_IND 27 /* indicate to host addts fail/success */ +#define WLC_E_DELTS_IND 28 /* indicate to host delts fail/success */ +#define WLC_E_BCNSENT_IND 29 /* indicate to host of beacon transmit */ +#define WLC_E_BCNRX_MSG 30 /* Send the received beacon up to the host */ +#define WLC_E_BCNLOST_MSG 31 /* indicate to host loss of beacon */ +#define WLC_E_ROAM_PREP 32 /* before attempting to roam association */ +#define WLC_E_PFN_NET_FOUND 33 /* PFN network found event */ +#define WLC_E_PFN_NET_LOST 34 /* PFN network lost event */ +#define WLC_E_RESET_COMPLETE 35 +#define WLC_E_JOIN_START 36 +#define WLC_E_ROAM_START 37 /* roam attempt started: indicate reason */ +#define WLC_E_ASSOC_START 38 +#define WLC_E_IBSS_ASSOC 39 +#define WLC_E_RADIO 40 +#define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */ + +#define WLC_E_PROBREQ_MSG 44 /* probe request received */ +#define WLC_E_SCAN_CONFIRM_IND 45 +#define WLC_E_PSK_SUP 46 /* WPA Handshake fail */ +#define WLC_E_COUNTRY_CODE_CHANGED 47 +#define WLC_E_EXCEEDED_MEDIUM_TIME 48 /* WMMAC exceeded medium time */ +#define WLC_E_ICV_ERROR 49 /* WEP ICV error occurred */ +#define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */ +#define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted 
frame */ +#define WLC_E_TRACE 52 +#define WLC_E_IF 54 /* I/F change (for dongle host notification) */ +#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen state expires */ +#define WLC_E_RSSI 56 /* indicate RSSI change based on configured levels */ +#define WLC_E_PFN_BEST_BATCHING 57 /* PFN best network batching event */ +#define WLC_E_EXTLOG_MSG 58 +#define WLC_E_ACTION_FRAME 59 /* Action frame Rx */ +#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */ +#define WLC_E_PRE_ASSOC_IND 61 /* assoc request received */ +#define WLC_E_PRE_REASSOC_IND 62 /* re-assoc request received */ +#define WLC_E_CHANNEL_ADOPTED 63 +#define WLC_E_AP_STARTED 64 /* AP started */ +#define WLC_E_DFS_AP_STOP 65 /* AP stopped due to DFS */ +#define WLC_E_DFS_AP_RESUME 66 /* AP resumed due to DFS */ +#define WLC_E_WAI_STA_EVENT 67 /* WAI stations event */ +#define WLC_E_WAI_MSG 68 /* event encapsulating an WAI message */ +#define WLC_E_ESCAN_RESULT 69 /* escan result event */ +#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70 /* action frame off channel complete */ +#define WLC_E_PROBRESP_MSG 71 /* probe response received */ +#define WLC_E_P2P_PROBREQ_MSG 72 /* P2P Probe request received */ +#define WLC_E_DCS_REQUEST 73 +#define WLC_E_FIFO_CREDIT_MAP 74 /* credits for D11 FIFOs. 
[AC0,AC1,AC2,AC3,BC_MC,ATIM] */ +#define WLC_E_ACTION_FRAME_RX 75 /* Received action frame event WITH + * wl_event_rx_frame_data_t header + */ +#define WLC_E_WAKE_EVENT 76 /* Wake Event timer fired, used for wake WLAN test mode */ +#define WLC_E_RM_COMPLETE 77 /* Radio measurement complete */ +#define WLC_E_HTSFSYNC 78 /* Synchronize TSF with the host */ +#define WLC_E_OVERLAY_REQ 79 /* request an overlay IOCTL/iovar from the host */ +#define WLC_E_CSA_COMPLETE_IND 80 /* 802.11 CHANNEL SWITCH ACTION completed */ +#define WLC_E_EXCESS_PM_WAKE_EVENT 81 /* excess PM Wake Event to inform host */ +#define WLC_E_PFN_SCAN_NONE 82 /* no PFN networks around */ +/* PFN BSSID network found event, conflict/share with WLC_E_PFN_SCAN_NONE */ +#define WLC_E_PFN_BSSID_NET_FOUND 82 +#define WLC_E_PFN_SCAN_ALLGONE 83 /* last found PFN network gets lost */ +/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */ +#define WLC_E_PFN_BSSID_NET_LOST 83 +#define WLC_E_GTK_PLUMBED 84 +#define WLC_E_ASSOC_IND_NDIS 85 /* 802.11 ASSOC indication for NDIS only */ +#define WLC_E_REASSOC_IND_NDIS 86 /* 802.11 REASSOC indication for NDIS only */ +#define WLC_E_ASSOC_REQ_IE 87 +#define WLC_E_ASSOC_RESP_IE 88 +#define WLC_E_ASSOC_RECREATED 89 /* association recreated on resume */ +#define WLC_E_ACTION_FRAME_RX_NDIS 90 /* rx action frame event for NDIS only */ +#define WLC_E_AUTH_REQ 91 /* authentication request received */ +#define WLC_E_TDLS_PEER_EVENT 92 /* discovered peer, connected/disconnected peer */ +#define WLC_E_SPEEDY_RECREATE_FAIL 93 /* fast assoc recreation failed */ +#define WLC_E_NATIVE 94 /* port-specific event and payload (e.g. 
NDIS) */ +#define WLC_E_PKTDELAY_IND 95 /* event for tx pkt delay suddently jump */ +#define WLC_E_PSTA_PRIMARY_INTF_IND 99 /* psta primary interface indication */ +#define WLC_E_NAN 100 /* NAN event - Reserved for future */ +#define WLC_E_BEACON_FRAME_RX 101 +#define WLC_E_SERVICE_FOUND 102 /* desired service found */ +#define WLC_E_GAS_FRAGMENT_RX 103 /* GAS fragment received */ +#define WLC_E_GAS_COMPLETE 104 /* GAS sessions all complete */ +#define WLC_E_P2PO_ADD_DEVICE 105 /* New device found by p2p offload */ +#define WLC_E_P2PO_DEL_DEVICE 106 /* device has been removed by p2p offload */ +#define WLC_E_WNM_STA_SLEEP 107 /* WNM event to notify STA enter sleep mode */ +#define WLC_E_TXFAIL_THRESH 108 /* Indication of MAC tx failures (exhaustion of + * 802.11 retries) exceeding threshold(s) + */ +#define WLC_E_PROXD 109 /* Proximity Detection event */ +#define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */ +#define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, re using event 110 */ +#define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */ +#define WLC_E_MIMO_PWR_SAVE 115 /* Inform host MIMO PWR SAVE learning events */ +#define WLC_E_LEAKY_AP_STATS 116 /* Inform host leaky Ap stats events */ +#define WLC_E_ALLOW_CREDIT_BORROW 117 /* Allow or disallow wlfc credit borrowing in DHD */ +#define WLC_E_MSCH 120 /* Multiple channel scheduler event */ +#define WLC_E_CSA_START_IND 121 +#define WLC_E_CSA_DONE_IND 122 +#define WLC_E_CSA_FAILURE_IND 123 +#define WLC_E_CCA_CHAN_QUAL 124 /* CCA based channel quality report */ +#define WLC_E_BSSID 125 /* to report change in BSSID while roaming */ +#define WLC_E_TX_STAT_ERROR 126 /* tx error indication */ +#define WLC_E_BCMC_CREDIT_SUPPORT 127 /* credit check for BCMC supported */ +#define WLC_E_PEER_TIMEOUT 128 /* silently drop a STA because of inactivity */ +#define WLC_E_BT_WIFI_HANDOVER_REQ 130 /* Handover Request Initiated */ +#define WLC_E_SPW_TXINHIBIT 131 /* Southpaw TxInhibit notification */ +#define 
WLC_E_FBT_AUTH_REQ_IND 132 /* FBT Authentication Request Indication */ +#define WLC_E_RSSI_LQM 133 /* Enhancement addition for WLC_E_RSSI */ +#define WLC_E_PFN_GSCAN_FULL_RESULT 134 /* Full probe/beacon (IEs etc) results */ +#define WLC_E_PFN_SWC 135 /* Significant change in rssi of bssids being tracked */ +#define WLC_E_AUTHORIZED 136 /* a STA been authroized for traffic */ +#define WLC_E_PROBREQ_MSG_RX 137 /* probe req with wl_event_rx_frame_data_t header */ +#define WLC_E_PFN_SCAN_COMPLETE 138 /* PFN completed scan of network list */ +#define WLC_E_RMC_EVENT 139 /* RMC Event */ +#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */ +#define WLC_E_RRM 141 /* RRM Event */ +#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */ +#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */ +#define WLC_E_ULP 146 /* ULP entered indication */ +#define WLC_E_MACDBG 147 /* Ucode debugging event */ +#define WLC_E_RESERVED 148 /* reserved */ +#define WLC_E_PRE_ASSOC_RSEP_IND 149 /* assoc resp received */ +#define WLC_E_PSK_AUTH 150 /* PSK AUTH WPA2-PSK 4 WAY Handshake failure */ +#define WLC_E_TKO 151 /* TCP keepalive offload */ +#define WLC_E_SDB_TRANSITION 152 /* SDB mode-switch event */ +#define WLC_E_NATOE_NFCT 153 /* natoe event */ +#define WLC_E_TEMP_THROTTLE 154 /* Temperature throttling control event */ +#define WLC_E_LINK_QUALITY 155 /* Link quality measurement complete */ +#define WLC_E_BSSTRANS_RESP 156 /* BSS Transition Response received */ +#define WLC_E_TWT_SETUP 157 /* TWT Setup Complete event */ +#define WLC_E_HE_TWT_SETUP 157 /* TODO:Remove after merging TWT changes to trunk */ +#define WLC_E_NAN_CRITICAL 158 /* NAN Critical Event */ +#define WLC_E_NAN_NON_CRITICAL 159 /* NAN Non-Critical Event */ +#define WLC_E_RADAR_DETECTED 160 /* Radar Detected event */ +#define WLC_E_RANGING_EVENT 161 /* Ranging event */ +#define WLC_E_INVALID_IE 162 /* Received invalid IE */ +#define WLC_E_MODE_SWITCH 163 /* Mode switch event */ +#define WLC_E_PKT_FILTER 164 
/* Packet filter event */ +#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing + * tx/rxchain + */ +#define WLC_E_FBT 166 /* FBT event */ +#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */ +#define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN BAckoff event */ +#define WLC_E_AGGR_EVENT 169 /* Aggregated event */ +#define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */ +#define WLC_E_SCAN_START 172 /* Deprecated */ +#define WLC_E_SCAN 172 /* Scan event */ +#define WLC_E_MBO 173 /* MBO event */ +#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */ +#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */ +#define WLC_E_ADPS 176 /* ADPS event */ +#define WLC_E_SLOTTED_BSS_PEER_OP 177 /* Per peer SCB delete */ +#define WLC_E_HWA 178 /* HWA events */ +#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */ +#define WLC_E_ONBODY_STATUS_CHANGE 180 /* Indication of onbody status change */ +#define WLC_E_LAST 181 /* highest val + 1 for range checking */ +#if (WLC_E_LAST > 181) +#error "WLC_E_LAST: Invalid value for last event; must be <= 181." 
+#endif /* WLC_E_LAST */ + +/* define an API for getting the string name of an event */ +extern const char *bcmevent_get_name(uint event_type); +extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern void wl_event_to_network_order(wl_event_msg_t * evt); + +/* validate if the event is proper and if valid copy event header to event */ +extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype, + bcm_event_msg_u_t *out_event); + +/* conversion between host and network order for events */ +void wl_event_to_host_order(wl_event_msg_t * evt); +void wl_event_to_network_order(wl_event_msg_t * evt); + +/* Event status codes */ +#define WLC_E_STATUS_SUCCESS 0 /* operation was successful */ +#define WLC_E_STATUS_FAIL 1 /* operation failed */ +#define WLC_E_STATUS_TIMEOUT 2 /* operation timed out */ +#define WLC_E_STATUS_NO_NETWORKS 3 /* failed due to no matching network found */ +#define WLC_E_STATUS_ABORT 4 /* operation was aborted */ +#define WLC_E_STATUS_NO_ACK 5 /* protocol failure: packet not ack'd */ +#define WLC_E_STATUS_UNSOLICITED 6 /* AUTH or ASSOC packet was unsolicited */ +#define WLC_E_STATUS_ATTEMPT 7 /* attempt to assoc to an auto auth configuration */ +#define WLC_E_STATUS_PARTIAL 8 /* scan results are incomplete */ +#define WLC_E_STATUS_NEWSCAN 9 /* scan aborted by another scan */ +#define WLC_E_STATUS_NEWASSOC 10 /* scan aborted due to assoc in progress */ +#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */ +#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */ +#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */ +#ifdef BCMCCX +#define WLC_E_STATUS_CCXFASTRM 14 /* scan aborted due to CCX fast roam */ +#endif /* BCMCCX */ +#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */ +#define WLC_E_STATUS_ERROR 16 /* request failed due to error */ +#define WLC_E_STATUS_SLOTTED_PEER_ADD 17 /* Slotted scb for peer addition status */ +#define 
WLC_E_STATUS_SLOTTED_PEER_DEL 18 /* Slotted scb for peer deletion status */ +#define WLC_E_STATUS_RXBCN 19 /* Rx Beacon event for FAKEAP feature */ +#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */ + +/* 4-way handshake event type */ +#define WLC_E_PSK_AUTH_SUB_EAPOL_START 1 /* EAPOL start */ +#define WLC_E_PSK_AUTH_SUB_EAPOL_DONE 2 /* EAPOL end */ +/* GTK event type */ +#define WLC_E_PSK_AUTH_SUB_GTK_DONE 3 /* GTK end */ + +/* 4-way handshake event status code */ +#define WLC_E_STATUS_PSK_AUTH_WPA_TIMOUT 1 /* operation timed out */ +#define WLC_E_STATUS_PSK_AUTH_MIC_WPA_ERR 2 /* MIC error */ +#define WLC_E_STATUS_PSK_AUTH_IE_MISMATCH_ERR 3 /* IE Missmatch error */ +#define WLC_E_STATUS_PSK_AUTH_REPLAY_COUNT_ERR 4 +#define WLC_E_STATUS_PSK_AUTH_PEER_BLACKISTED 5 /* Blaclisted peer */ +#define WLC_E_STATUS_PSK_AUTH_GTK_REKEY_FAIL 6 /* GTK event status code */ + +/* SDB transition status code */ +#define WLC_E_STATUS_SDB_START 1 +#define WLC_E_STATUS_SDB_COMPLETE 2 +/* Slice-swap status code */ +#define WLC_E_STATUS_SLICE_SWAP_START 3 +#define WLC_E_STATUS_SLICE_SWAP_COMPLETE 4 + +/* SDB transition reason code */ +#define WLC_E_REASON_HOST_DIRECT 0 +#define WLC_E_REASON_INFRA_ASSOC 1 +#define WLC_E_REASON_INFRA_ROAM 2 +#define WLC_E_REASON_INFRA_DISASSOC 3 +#define WLC_E_REASON_NO_MODE_CHANGE_NEEDED 4 +#define WLC_E_REASON_AWDL_ENABLE 5 +#define WLC_E_REASON_AWDL_DISABLE 6 + +/* WLC_E_SDB_TRANSITION event data */ +#define WL_MAX_BSSCFG 4 +#define WL_EVENT_SDB_TRANSITION_VER 1 +typedef struct wl_event_sdb_data { + uint8 wlunit; /* Core index */ + uint8 is_iftype; /* Interface Type(Station, SoftAP, P2P_GO, P2P_GC */ + uint16 chanspec; /* Interface Channel/Chanspec */ + char ssidbuf[(4 * 32) + 1]; /* SSID_FMT_BUF_LEN: ((4 * DOT11_MAX_SSID_LEN) + 1) */ +} wl_event_sdb_data_t; + +typedef struct wl_event_sdb_trans { + uint8 version; /* Event Data Version */ + uint8 rsdb_mode; + uint8 enable_bsscfg; + uint8 reserved; + struct wl_event_sdb_data 
values[WL_MAX_BSSCFG]; +} wl_event_sdb_trans_t; + +/* reason codes for WLC_E_GTK_KEYROT_NO_CHANSW event */ +#define WLC_E_GTKKEYROT_SCANDELAY 0 /* Delay scan while gtk in progress */ +#define WLC_E_GTKKEYROT_SKIPCHANSW_AWDL 1 /* Avoid chansw by awdl while gtk in progress */ +#define WLC_E_GTKKEYROT_SKIPCHANSW_P2P 2 /* Avoid chansw by p2p while gtk in progress */ + +/* roam reason codes */ +#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */ +#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */ +#define WLC_E_REASON_DEAUTH 2 /* roamed due to DEAUTH indication */ +#define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */ +#define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */ + +#define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */ +#define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */ +#define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */ +#define WLC_E_REASON_BETTER_AP 8 /* roamed due to finding better AP */ +#define WLC_E_REASON_MINTXRATE 9 /* roamed because at mintxrate for too long */ +#define WLC_E_REASON_TXFAIL 10 /* We can hear AP, but AP can't hear us */ +/* retained for precommit auto-merging errors; remove once all branches are synced */ +#define WLC_E_REASON_REQUESTED_ROAM 11 +#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */ +#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */ +#define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */ +#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */ +#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */ +#define WLC_E_REASON_LAST 16 /* NOTE: increment this as you add reasons above */ + +/* prune reason codes */ +#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */ +#define WLC_E_PRUNE_BCAST_BSSID 2 /* AP uses a broadcast BSSID */ +#define WLC_E_PRUNE_MAC_DENY 3 /* STA's MAC addr is in AP's MAC deny 
list */ +#define WLC_E_PRUNE_MAC_NA 4 /* STA's MAC addr is not in AP's MAC allow list */ +#define WLC_E_PRUNE_REG_PASSV 5 /* AP not allowed due to regulatory restriction */ +#define WLC_E_PRUNE_SPCT_MGMT 6 /* AP does not support STA locale spectrum mgmt */ +#define WLC_E_PRUNE_RADAR 7 /* AP is on a radar channel of STA locale */ +#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */ +#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */ +#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */ +#ifdef BCMCCX +#define WLC_E_PRUNE_CCXFAST_PREVAP 11 /* CCX FAST ROAM: prune previous AP */ +#endif /* def BCMCCX */ +#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */ +#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */ +#ifdef BCMCCX +#define WLC_E_PRUNE_CCXFAST_DROAM 14 /* CCX FAST ROAM: prune unqualified AP */ +#endif /* def BCMCCX */ +#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */ +#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */ +#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */ +#ifdef BCMCCX +#define WLC_E_PRUNE_AP_BLOCKED 18 /* prune blocked AP */ +#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */ +#endif /* BCMCCX */ +#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */ + +/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */ +#define WLC_E_SUP_OTHER 0 /* Other reason */ +#define WLC_E_SUP_DECRYPT_KEY_DATA 1 /* Decryption of key data failed */ +#define WLC_E_SUP_BAD_UCAST_WEP128 2 /* Illegal use of ucast WEP128 */ +#define WLC_E_SUP_BAD_UCAST_WEP40 3 /* Illegal use of ucast WEP40 */ +#define WLC_E_SUP_UNSUP_KEY_LEN 4 /* Unsupported key length */ +#define WLC_E_SUP_PW_KEY_CIPHER 5 /* Unicast cipher mismatch in pairwise key */ +#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 /* WPA IE contains > 1 RSN IE in key msg 3 */ +#define WLC_E_SUP_MSG3_IE_MISMATCH 7 /* WPA 
IE mismatch in key message 3 */ +#define WLC_E_SUP_NO_INSTALL_FLAG 8 /* INSTALL flag unset in 4-way msg */ +#define WLC_E_SUP_MSG3_NO_GTK 9 /* encapsulated GTK missing from msg 3 */ +#define WLC_E_SUP_GRP_KEY_CIPHER 10 /* Multicast cipher mismatch in group key */ +#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 /* encapsulated GTK missing from group msg 1 */ +#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 /* GTK decrypt failure */ +#define WLC_E_SUP_SEND_FAIL 13 /* message send failure */ +#define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */ +#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */ +#define WLC_E_SUP_WPA_PSK_M1_TMO 16 /* WPA PSK 4-way handshake M1 timeout */ +#define WLC_E_SUP_WPA_PSK_M3_TMO 17 /* WPA PSK 4-way handshake M3 timeout */ +#define WLC_E_SUP_GTK_UPDATE_FAIL 18 /* GTK update failure */ +#define WLC_E_SUP_TK_UPDATE_FAIL 19 /* TK update failure */ +#define WLC_E_SUP_KEY_INSTALL_FAIL 20 /* Buffered key install failure */ + +/* Ucode reason codes carried in the WLC_E_MACDBG event */ +#define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */ +#define WLC_E_MACDBG_LIST_PSMX 1 /* Dump list update for PSMx registers */ +#define WLC_E_MACDBG_REGALL 2 /* Dump all registers */ + +/* Event data for events that include frames received over the air */ +/* WLC_E_PROBRESP_MSG + * WLC_E_P2P_PROBREQ_MSG + * WLC_E_ACTION_FRAME_RX + */ + +#define MAX_PHY_CORE_NUM 4u + +#define BCM_RX_FRAME_DATA_VERSION_2 2u + +typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v2 { + uint16 version; + uint16 len; + uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */ + uint16 pad; + int32 rssi; + uint32 mactime; + uint32 rate; + int8 per_core_rssi[MAX_PHY_CORE_NUM]; +} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v2_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v1 { + uint16 version; + uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */ + int32 rssi; + uint32 mactime; + uint32 rate; +} 
BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v1_t; + +#define BCM_RX_FRAME_DATA_VERSION_1 1u + +#ifndef WL_EVENT_RX_FRAME_DATA_ALIAS +#define BCM_RX_FRAME_DATA_VERSION BCM_RX_FRAME_DATA_VERSION_1 +typedef wl_event_rx_frame_data_v1_t wl_event_rx_frame_data_t; +#endif // endif + +/* WLC_E_IF event data */ +typedef struct wl_event_data_if { + uint8 ifidx; /* RTE virtual device index (for dongle) */ + uint8 opcode; /* see I/F opcode */ + uint8 reserved; /* bit mask (WLC_E_IF_FLAGS_XXX ) */ + uint8 bssidx; /* bsscfg index */ + uint8 role; /* see I/F role */ +} wl_event_data_if_t; + +/* WLC_E_NATOE event data */ +typedef struct wl_event_data_natoe { + uint32 natoe_active; + uint32 sta_ip; + uint16 start_port; + uint16 end_port; +} wl_event_data_natoe_t; + +/* opcode in WLC_E_IF event */ +#define WLC_E_IF_ADD 1 /* bsscfg add */ +#define WLC_E_IF_DEL 2 /* bsscfg delete */ +#define WLC_E_IF_CHANGE 3 /* bsscfg role change */ + +/* I/F role code in WLC_E_IF event */ +#define WLC_E_IF_ROLE_STA 0 /* Infra STA */ +#define WLC_E_IF_ROLE_AP 1 /* Access Point */ +#define WLC_E_IF_ROLE_WDS 2 /* WDS link */ +#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ +#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ +#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ +#define WLC_E_IF_ROLE_NAN 9 /* NAN */ + +/* WLC_E_RSSI event data */ +typedef struct wl_event_data_rssi { + int32 rssi; + int32 snr; + int32 noise; +} wl_event_data_rssi_t; + +/* WLC_E_IF flag */ +#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */ + +/* Reason codes for LINK */ +#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */ +#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ +#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ +#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ + +/* WLC_E_NDIS_LINK event data */ +typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms { + struct ether_addr peer_mac; /* 6 bytes */ + uint16 chanspec; /* 2 
bytes */ + uint32 link_speed; /* current datarate in units of 500 Kbit/s */ + uint32 max_link_speed; /* max possible datarate for link in units of 500 Kbit/s */ + int32 rssi; /* average rssi */ +} BWL_POST_PACKED_STRUCT ndis_link_parms_t; + +/* reason codes for WLC_E_OVERLAY_REQ event */ +#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */ +#define WLC_E_OVL_UPDATE_IND 1 /* device indication of host overlay update */ + +/* reason codes for WLC_E_TDLS_PEER_EVENT event */ +#define WLC_E_TDLS_PEER_DISCOVERED 0 /* peer is ready to establish TDLS */ +#define WLC_E_TDLS_PEER_CONNECTED 1 +#define WLC_E_TDLS_PEER_DISCONNECTED 2 + +/* reason codes for WLC_E_RMC_EVENT event */ +#define WLC_E_REASON_RMC_NONE 0 +#define WLC_E_REASON_RMC_AR_LOST 1 +#define WLC_E_REASON_RMC_AR_NO_ACK 2 + +#ifdef WLTDLS +/* TDLS Action Category code */ +#define TDLS_AF_CATEGORY 12 +/* Wi-Fi Display (WFD) Vendor Specific Category */ +/* used for WFD Tunneled Probe Request and Response */ +#define TDLS_VENDOR_SPECIFIC 127 +/* TDLS Action Field Values */ +#define TDLS_ACTION_SETUP_REQ 0 +#define TDLS_ACTION_SETUP_RESP 1 +#define TDLS_ACTION_SETUP_CONFIRM 2 +#define TDLS_ACTION_TEARDOWN 3 +#define WLAN_TDLS_SET_PROBE_WFD_IE 11 +#define WLAN_TDLS_SET_SETUP_WFD_IE 12 +#define WLAN_TDLS_SET_WFD_ENABLED 13 +#define WLAN_TDLS_SET_WFD_DISABLED 14 +#endif // endif + +/* WLC_E_RANGING_EVENT subtypes */ +#define WLC_E_RANGING_RESULTS 0 + +#define PHY_CAL_EVT_VERSION 1 +typedef struct wlc_phy_cal_info { + uint16 version; /* structure version */ + uint16 length; /* length of the rest of the structure - pad */ + uint16 chanspec; + uint8 start; + uint8 phase; + int16 temp; + uint8 reason; + uint8 pad; +} wlc_phy_cal_info_t; + +/* GAS event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas { + uint16 channel; /* channel of GAS protocol */ + uint8 dialog_token; /* GAS dialog token */ + uint8 fragment_id; /* fragment id */ + uint16 status_code; /* status code on GAS completion */ + uint16 data_len; 
/* length of data to follow */ + uint8 data[1]; /* variable length specified by data_len */ +} BWL_POST_PACKED_STRUCT wl_event_gas_t; + +/* service discovery TLV */ +typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv { + uint16 length; /* length of response_data */ + uint8 protocol; /* service protocol type */ + uint8 transaction_id; /* service transaction id */ + uint8 status_code; /* status code */ + uint8 data[1]; /* response data */ +} BWL_POST_PACKED_STRUCT wl_sd_tlv_t; + +/* service discovery event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd { + uint16 channel; /* channel */ + uint8 count; /* number of tlvs */ + wl_sd_tlv_t tlv[1]; /* service discovery TLV */ +} BWL_POST_PACKED_STRUCT wl_event_sd_t; + +/* WLC_E_PKT_FILTER event sub-classification codes */ +#define WLC_E_PKT_FILTER_TIMEOUT 1 /* Matching packet not received in last timeout seconds */ + +/* Note: proxd has a new API (ver 3.0) deprecates the following */ + +/* Reason codes for WLC_E_PROXD */ +#define WLC_E_PROXD_FOUND 1 /* Found a proximity device */ +#define WLC_E_PROXD_GONE 2 /* Lost a proximity device */ +#define WLC_E_PROXD_START 3 /* used by: target */ +#define WLC_E_PROXD_STOP 4 /* used by: target */ +#define WLC_E_PROXD_COMPLETED 5 /* used by: initiator completed */ +#define WLC_E_PROXD_ERROR 6 /* used by both initiator and target */ +#define WLC_E_PROXD_COLLECT_START 7 /* used by: target & initiator */ +#define WLC_E_PROXD_COLLECT_STOP 8 /* used by: target */ +#define WLC_E_PROXD_COLLECT_COMPLETED 9 /* used by: initiator completed */ +#define WLC_E_PROXD_COLLECT_ERROR 10 /* used by both initiator and target */ +#define WLC_E_PROXD_NAN_EVENT 11 /* used by both initiator and target */ +#define WLC_E_PROXD_TS_RESULTS 12 /* used by: initiator completed */ + +/* proxd_event data */ +typedef struct ftm_sample { + uint32 value; /* RTT in ns */ + int8 rssi; /* RSSI */ +} ftm_sample_t; + +typedef struct ts_sample { + uint32 t1; + uint32 t2; + uint32 t3; + uint32 t4; +} ts_sample_t; + 
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint8 OFDM_frame_type; /* legacy or VHT */ + uint8 bandwidth; /* Bandwidth is 20, 40,80, MHZ */ + struct ether_addr peer_mac; /* (e.g for tgt:initiator's */ + uint32 distance; /* dst to tgt, units meter */ + uint32 meanrtt; /* mean delta */ + uint32 modertt; /* Mode delta */ + uint32 medianrtt; /* median RTT */ + uint32 sdrtt; /* Standard deviation of RTT */ + int32 gdcalcresult; /* Software or Hardware Kind of redundant, but if */ + /* frame type is VHT, then we should do it by hardware */ + int16 avg_rssi; /* avg rssi across the ftm frames */ + int16 validfrmcnt; /* Firmware's valid frame counts */ + int32 peer_router_info; /* Peer router information if available in TLV, */ + /* We will add this field later */ + int32 var1; /* average of group delay */ + int32 var2; /* average of threshold crossing */ + int32 var3; /* difference between group delay and threshold crossing */ + /* raw Fine Time Measurements (ftm) data */ + uint16 ftm_unit; /* ftm cnt resolution in picoseconds , 6250ps - default */ + uint16 ftm_cnt; /* num of rtd measurements/length in the ftm buffer */ + ftm_sample_t ftm_buff[1]; /* 1 ... 
ftm_cnt */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint16 ts_cnt; /* number of timestamp measurements */ + ts_sample_t ts_buff[1]; /* Timestamps */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t; + +/* Video Traffic Interference Monitor Event */ +#define INTFER_EVENT_VERSION 1 +#define INTFER_STREAM_TYPE_NONTCP 1 +#define INTFER_STREAM_TYPE_TCP 2 +#define WLINTFER_STATS_NSMPLS 4 +typedef struct wl_intfer_event { + uint16 version; /* version */ + uint16 status; /* status */ + uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */ +} wl_intfer_event_t; + +#define RRM_EVENT_VERSION 0 +typedef struct wl_rrm_event { + int16 version; + int16 len; + int16 cat; /* Category */ + int16 subevent; + char payload[1]; /* Measurement payload */ +} wl_rrm_event_t; + +/* WLC_E_PSTA_PRIMARY_INTF_IND event data */ +typedef struct wl_psta_primary_intf_event { + struct ether_addr prim_ea; /* primary intf ether addr */ +} wl_psta_primary_intf_event_t; + +/* WLC_E_DPSTA_INTF_IND event data */ +typedef enum { + WL_INTF_PSTA = 1, + WL_INTF_DWDS = 2 +} wl_dpsta_intf_type; + +typedef struct wl_dpsta_intf_event { + wl_dpsta_intf_type intf_type; /* dwds/psta intf register */ +} wl_dpsta_intf_event_t; + +/* ********** NAN protocol events/subevents ********** */ +#ifndef NAN_EVENT_BUFFER_SIZE +#define NAN_EVENT_BUFFER_SIZE 512 /* max size */ +#endif /* NAN_EVENT_BUFFER_SIZE */ +/* NAN Events sent by firmware */ + +/* + * If you make changes to this enum, dont forget to update the mask (if need be). 
+ */ +typedef enum wl_nan_events { + WL_NAN_EVENT_START = 1, /* NAN cluster started */ + WL_NAN_EVENT_JOIN = 2, /* To be deprecated */ + WL_NAN_EVENT_ROLE = 3, /* Role changed */ + WL_NAN_EVENT_SCAN_COMPLETE = 4, /* To be deprecated */ + WL_NAN_EVENT_DISCOVERY_RESULT = 5, /* Subscribe Received */ + WL_NAN_EVENT_REPLIED = 6, /* Publish Sent */ + WL_NAN_EVENT_TERMINATED = 7, /* sub / pub is terminated */ + WL_NAN_EVENT_RECEIVE = 8, /* Follow up Received */ + WL_NAN_EVENT_STATUS_CHG = 9, /* change in nan_mac status */ + WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */ + WL_NAN_EVENT_STOP = 11, /* To be deprecated */ + WL_NAN_EVENT_P2P = 12, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, /* Unused */ + WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */ + WL_NAN_EVENT_DATA_IF_ADD = 18, /* Unused */ + WL_NAN_EVENT_DATA_PEER_ADD = 19, /* Event for peer add */ + /* nan 2.0 */ + WL_NAN_EVENT_PEER_DATAPATH_IND = 20, /* Incoming DP req */ + WL_NAN_EVENT_DATAPATH_ESTB = 21, /* DP Established */ + WL_NAN_EVENT_SDF_RX = 22, /* SDF payload */ + WL_NAN_EVENT_DATAPATH_END = 23, /* DP Terminate recvd */ + WL_NAN_EVENT_BCN_RX = 24, /* received beacon payload */ + WL_NAN_EVENT_PEER_DATAPATH_RESP = 25, /* Peer's DP response */ + WL_NAN_EVENT_PEER_DATAPATH_CONF = 26, /* Peer's DP confirm */ + WL_NAN_EVENT_RNG_REQ_IND = 27, /* Range Request */ + WL_NAN_EVENT_RNG_RPT_IND = 28, /* Range Report */ + WL_NAN_EVENT_RNG_TERM_IND = 29, /* Range Termination */ + WL_NAN_EVENT_PEER_DATAPATH_SEC_INST = 30, /* Peer's DP sec install */ + WL_NAN_EVENT_TXS = 31, /* for tx status of follow-up and SDFs */ + WL_NAN_EVENT_DW_START = 32, /* dw start */ + WL_NAN_EVENT_DW_END = 33, /* dw end */ + WL_NAN_EVENT_CHAN_BOUNDARY = 34, /* channel switch event */ + WL_NAN_EVENT_MR_CHANGED = 35, /* AMR or IMR changed event during DW */ + 
WL_NAN_EVENT_RNG_RESP_IND = 36, /* Range Response Rx */ + WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF = 37, /* Peer's schedule update notification */ + WL_NAN_EVENT_PEER_SCHED_REQ = 38, /* Peer's schedule request */ + WL_NAN_EVENT_PEER_SCHED_RESP = 39, /* Peer's schedule response */ + WL_NAN_EVENT_PEER_SCHED_CONF = 40, /* Peer's schedule confirm */ + WL_NAN_EVENT_SENT_DATAPATH_END = 41, /* Sent DP terminate frame */ + WL_NAN_EVENT_SLOT_START = 42, /* SLOT_START event */ + WL_NAN_EVENT_SLOT_END = 43, /* SLOT_END event */ + WL_NAN_EVENT_HOST_ASSIST_REQ = 44, /* Requesting host assist */ + WL_NAN_EVENT_RX_MGMT_FRM = 45, /* NAN management frame received */ + + WL_NAN_EVENT_INVALID /* delimiter for max value */ +} nan_app_events_e; + +#define NAN_EV_MASK(ev) (1 << (ev - 1)) +#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0) + +#define NAN_EV_MASK_SET(var, evt) \ + ((evt < WL_NAN_EVMASK_EXTN_LEN * 8) ? \ + ((*((uint8 *)var + ((evt - 1)/8))) |= (1 << ((evt - 1) %8))) : 0) +#define IS_NAN_EVENT_ON(var, evt) \ + ((evt < WL_NAN_EVMASK_EXTN_LEN * 8) && \ + (((*((uint8 *)var + ((evt - 1)/8))) & (1 << ((evt - 1) %8))) != 0)) + +/* ******************* end of NAN section *************** */ + +typedef enum wl_scan_events { + WL_SCAN_START = 1, + WL_SCAN_END = 2 +} wl_scan_events; + +/* WLC_E_ULP event data */ +#define WL_ULP_EVENT_VERSION 1 +#define WL_ULP_DISABLE_CONSOLE 1 /* Disable console message on ULP entry */ +#define WL_ULP_UCODE_DOWNLOAD 2 /* Download ULP ucode file */ + +typedef struct wl_ulp_event { + uint16 version; + uint16 ulp_dongle_action; +} wl_ulp_event_t; + +/* TCP keepalive event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_tko { + uint8 index; /* TCP connection index, 0 to max-1 */ + uint8 pad[3]; /* 4-byte struct alignment */ +} BWL_POST_PACKED_STRUCT wl_event_tko_t; + +typedef struct { + uint8 radar_type; /* one of RADAR_TYPE_XXX */ + uint16 min_pw; /* minimum pulse-width (usec * 20) */ + uint16 max_pw; /* maximum pulse-width (usec * 20) */ + 
uint16 min_pri; /* minimum pulse repetition interval (usec) */ + uint16 max_pri; /* maximum pulse repetition interval (usec) */ + uint16 subband; /* subband/frequency */ +} radar_detected_event_info_t; +typedef struct wl_event_radar_detect_data { + + uint32 version; + uint16 current_chanspec; /* chanspec on which the radar is recieved */ + uint16 target_chanspec; /* Target chanspec after detection of radar on current_chanspec */ + radar_detected_event_info_t radar_info[2]; +} wl_event_radar_detect_data_t; + +#define WL_EVENT_MODESW_VER_1 1 +#define WL_EVENT_MODESW_VER_CURRENT WL_EVENT_MODESW_VER_1 + +#define WL_E_MODESW_FLAG_MASK_DEVICE 0x01u /* mask of device: belongs to local or peer */ +#define WL_E_MODESW_FLAG_MASK_FROM 0x02u /* mask of origin: firmware or user */ +#define WL_E_MODESW_FLAG_MASK_STATE 0x0Cu /* mask of state: modesw progress state */ + +#define WL_E_MODESW_FLAG_DEVICE_LOCAL 0x00u /* flag - device: info is about self/local */ +#define WL_E_MODESW_FLAG_DEVICE_PEER 0x01u /* flag - device: info is about peer */ + +#define WL_E_MODESW_FLAG_FROM_FIRMWARE 0x00u /* flag - from: request is from firmware */ +#define WL_E_MODESW_FLAG_FROM_USER 0x02u /* flag - from: request is from user/iov */ + +#define WL_E_MODESW_FLAG_STATE_REQUESTED 0x00u /* flag - state: mode switch request */ +#define WL_E_MODESW_FLAG_STATE_INITIATED 0x04u /* flag - state: switch initiated */ +#define WL_E_MODESW_FLAG_STATE_COMPLETE 0x08u /* flag - state: switch completed/success */ +#define WL_E_MODESW_FLAG_STATE_FAILURE 0x0Cu /* flag - state: failed to switch */ + +/* Get sizeof *X including variable data's length where X is pointer to wl_event_mode_switch_t */ +#define WL_E_MODESW_SIZE(X) (sizeof(*(X)) + (X)->length) + +/* Get variable data's length where X is pointer to wl_event_mode_switch_t */ +#define WL_E_MODESW_DATA_SIZE(X) (((X)->length > sizeof(*(X))) ? 
((X)->length - sizeof(*(X))) : 0) + +#define WL_E_MODESW_REASON_UNKNOWN 0u /* reason: UNKNOWN */ +#define WL_E_MODESW_REASON_ACSD 1u /* reason: ACSD (based on events from FW */ +#define WL_E_MODESW_REASON_OBSS_DBS 2u /* reason: OBSS DBS (eg. on interference) */ +#define WL_E_MODESW_REASON_DFS 3u /* reason: DFS (eg. on subband radar) */ +#define WL_E_MODESW_REASON_DYN160 4u /* reason: DYN160 (160/2x2 - 80/4x4) */ + +/* event structure for WLC_E_MODE_SWITCH */ +typedef struct { + uint16 version; + uint16 length; /* size including 'data' field */ + uint16 opmode_from; + uint16 opmode_to; + uint32 flags; /* bit 0: peer(/local==0); + * bit 1: user(/firmware==0); + * bits 3,2: 00==requested, 01==initiated, + * 10==complete, 11==failure; + * rest: reserved + */ + uint16 reason; /* value 0: unknown, 1: ACSD, 2: OBSS_DBS, + * 3: DFS, 4: DYN160, rest: reserved + */ + uint16 data_offset; /* offset to 'data' from beginning of this struct. + * fields may be added between data_offset and data + */ + /* ADD NEW FIELDS HERE */ + uint8 data[]; /* reason specific data; could be empty */ +} wl_event_mode_switch_t; + +/* when reason in WLC_E_MODE_SWITCH is DYN160, data will carry the following structure */ +typedef struct { + uint16 trigger; /* value 0: MU to SU, 1: SU to MU, 2: metric_dyn160, 3:re-/assoc, + * 4: disassoc, 5: rssi, 6: traffic, 7: interference, + * 8: chanim_stats + */ + struct ether_addr sta_addr; /* causal STA's MAC address when known */ + uint16 metric_160_80; /* latest dyn160 metric */ + uint8 nss; /* NSS of the STA */ + uint8 bw; /* BW of the STA */ + int8 rssi; /* RSSI of the STA */ + uint8 traffic; /* internal metric of traffic */ +} wl_event_mode_switch_dyn160; + +#define WL_EVENT_FBT_VER_1 1 + +#define WL_E_FBT_TYPE_FBT_OTD_AUTH 1 +#define WL_E_FBT_TYPE_FBT_OTA_AUTH 2 + +/* event structure for WLC_E_FBT */ +typedef struct { + uint16 version; + uint16 length; /* size including 'data' field */ + uint16 type; /* value 0: unknown, 1: FBT OTD Auth Req */ + uint16 
data_offset; /* offset to 'data' from beginning of this struct. + * fields may be added between data_offset and data + */ + /* ADD NEW FIELDS HERE */ + uint8 data[]; /* type specific data; could be empty */ +} wl_event_fbt_t; + +/* TWT Setup Completion is designed to notify the user of TWT Setup process + * status. When 'status' field is value of BCME_OK, the user must check the + * 'setup_cmd' field value in 'wl_twt_sdesc_t' structure that at the end of + * the event data to see the response from the TWT Responding STA; when + * 'status' field is value of BCME_ERROR or non BCME_OK, user must not use + * anything from 'wl_twt_sdesc_t' structure as it is the TWT Requesting STA's + * own TWT parameter. + */ + +#define WL_TWT_SETUP_CPLT_VER 0 + +/* TWT Setup Completion event data */ +typedef struct wl_twt_setup_cplt { + uint16 version; + uint16 length; /* the byte count of fields from 'dialog' onwards */ + uint8 dialog; /* the dialog token user supplied to the TWT setup API */ + uint8 pad[3]; + int32 status; + /* wl_twt_sdesc_t desc; - defined in wlioctl.h */ +} wl_twt_setup_cplt_t; + +#define WL_INVALID_IE_EVENT_VERSION 0 + +/* Invalid IE Event data */ +typedef struct wl_invalid_ie_event { + uint16 version; + uint16 len; /* Length of the invalid IE copy */ + uint16 type; /* Type/subtype of the frame which contains the invalid IE */ + uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */ + uint8 ie[]; /* Variable length buffer for the invalid IE copy */ +} wl_invalid_ie_event_t; + +/* Fixed header portion of Invalid IE Event */ +typedef struct wl_invalid_ie_event_hdr { + uint16 version; + uint16 len; /* Length of the invalid IE copy */ + uint16 type; /* Type/subtype of the frame which contains the invalid IE */ + uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */ + /* var length IE data follows */ +} wl_invalid_ie_event_hdr_t; + +typedef enum ie_error_code { + IE_ERROR_OUT_OF_RANGE = 0x01 +} ie_error_code_t; + +/* This 
marks the end of a packed structure section. */ +#include + +/* reason of channel switch */ +typedef enum { + CHANSW_DFS = 10, /* channel switch due to DFS module */ + CHANSW_HOMECH_REQ = 14, /* channel switch due to HOME Channel Request */ + CHANSW_STA = 15, /* channel switch due to STA */ + CHANSW_SOFTAP = 16, /* channel switch due to SodtAP */ + CHANSW_AIBSS = 17, /* channel switch due to AIBSS */ + CHANSW_NAN = 18, /* channel switch due to NAN */ + CHANSW_NAN_DISC = 19, /* channel switch due to NAN Disc */ + CHANSW_NAN_SCHED = 20, /* channel switch due to NAN Sched */ + CHANSW_AWDL_AW = 21, /* channel switch due to AWDL aw */ + CHANSW_AWDL_SYNC = 22, /* channel switch due to AWDL sync */ + CHANSW_AWDL_CAL = 23, /* channel switch due to AWDL Cal */ + CHANSW_AWDL_PSF = 24, /* channel switch due to AWDL PSF */ + CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */ + CHANSW_TDLS = 26, /* channel switch due to TDLS */ + CHANSW_PROXD = 27, /* channel switch due to PROXD */ + CHANSW_SLOTTED_BSS = 28, /* channel switch due to slotted bss */ + CHANSW_SLOTTED_CMN_SYNC = 29, /* channel switch due to Common Sync Layer */ + CHANSW_SLOTTED_BSS_CAL = 30, /* channel switch due to Cal request from slotted bss */ + CHANSW_MAX_NUMBER = 31 /* max channel switch reason */ +} wl_chansw_reason_t; + +#define CHANSW_REASON(reason) (1 << reason) + +#define EVENT_AGGR_DATA_HDR_LEN 8 + +typedef struct event_aggr_data { + uint16 num_events; /* No of events aggregated */ + uint16 len; /* length of the aggregated events, excludes padding */ + uint8 pad[4]; /* Padding to make aggr event packet header aligned + * on 64-bit boundary, for a 64-bit host system. 
+ */ + uint8 data[]; /* Aggregate buffer containing Events */ +} event_aggr_data_t; + +/* WLC_E_TVPM_MITIGATION event structure version */ +#define WL_TVPM_MITIGATION_VERSION 1 + +/* TVPM mitigation on/off status bits */ +#define WL_TVPM_MITIGATION_TXDC 0x1 +#define WL_TVPM_MITIGATION_TXPOWER 0x2 +#define WL_TVPM_MITIGATION_TXCHAINS 0x4 + +/* Event structure for WLC_E_TVPM_MITIGATION */ +typedef struct wl_event_tvpm_mitigation { + uint16 version; /* structure version */ + uint16 length; /* length of this structure */ + uint32 timestamp_ms; /* millisecond timestamp */ + uint8 slice; /* slice number */ + uint8 pad; + uint16 on_off; /* mitigation status bits */ +} wl_event_tvpm_mitigation_t; + +/* Event structures for sub health checks of PHY */ + +#define WL_PHY_HC_DESENSE_STATS_VER (1) +typedef struct wl_hc_desense_stats { + uint16 version; + uint16 chanspec; + int8 allowed_weakest_rssi; /* based on weakest link RSSI */ + uint8 ofdm_desense; /* Desense requested for OFDM */ + uint8 bphy_desense; /* Desense requested for bphy */ + int8 glitch_upd_wait; /* wait post ACI mitigation */ +} wl_hc_desense_stats_v1_t; + +#define WL_PHY_HC_TEMP_STATS_VER (1) +typedef struct wl_hc_temp_stats { + uint16 version; + uint16 chanspec; + int16 curtemp; /* Temperature */ + uint8 temp_disthresh; /* Threshold to reduce tx chain */ + uint8 temp_enthresh; /* Threshold to increase tx chains */ + uint tempsense_period; /* Temperature check period */ + bool heatedup; /* 1: temp throttling on */ + uint8 bitmap; /* Indicating rx and tx chains */ + uint8 pad[2]; +} wl_hc_temp_stats_v1_t; + +#define WL_PHY_HC_TEMP_STATS_VER_2 (2) +typedef struct { + uint16 version; + uint16 chanspec; + int16 curtemp; /* Temperature */ + uint8 pad[2]; +} wl_hc_temp_stats_v2_t; + +#define WL_PHY_HC_VCOCAL_STATS_VER (1) +typedef struct wl_hc_vcocal_stats { + uint16 version; + uint16 chanspec; + int16 curtemp; /* Temperature */ + /* Ring buffer - Maintains history of previous 16 wake/sleep cycles */ + uint16 
vcocal_status_wake; + uint16 vcocal_status_sleep; + uint16 plllock_status_wake; + uint16 plllock_status_sleep; + /* Cal Codes */ + uint16 cc_maincap; + uint16 cc_secondcap; + uint16 cc_auxcap; +} wl_hc_vcocal_stats_v1_t; + +#define WL_PHY_HC_TXPWR_STATS_VER (1) +typedef struct wl_hc_tx_stats { + uint16 version; + uint16 chanspec; + int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */ + int8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */ + int8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */ + uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */ + int16 temp; /* Temperature */ + uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */ + int8 min_txpower; /* min tx power per ant */ + uint8 pad[3]; +} wl_hc_txpwr_stats_v1_t; + +#define WL_PHY_HC_TXPWR_STATS_VER_2 (2) +typedef struct { + uint16 version; + uint16 chanspec; + int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */ + uint8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */ + uint8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */ + uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */ + int16 temp; /* Temperature */ + uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */ + int8 min_txpower; /* min tx power per ant */ + uint8 pad[3]; +} wl_hc_txpwr_stats_v2_t; + +typedef enum wl_mbo_event_type { + WL_MBO_E_CELLULAR_NW_SWITCH = 1, + /* ADD before this */ + WL_MBO_E_LAST = 2, /* highest val + 1 for range checking */ +} wl_mbo_event_type_t; + +/* WLC_E_MBO event structure version */ +#define WL_MBO_EVT_VER 1 + +struct wl_event_mbo { + uint16 version; /* structure version */ + uint16 length; /* length of the rest of the structure from type */ + wl_mbo_event_type_t type; /* Event type */ + uint8 data[]; /* Variable length data */ +}; + +/* WLC_E_MBO_CELLULAR_NW_SWITCH event structure version */ +#define WL_MBO_CELLULAR_NW_SWITCH_VER 1 + +/* WLC_E_MBO_CELLULAR_NW_SWITCH event data */ +struct wl_event_mbo_cell_nw_switch { + uint16 version; /* structure version */ + uint16 length; /* length of the rest of the 
structure from reason */ + /* Reason of switch as per MBO Tech spec */ + uint8 reason; + /* pad */ + uint8 pad; + /* delay after which re-association can be tried to current BSS (seconds) */ + uint16 reassoc_delay; + /* How long current association will be there (milli seconds). + * This is zero if not known or value is overflowing. + */ + uint32 assoc_time_remain; +}; + +/* WLC_E_HWA Event structure */ +typedef struct wl_event_hwa { + uint16 version; /* structure version */ + uint16 length; /* length of structure */ + uint32 sub_type; /* Sub event type */ + uint8 data[0]; /* variable length data */ +} wl_event_hwa_t; + +#define WL_HWA_EVENT_VER 1 + +typedef enum wl_event_hwa_subtype { + WL_HWA_EVENT_SUBTYPE_ERROR = 1, + WL_HWA_EVENT_SUBTYPE_LAST = 2 +} wl_event_hwa_subtype_t; + +/* WLC_E_ADPS status */ +enum { + WL_E_STATUS_ADPS_DEAUTH = 0, + WL_E_STATUS_ADPS_MAX +}; + +/* WLC_E_ADPS event data */ +#define WL_EVENT_ADPS_VER_1 1 + +/* WLC_E_ADPS event type */ +#define WL_E_TYPE_ADPS_BAD_AP 1 + +typedef struct wl_event_adps_bad_ap { + uint32 status; + uint32 reason; + struct ether_addr ea; /* bssid */ +} wl_event_adps_bad_ap_t; + +typedef struct wl_event_adps { + uint16 version; /* structure version */ + uint16 length; /* length of structure */ + uint32 type; /* event type */ + uint8 data[]; /* variable length data */ +} wl_event_adps_v1_t; + +typedef wl_event_adps_v1_t wl_event_adps_t; +#endif /* _BCMEVENT_H_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmiov.h b/bcmdhd.100.10.315.x/include/bcmiov.h new file mode 100644 index 0000000..585914d --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmiov.h @@ -0,0 +1,356 @@ +/* + * bcmiov.h + * Common iovar handling/parsing support - batching, parsing, sub-cmd dispatch etc. + * To be used in firmware and host apps or dhd - reducing code size, + * duplication, and maintenance overhead. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id$ + */ + +#ifndef _bcmiov_h_ +#define _bcmiov_h_ + +#include +#include +#include +#ifdef BCMDRIVER +#include +#else +#include /* For size_t */ +#endif /* BCMDRIVER */ + +/* Forward declarations */ +typedef uint16 bcm_iov_cmd_id_t; +typedef uint16 bcm_iov_cmd_flags_t; +typedef uint16 bcm_iov_cmd_mflags_t; +typedef struct bcm_iov_cmd_info bcm_iov_cmd_info_t; +typedef struct bcm_iov_cmd_digest bcm_iov_cmd_digest_t; +typedef struct bcm_iov_cmd_tlv_info bcm_iov_cmd_tlv_info_t; +typedef struct bcm_iov_buf bcm_iov_buf_t; +typedef struct bcm_iov_batch_buf bcm_iov_batch_buf_t; +typedef struct bcm_iov_parse_context bcm_iov_parse_context_t; +typedef struct bcm_iov_sub_cmd_context bcm_iov_sub_cmd_context_t; + +typedef void* (*bcm_iov_malloc_t)(void* alloc_ctx, size_t len); +typedef void (*bcm_iov_free_t)(void* alloc_ctx, void *buf, size_t len); + +typedef uint8 bcm_iov_tlp_data_type_t; +typedef struct bcm_iov_tlp bcm_iov_tlp_t; +typedef struct bcm_iov_tlp_node bcm_iov_tlp_node_t; +typedef struct bcm_iov_batch_subcmd bcm_iov_batch_subcmd_t; + +/* + * iov validation handler - All the common checks that are required + * for processing of iovars for any given command. + */ +typedef int (*bcm_iov_cmd_validate_t)(const bcm_iov_cmd_digest_t *dig, + uint32 actionid, const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen); + +/* iov get handler - process subcommand specific input and return output. + * input and output may overlap, so the callee needs to check if + * that is supported. For xtlv data a tlv digest is provided to make + * parsing simpler. Output tlvs may be packed into output buffer using + * bcm xtlv support. olen is input/output parameter. On input contains + * max available obuf length and callee must fill the correct length + * to represent the length of output returned. 
+ */ +typedef int (*bcm_iov_cmd_get_t)(const bcm_iov_cmd_digest_t *dig, + const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen); + +/* iov set handler - process subcommand specific input and return output + * input and output may overlap, so the callee needs to check if + * that is supported. olen is input/output parameter. On input contains + * max available obuf length and callee must fill the correct length + * to represent the length of output returned. + */ +typedef int (*bcm_iov_cmd_set_t)(const bcm_iov_cmd_digest_t *dig, + const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen); + +/* iov (sub-cmd) batch - a vector of commands. count can be zero + * to support a version query. Each command is a tlv - whose data + * portion may have an optional return status, followed by a fixed + * length data header, optionally followed by tlvs. + * cmd = type|length|[header][tlvs] + */ + +/* + * Batch sub-commands have status length included in the + * response length packed in TLV. + */ +#define BCM_IOV_STATUS_LEN sizeof(uint32) + +/* batch version is indicated by setting high bit. */ +#define BCM_IOV_BATCH_MASK 0x8000 + +/* + * Batched commands will have the following memory layout + * +--------+---------+--------+-------+ + * |version |count | is_set |sub-cmd| + * +--------+---------+--------+-------+ + * version >= 0x8000 + * count = number of sub-commands encoded in the iov buf + * sub-cmd one or more sub-commands for processing + * Where sub-cmd is padded byte buffer with memory layout as follows + * +--------+---------+-----------------------+-------------+------ + * |cmd-id |length |IN(options) OUT(status)|command data |...... 
+ * +--------+---------+-----------------------+-------------+------ + * cmd-id =sub-command ID + * length = length of this sub-command + * IN(options) = On input processing options/flags for this command + * OUT(status) on output processing status for this command + * command data = encapsulated IOVAR data as a single structure or packed TLVs for each + * individual sub-command. + */ +struct bcm_iov_batch_subcmd { + uint16 id; + uint16 len; + union { + uint32 options; + uint32 status; + } u; + uint8 data[1]; +}; + +struct bcm_iov_batch_buf { + uint16 version; + uint8 count; + uint8 is_set; /* to differentiate set or get */ + struct bcm_iov_batch_subcmd cmds[0]; +}; + +/* non-batched command version = major|minor w/ major <= 127 */ +struct bcm_iov_buf { + uint16 version; + uint16 len; + bcm_iov_cmd_id_t id; + uint16 data[1]; /* 32 bit alignment may be repurposed by the command */ + /* command specific data follows */ +}; + +/* iov options flags */ +enum { + BCM_IOV_CMD_OPT_ALIGN_NONE = 0x0000, + BCM_IOV_CMD_OPT_ALIGN32 = 0x0001, + BCM_IOV_CMD_OPT_TERMINATE_SUB_CMDS = 0x0002 +}; + +/* iov command flags */ +enum { + BCM_IOV_CMD_FLAG_NONE = 0, + BCM_IOV_CMD_FLAG_STATUS_PRESENT = (1 << 0), /* status present at data start - output only */ + BCM_IOV_CMD_FLAG_XTLV_DATA = (1 << 1), /* data is a set of xtlvs */ + BCM_IOV_CMD_FLAG_HDR_IN_LEN = (1 << 2), /* length starts at version - non-bacthed only */ + BCM_IOV_CMD_FLAG_NOPAD = (1 << 3) /* No padding needed after iov_buf */ +}; + +/* information about the command, xtlv options and xtlvs_off are meaningful + * only if XTLV_DATA cmd flag is selected + */ +struct bcm_iov_cmd_info { + bcm_iov_cmd_id_t cmd; /* the (sub)command - module specific */ + bcm_iov_cmd_flags_t flags; /* checked by bcmiov but set by module */ + bcm_iov_cmd_mflags_t mflags; /* owned and checked by module */ + bcm_xtlv_opts_t xtlv_opts; + bcm_iov_cmd_validate_t validate_h; /* command validation handler */ + bcm_iov_cmd_get_t get_h; + bcm_iov_cmd_set_t 
set_h; + uint16 xtlvs_off; /* offset to beginning of xtlvs in cmd data */ + uint16 min_len_set; + uint16 max_len_set; + uint16 min_len_get; + uint16 max_len_get; +}; + +/* tlv digest to support parsing of xtlvs for commands w/ tlv data; the tlv + * digest is available in the handler for the command. The count and order in + * which tlvs appear in the digest are exactly the same as the order of tlvs + * passed in the registration for the command. Unknown tlvs are ignored. + * If registered tlvs are missing datap will be NULL. common iov rocessing + * acquires an input digest to process input buffer. The handler is responsible + * for constructing an output digest and use packing functions to generate + * the output buffer. The handler may use the input digest as output digest once + * the tlv data is extracted and used. Multiple tlv support involves allocation of + * tlp nodes, except the first, as required, + */ + +/* tlp data type indicates if the data is not used/invalid, input or output */ +enum { + BCM_IOV_TLP_NODE_INVALID = 0, + BCM_IOV_TLP_NODE_IN = 1, + BCM_IOV_TLP_NODE_OUT = 2 +}; + +struct bcm_iov_tlp { + uint16 type; + uint16 len; + uint16 nodeix; /* node index */ +}; + +/* tlp data for a given tlv - multiple tlvs of same type chained */ +struct bcm_iov_tlp_node { + uint8 *next; /* multiple tlv support */ + bcm_iov_tlp_data_type_t type; + uint8 *data; /* pointer to data in buffer or state */ +}; + +struct bcm_iov_cmd_digest { + uint32 version; /* Version */ + void *cmd_ctx; + struct wlc_bsscfg *bsscfg; + const bcm_iov_cmd_info_t *cmd_info; + uint16 max_tlps; /* number of tlps allocated */ + uint16 max_nodes; /* number of nods allocated */ + uint16 num_tlps; /* number of tlps valid */ + uint16 num_nodes; /* number of nods valid */ + uint16 tlps_off; /* offset to tlps */ + uint16 nodes_off; /* offset to nodes */ + /* + * bcm_iov_tlp_t tlps[max_tlps]; + * bcm_iov_tlp_node_t nodes[max_nodes] + */ +}; + +/* get length callback - default length is min_len taken 
from digest */ +typedef size_t (*bcm_iov_xtlv_get_len_t)(const bcm_iov_cmd_digest_t *dig, + const bcm_iov_cmd_tlv_info_t *tlv_info); + +/* pack to buffer data callback. under some conditions it might + * not be a straight copy and can refer to context(ual) information and + * endian conversions... + */ +typedef void (*bcm_iov_xtlv_pack_t)(const bcm_iov_cmd_digest_t *dig, + const bcm_iov_cmd_tlv_info_t *tlv_info, + uint8 *out_buf, const uint8 *in_data, size_t len); + +struct bcm_iov_cmd_tlv_info { + uint16 id; + uint16 min_len; /* inclusive */ + uint16 max_len; /* inclusive */ + bcm_iov_xtlv_get_len_t get_len; + bcm_iov_xtlv_pack_t pack; +}; + +/* + * module private parse context. Default version type len is uint16 + */ +enum { + BCM_IOV_PARSE_CMD_NONE = 0 +}; +typedef uint32 parse_context_opts_t; + +/* get digest callback */ +typedef int (*bcm_iov_get_digest_t)(void *cmd_ctx, bcm_iov_cmd_digest_t **dig); + +typedef struct bcm_iov_parse_config { + parse_context_opts_t options; /* to handle different ver lengths */ + bcm_iov_malloc_t alloc_fn; + bcm_iov_free_t free_fn; + bcm_iov_get_digest_t dig_fn; + int max_regs; + void *alloc_ctx; +} bcm_iov_parse_config_t; + +/* API */ + +/* All calls return an integer status code BCME_* unless otherwise indicated */ + +/* return length of allocation for 'num_cmds' commands. data_len + * includes length of data for all the commands excluding the headers + */ +size_t bcm_iov_get_alloc_len(int num_cmds, size_t data_len); + +/* create parsing context using allocator provided; max_regs provides + * the number of allowed registrations for commands using the context + * sub-components of a module may register their own commands indepdently + * using the parsing context. If digest callback is NULL or returns NULL, + * the (input) digest is allocated using the provided allocators and released on + * completion of processing. 
+ */ +int bcm_iov_create_parse_context(const bcm_iov_parse_config_t *parse_cfg, + bcm_iov_parse_context_t **parse_ctx); + +/* free the parsing context; ctx is set to NULL on exit */ +int bcm_iov_free_parse_context(bcm_iov_parse_context_t **ctx, bcm_iov_free_t free_fn); + +/* Return the command context for the module */ +void *bcm_iov_get_cmd_ctx_info(bcm_iov_parse_context_t *parse_ctx); + +/* register a command info vector along with supported tlvs. Each command + * may support a subset of tlvs + */ +int bcm_iov_register_commands(bcm_iov_parse_context_t *parse_ctx, void *cmd_ctx, + const bcm_iov_cmd_info_t *info, size_t num_cmds, + const bcm_iov_cmd_tlv_info_t *tlv_info, size_t num_tlvs); + +/* pack the xtlvs provided in the digest. may returns BCME_BUFTOOSHORT, but the + * out_len is set to required length in that case. + */ +int bcm_iov_pack_xtlvs(const bcm_iov_cmd_digest_t *dig, bcm_xtlv_opts_t xtlv_opts, + uint8 *out_buf, size_t out_size, size_t *out_len); + +#ifdef BCMDRIVER +/* wlc modules register their iovar(s) using the parsing context w/ wlc layer + * during attach. + */ +struct wlc_if; +struct wlc_info; +extern struct wlc_bsscfg *bcm_iov_bsscfg_find_from_wlcif(struct wlc_info *wlc, + struct wlc_if *wlcif); +int bcm_iov_doiovar(void *parse_ctx, uint32 id, void *params, uint params_len, + void *arg, uint arg_len, uint vsize, struct wlc_if *intf); +#endif /* BCMDRIVER */ + +/* parsing context helpers */ + +/* get the maximum number of tlvs - can be used to allocate digest for all + * commands. the digest can be shared. 
Negative values are BCM_*, >=0, the + * number of tlvs + */ +int bcm_iov_parse_get_max_tlvs(const bcm_iov_parse_context_t *ctx); + +/* common packing support */ + +/* pack a buffer of uint8s - memcpy wrapper */ +int bcm_iov_pack_buf(const bcm_iov_cmd_digest_t *dig, uint8 *buf, + const uint8 *data, size_t len); + +#define bcm_iov_packv_u8 bcm_iov_pack_buf + +/* + * pack a buffer with uint16s - serialized in LE order, data points to uint16 + * length is not checked. + */ +int bcm_iov_packv_u16(const bcm_iov_cmd_digest_t *dig, uint8 *buf, + const uint16 *data, int n); + +/* + * pack a buffer with uint32s - serialized in LE order - data points to uint32 + * length is not checked. + */ +int bcm_iov_packv_u32(const bcm_iov_cmd_digest_t *dig, uint8 *buf, + const uint32 *data, int n); + +#endif /* _bcmiov_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmip.h b/bcmdhd.100.10.315.x/include/bcmip.h new file mode 100644 index 0000000..9415488 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmip.h @@ -0,0 +1,250 @@ +/* + * Fundamental constants relating to IP Protocol + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmip.h 705929 2017-06-20 00:06:46Z $ + */ + +#ifndef _bcmip_h_ +#define _bcmip_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. */ +#include + +/* IPV4 and IPV6 common */ +#define IP_VER_OFFSET 0x0 /* offset to version field */ +#define IP_VER_MASK 0xf0 /* version mask */ +#define IP_VER_SHIFT 4 /* version shift */ +#define IP_VER_4 4 /* version number for IPV4 */ +#define IP_VER_6 6 /* version number for IPV6 */ + +#define IP_VER(ip_body) \ + ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT) + +#define IP_PROT_ICMP 0x1 /* ICMP protocol */ +#define IP_PROT_IGMP 0x2 /* IGMP protocol */ +#define IP_PROT_TCP 0x6 /* TCP protocol */ +#define IP_PROT_UDP 0x11 /* UDP protocol type */ +#define IP_PROT_GRE 0x2f /* GRE protocol type */ +#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */ + +/* IPV4 field offsets */ +#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */ +#define IPV4_TOS_OFFSET 1 /* type of service offset */ +#define IPV4_PKTLEN_OFFSET 2 /* packet length offset */ +#define IPV4_PKTFLAG_OFFSET 6 /* more-frag,dont-frag flag offset */ +#define IPV4_PROT_OFFSET 9 /* protocol type offset */ +#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */ +#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */ +#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */ +#define IPV4_OPTIONS_OFFSET 20 /* IP options offset */ +#define IPV4_MIN_HEADER_LEN 20 /* Minimum size for an IP header (no options) */ + +/* IPV4 field decodes */ +#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */ +#define IPV4_VER_SHIFT 4 /* IPV4 version shift */ + +#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */ +#define 
IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK)) + +#define IPV4_HLEN_MIN (4 * 5) /* IPV4 header minimum length */ + +#define IPV4_ADDR_LEN 4 /* IPV4 address length */ + +#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \ + ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0) + +#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \ + ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff) + +#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */ +#define IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */ + +#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET]) + +#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */ +#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */ + +#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */ +#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */ +#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */ + +#define IPV4_TOS_ROUTINE 0 +#define IPV4_TOS_PRIORITY 1 +#define IPV4_TOS_IMMEDIATE 2 +#define IPV4_TOS_FLASH 3 +#define IPV4_TOS_FLASHOVERRIDE 4 +#define IPV4_TOS_CRITICAL 5 +#define IPV4_TOS_INETWORK_CTRL 6 +#define IPV4_TOS_NETWORK_CTRL 7 + +#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET]) + +#define IPV4_FRAG_RESV 0x8000 /* Reserved */ +#define IPV4_FRAG_DONT 0x4000 /* Don't fragment */ +#define IPV4_FRAG_MORE 0x2000 /* More fragments */ +#define IPV4_FRAG_OFFSET_MASK 0x1fff /* Fragment offset */ + +#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */ + +/* IPV4 packet formats */ +BWL_PRE_PACKED_STRUCT struct ipv4_addr { + uint8 addr[IPV4_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv4_hdr { + uint8 version_ihl; /* Version and Internet Header Length */ + uint8 tos; /* Type Of Service */ + uint16 tot_len; /* Number of bytes in packet (max 65535) */ + uint16 id; + uint16 frag; /* 3 flag bits and fragment offset */ + uint8 
ttl; /* Time To Live */ + uint8 prot; /* Protocol */ + uint16 hdr_chksum; /* IP header checksum */ + uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ +} BWL_POST_PACKED_STRUCT; + +/* IPV6 field offsets */ +#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */ +#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */ +#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */ +#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */ +#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */ + +/* IPV6 field decodes */ +#define IPV6_TRAFFIC_CLASS(ipv6_body) \ + (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \ + ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4)) + +#define IPV6_FLOW_LABEL(ipv6_body) \ + (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \ + (((uint8 *)(ipv6_body))[2] << 8) | \ + (((uint8 *)(ipv6_body))[3])) + +#define IPV6_PAYLOAD_LEN(ipv6_body) \ + ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \ + ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1]) + +#define IPV6_NEXT_HDR(ipv6_body) \ + (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET]) + +#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body) + +#define IPV6_ADDR_LEN 16 /* IPV6 address length */ + +/* IPV4 TOS or IPV6 Traffic Classifier or 0 */ +#define IP_TOS46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0) + +#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT); + +/* IPV4 or IPV6 Protocol Classifier or 0 */ +#define IP_PROT46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? 
IPV6_PROT(ip_body) : 0) + +/* IPV6 extension headers (options) */ +#define IPV6_EXTHDR_HOP 0 +#define IPV6_EXTHDR_ROUTING 43 +#define IPV6_EXTHDR_FRAGMENT 44 +#define IPV6_EXTHDR_AUTH 51 +#define IPV6_EXTHDR_NONE 59 +#define IPV6_EXTHDR_DEST 60 + +#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \ + ((prot) == IPV6_EXTHDR_ROUTING) || \ + ((prot) == IPV6_EXTHDR_FRAGMENT) || \ + ((prot) == IPV6_EXTHDR_AUTH) || \ + ((prot) == IPV6_EXTHDR_NONE) || \ + ((prot) == IPV6_EXTHDR_DEST)) + +#define IPV6_MIN_HLEN 40 + +#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3) + +BWL_PRE_PACKED_STRUCT struct ipv6_exthdr { + uint8 nexthdr; + uint8 hdrlen; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag { + uint8 nexthdr; + uint8 rsvd; + uint16 frag_off; + uint32 ident; +} BWL_POST_PACKED_STRUCT; + +static INLINE int32 +ipv6_exthdr_len(uint8 *h, uint8 *proto) +{ + uint16 len = 0, hlen; + struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h; + + while (IPV6_EXTHDR(eh->nexthdr)) { + if (eh->nexthdr == IPV6_EXTHDR_NONE) + return -1; + else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT) + hlen = 8; + else if (eh->nexthdr == IPV6_EXTHDR_AUTH) + hlen = (eh->hdrlen + 2) << 2; + else + hlen = IPV6_EXTHDR_LEN(eh); + + len += hlen; + eh = (struct ipv6_exthdr *)(h + len); + } + + *proto = eh->nexthdr; + return len; +} + +#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000) + +#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \ +{ \ + ether[0] = 0x01; \ + ether[1] = 0x00; \ + ether[2] = 0x5E; \ + ether[3] = (ipv4 & 0x7f0000) >> 16; \ + ether[4] = (ipv4 & 0xff00) >> 8; \ + ether[5] = (ipv4 & 0xff); \ +} + +/* This marks the end of a packed structure section. 
*/ +#include + +#define IPV4_ADDR_STR "%d.%d.%d.%d" +#define IPV4_ADDR_TO_STR(addr) ((uint32)addr & 0xff000000) >> 24, \ + ((uint32)addr & 0x00ff0000) >> 16, \ + ((uint32)addr & 0x0000ff00) >> 8, \ + ((uint32)addr & 0x000000ff) + +#endif /* _bcmip_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmipv6.h b/bcmdhd.100.10.315.x/include/bcmipv6.h new file mode 100644 index 0000000..c6f1f05 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmipv6.h @@ -0,0 +1,161 @@ +/* + * Fundamental constants relating to Neighbor Discovery Protocol + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmipv6.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _bcmipv6_h_ +#define _bcmipv6_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. 
*/ +#include + +/* Extension headers */ +#define IPV6_EXT_HOP 0 +#define IPV6_EXT_ROUTE 43 +#define IPV6_EXT_FRAG 44 +#define IPV6_EXT_DEST 60 +#define IPV6_EXT_ESEC 50 +#define IPV6_EXT_AUTH 51 + +/* Minimum size (extension header "word" length) */ +#define IPV6_EXT_WORD 8 + +/* Offsets for most extension headers */ +#define IPV6_EXT_NEXTHDR 0 +#define IPV6_EXT_HDRLEN 1 + +/* Constants specific to fragmentation header */ +#define IPV6_FRAG_MORE_MASK 0x0001 +#define IPV6_FRAG_MORE_SHIFT 0 +#define IPV6_FRAG_OFFS_MASK 0xfff8 +#define IPV6_FRAG_OFFS_SHIFT 3 + +/* For icmpv6 */ +#define ICMPV6_HEADER_TYPE 0x3A +#define ICMPV6_PKT_TYPE_RA 134 +#define ICMPV6_PKT_TYPE_NS 135 +#define ICMPV6_PKT_TYPE_NA 136 + +#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2 +#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1 + +#define ICMPV6_ND_OPT_LEN_LINKADDR 1 + +#define ICMPV6_ND_OPT_LEN_LINKADDR 1 + +#define IPV6_VERSION 6 +#define IPV6_HOP_LIMIT 255 + +#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \ + a[5] | a[6] | a[7] | a[8] | a[9] | \ + a[10] | a[11] | a[12] | a[13] | \ + a[14] | a[15]) == 0) + +#define IPV6_ADDR_LOCAL(a) (((a[0] == 0xfe) && (a[1] & 0x80))? 
TRUE: FALSE) + +/* IPV6 address */ +BWL_PRE_PACKED_STRUCT struct ipv6_addr { + uint8 addr[16]; +} BWL_POST_PACKED_STRUCT; + +/* ICMPV6 Header */ +BWL_PRE_PACKED_STRUCT struct icmp6_hdr { + uint8 icmp6_type; + uint8 icmp6_code; + uint16 icmp6_cksum; + BWL_PRE_PACKED_STRUCT union { + uint32 reserved; + BWL_PRE_PACKED_STRUCT struct nd_advt { + uint32 reserved1:5, + override:1, + solicited:1, + router:1, + reserved2:24; + } BWL_POST_PACKED_STRUCT nd_advt; + } BWL_POST_PACKED_STRUCT opt; +} BWL_POST_PACKED_STRUCT; + +/* Ipv6 Header Format */ +BWL_PRE_PACKED_STRUCT struct ipv6_hdr { + uint8 priority:4, + version:4; + uint8 flow_lbl[3]; + uint16 payload_len; + uint8 nexthdr; + uint8 hop_limit; + struct ipv6_addr saddr; + struct ipv6_addr daddr; +} BWL_POST_PACKED_STRUCT; + +/* Neighbor Advertisement/Solicitation Packet Structure */ +BWL_PRE_PACKED_STRUCT struct bcm_nd_msg { + struct icmp6_hdr icmph; + struct ipv6_addr target; +} BWL_POST_PACKED_STRUCT; + +/* Neighibor Solicitation/Advertisement Optional Structure */ +BWL_PRE_PACKED_STRUCT struct nd_msg_opt { + uint8 type; + uint8 len; + uint8 mac_addr[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +/* Ipv6 Fragmentation Header */ +BWL_PRE_PACKED_STRUCT struct ipv6_frag { + uint8 nexthdr; + uint8 reserved; + uint16 frag_offset; + uint32 ident; +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. 
*/ +#include + +static const struct ipv6_addr all_node_ipv6_maddr = { + { 0xff, 0x2, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 1 + }}; + +#define IPV6_ISMULTI(a) (a[0] == 0xff) + +#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \ +{ \ + ether[0] = 0x33; \ + ether[1] = 0x33; \ + ether[2] = ipv6[12]; \ + ether[3] = ipv6[13]; \ + ether[4] = ipv6[14]; \ + ether[5] = ipv6[15]; \ +} + +#endif /* !defined(_bcmipv6_h_) */ diff --git a/bcmdhd.100.10.315.x/include/bcmmsgbuf.h b/bcmdhd.100.10.315.x/include/bcmmsgbuf.h new file mode 100644 index 0000000..d9ff587 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmmsgbuf.h @@ -0,0 +1,1340 @@ +/* + * MSGBUF network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmmsgbuf.h 764293 2018-05-24 17:44:43Z $ + */ +#ifndef _bcmmsgbuf_h_ +#define _bcmmsgbuf_h_ + +#include +#include +#include + +#define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN + +#define D2H_EPOCH_MODULO 253 /* sequence number wrap */ +#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1) + +#define H2D_EPOCH_MODULO 253 /* sequence number wrap */ +#define H2D_EPOCH_INIT_VAL (H2D_EPOCH_MODULO + 1) + +#define H2DRING_TXPOST_ITEMSIZE 48 +#define H2DRING_RXPOST_ITEMSIZE 32 +#define H2DRING_CTRL_SUB_ITEMSIZE 40 + +#define D2HRING_TXCMPLT_ITEMSIZE 24 +#define D2HRING_RXCMPLT_ITEMSIZE 40 + +#define D2HRING_TXCMPLT_ITEMSIZE_PREREV7 16 +#define D2HRING_RXCMPLT_ITEMSIZE_PREREV7 32 + +#define D2HRING_CTRL_CMPLT_ITEMSIZE 24 +#define H2DRING_INFO_BUFPOST_ITEMSIZE H2DRING_CTRL_SUB_ITEMSIZE +#define D2HRING_INFO_BUFCMPLT_ITEMSIZE D2HRING_CTRL_CMPLT_ITEMSIZE + +#define D2HRING_SNAPSHOT_CMPLT_ITEMSIZE 20 + +#define H2DRING_TXPOST_MAX_ITEM 512 +#define H2DRING_RXPOST_MAX_ITEM 512 +#define H2DRING_CTRL_SUB_MAX_ITEM 64 +#define D2HRING_TXCMPLT_MAX_ITEM 1024 +#define D2HRING_RXCMPLT_MAX_ITEM 512 + +#define H2DRING_DYNAMIC_INFO_MAX_ITEM 32 +#define D2HRING_DYNAMIC_INFO_MAX_ITEM 32 + +#define D2HRING_EDL_ITEMSIZE 2048u +#define D2HRING_EDL_MAX_ITEM 256u +#define D2HRING_EDL_WATERMARK (D2HRING_EDL_MAX_ITEM >> 5u) + +#define D2HRING_CTRL_CMPLT_MAX_ITEM 64 + +enum { + DNGL_TO_HOST_MSGBUF, + HOST_TO_DNGL_MSGBUF +}; + +enum { + HOST_TO_DNGL_TXP_DATA, + HOST_TO_DNGL_RXP_DATA, + HOST_TO_DNGL_CTRL, + DNGL_TO_HOST_DATA, + DNGL_TO_HOST_CTRL +}; + +#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? 
TRUE : FALSE +#define PCIEDEV_FIRMWARE_TSINFO 0x1 +#define PCIEDEV_FIRMWARE_TSINFO_FIRST 0x1 +#define PCIEDEV_FIRMWARE_TSINFO_MIDDLE 0x2 +#define PCIEDEV_BTLOG_POST 0x3 +#define PCIEDEV_BT_SNAPSHOT_POST 0x4 + +#ifdef PCIE_API_REV1 + +#define BCMMSGBUF_DUMMY_REF(a, b) do {BCM_REFERENCE((a));BCM_REFERENCE((b));} while (0) + +#define BCMMSGBUF_API_IFIDX(a) 0 +#define BCMMSGBUF_API_SEQNUM(a) 0 +#define BCMMSGBUF_IOCTL_XTID(a) 0 +#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->cmd_id) + +#define BCMMSGBUF_SET_API_IFIDX(a, b) BCMMSGBUF_DUMMY_REF(a, b) +#define BCMMSGBUF_SET_API_SEQNUM(a, b) BCMMSGBUF_DUMMY_REF(a, b) +#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID(a) = (b)) +#define BCMMSGBUF_IOCTL_SET_XTID(a, b) BCMMSGBUF_DUMMY_REF(a, b) + +#else /* PCIE_API_REV1 */ + +#define BCMMSGBUF_API_IFIDX(a) ((a)->if_id) +#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->pkt_id) +#define BCMMSGBUF_API_SEQNUM(a) ((a)->u.seq.seq_no) +#define BCMMSGBUF_IOCTL_XTID(a) ((a)->xt_id) + +#define BCMMSGBUF_SET_API_IFIDX(a, b) (BCMMSGBUF_API_IFIDX((a)) = (b)) +#define BCMMSGBUF_SET_API_SEQNUM(a, b) (BCMMSGBUF_API_SEQNUM((a)) = (b)) +#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID((a)) = (b)) +#define BCMMSGBUF_IOCTL_SET_XTID(a, b) (BCMMSGBUF_IOCTL_XTID((a)) = (b)) + +#endif /* PCIE_API_REV1 */ + +/* utility data structures */ + +union addr64 { + struct { + uint32 low; + uint32 high; + }; + struct { + uint32 low_addr; + uint32 high_addr; + }; + uint64 u64; +} DECLSPEC_ALIGN(8); + +typedef union addr64 bcm_addr64_t; + +/* IOCTL req Hdr */ +/* cmn Msg Hdr */ +typedef struct cmn_msg_hdr { + /** message type */ + uint8 msg_type; + /** interface index this is valid for */ + uint8 if_id; + /* flags */ + uint8 flags; + /** sequence number */ + uint8 epoch; + /** packet Identifier for the associated host buffer */ + uint32 request_id; +} cmn_msg_hdr_t; + +/** message type */ +typedef enum bcmpcie_msgtype { + MSG_TYPE_GEN_STATUS = 0x1, + MSG_TYPE_RING_STATUS = 0x2, + 
MSG_TYPE_FLOW_RING_CREATE = 0x3, + MSG_TYPE_FLOW_RING_CREATE_CMPLT = 0x4, + /* Enum value as copied from BISON 7.15: new generic message */ + MSG_TYPE_RING_CREATE_CMPLT = 0x4, + MSG_TYPE_FLOW_RING_DELETE = 0x5, + MSG_TYPE_FLOW_RING_DELETE_CMPLT = 0x6, + /* Enum value as copied from BISON 7.15: new generic message */ + MSG_TYPE_RING_DELETE_CMPLT = 0x6, + MSG_TYPE_FLOW_RING_FLUSH = 0x7, + MSG_TYPE_FLOW_RING_FLUSH_CMPLT = 0x8, + MSG_TYPE_IOCTLPTR_REQ = 0x9, + MSG_TYPE_IOCTLPTR_REQ_ACK = 0xA, + MSG_TYPE_IOCTLRESP_BUF_POST = 0xB, + MSG_TYPE_IOCTL_CMPLT = 0xC, + MSG_TYPE_EVENT_BUF_POST = 0xD, + MSG_TYPE_WL_EVENT = 0xE, + MSG_TYPE_TX_POST = 0xF, + MSG_TYPE_TX_STATUS = 0x10, + MSG_TYPE_RXBUF_POST = 0x11, + MSG_TYPE_RX_CMPLT = 0x12, + MSG_TYPE_LPBK_DMAXFER = 0x13, + MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14, + MSG_TYPE_FLOW_RING_RESUME = 0x15, + MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16, + MSG_TYPE_FLOW_RING_SUSPEND = 0x17, + MSG_TYPE_FLOW_RING_SUSPEND_CMPLT = 0x18, + MSG_TYPE_INFO_BUF_POST = 0x19, + MSG_TYPE_INFO_BUF_CMPLT = 0x1A, + MSG_TYPE_H2D_RING_CREATE = 0x1B, + MSG_TYPE_D2H_RING_CREATE = 0x1C, + MSG_TYPE_H2D_RING_CREATE_CMPLT = 0x1D, + MSG_TYPE_D2H_RING_CREATE_CMPLT = 0x1E, + MSG_TYPE_H2D_RING_CONFIG = 0x1F, + MSG_TYPE_D2H_RING_CONFIG = 0x20, + MSG_TYPE_H2D_RING_CONFIG_CMPLT = 0x21, + MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22, + MSG_TYPE_H2D_MAILBOX_DATA = 0x23, + MSG_TYPE_D2H_MAILBOX_DATA = 0x24, + MSG_TYPE_TIMSTAMP_BUFPOST = 0x25, + MSG_TYPE_HOSTTIMSTAMP = 0x26, + MSG_TYPE_HOSTTIMSTAMP_CMPLT = 0x27, + MSG_TYPE_FIRMWARE_TIMESTAMP = 0x28, + MSG_TYPE_SNAPSHOT_UPLOAD = 0x29, + MSG_TYPE_SNAPSHOT_CMPLT = 0x2A, + MSG_TYPE_H2D_RING_DELETE = 0x2B, + MSG_TYPE_D2H_RING_DELETE = 0x2C, + MSG_TYPE_H2D_RING_DELETE_CMPLT = 0x2D, + MSG_TYPE_D2H_RING_DELETE_CMPLT = 0x2E, + MSG_TYPE_API_MAX_RSVD = 0x3F +} bcmpcie_msg_type_t; + +typedef enum bcmpcie_msgtype_int { + MSG_TYPE_INTERNAL_USE_START = 0x40, + MSG_TYPE_EVENT_PYLD = 0x41, + MSG_TYPE_IOCT_PYLD = 0x42, + MSG_TYPE_RX_PYLD = 0x43, + 
MSG_TYPE_HOST_FETCH = 0x44, + MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45, + MSG_TYPE_TXMETADATA_PYLD = 0x46, + MSG_TYPE_INDX_UPDATE = 0x47, + MSG_TYPE_INFO_PYLD = 0x48, + MSG_TYPE_TS_EVENT_PYLD = 0x49, + MSG_TYPE_PVT_BTLOG_CMPLT = 0x4A, + MSG_TYPE_BTLOG_PYLD = 0x4B, + MSG_TYPE_HMAPTEST_PYLD = 0x4C, + MSG_TYPE_PVT_BT_SNAPSHOT_CMPLT = 0x4D, + MSG_TYPE_BT_SNAPSHOT_PYLD = 0x4E +} bcmpcie_msgtype_int_t; + +typedef enum bcmpcie_msgtype_u { + MSG_TYPE_TX_BATCH_POST = 0x80, + MSG_TYPE_IOCTL_REQ = 0x81, + MSG_TYPE_HOST_EVNT = 0x82, /* console related */ + MSG_TYPE_LOOPBACK = 0x83 +} bcmpcie_msgtype_u_t; + +/** + * D2H ring host wakeup soft doorbell, override the PCIE doorbell. + * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE + * Transl0 to write specified value to host address. + * + * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register + * and value is Core/Thread context. Host will ensure routing the 32bit address + * offerred to PCIE to the mapped register. 
+ * + * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL + */ +typedef struct bcmpcie_soft_doorbell { + uint32 value; /* host defined value to be written, eg HW threadid */ + bcm_addr64_t haddr; /* host address, eg thread wakeup register address */ + uint16 items; /* interrupt coalescing: item count before wakeup */ + uint16 msecs; /* interrupt coalescing: timeout in millisecs */ +} bcmpcie_soft_doorbell_t; + +/** + * D2H interrupt using MSI instead of INTX + * Host configures MSI vector offset for each D2H interrupt + * + * D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL + */ +typedef enum bcmpcie_msi_intr_idx { + MSI_INTR_IDX_CTRL_CMPL_RING = 0, + MSI_INTR_IDX_TXP_CMPL_RING = 1, + MSI_INTR_IDX_RXP_CMPL_RING = 2, + MSI_INTR_IDX_INFO_CMPL_RING = 3, + MSI_INTR_IDX_MAILBOX = 4, + MSI_INTR_IDX_MAX = 5 +} bcmpcie_msi_intr_idx_t; + +#define BCMPCIE_D2H_MSI_OFFSET_SINGLE 0 +typedef enum bcmpcie_msi_offset_type { + BCMPCIE_D2H_MSI_OFFSET_MB0 = 2, + BCMPCIE_D2H_MSI_OFFSET_MB1 = 3, + BCMPCIE_D2H_MSI_OFFSET_DB0 = 4, + BCMPCIE_D2H_MSI_OFFSET_DB1 = 5, + BCMPCIE_D2H_MSI_OFFSET_H1_DB0 = 6, + BCMPCIE_D2H_MSI_OFFSET_MAX = 7 +} bcmpcie_msi_offset_type_t; + +typedef struct bcmpcie_msi_offset { + uint16 intr_idx; /* interrupt index */ + uint16 msi_offset; /* msi vector offset */ +} bcmpcie_msi_offset_t; + +typedef struct bcmpcie_msi_offset_config { + uint32 len; + bcmpcie_msi_offset_t bcmpcie_msi_offset[MSI_INTR_IDX_MAX]; +} bcmpcie_msi_offset_config_t; + +#define BCMPCIE_D2H_MSI_OFFSET_DEFAULT BCMPCIE_D2H_MSI_OFFSET_DB1 + +#define BCMPCIE_D2H_MSI_SINGLE 0xFFFE + +/* if_id */ +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX 0x7 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT 0 +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX 0x1F +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) + +/* flags 
*/ +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1 +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2 +#define BCMPCIE_CMNHDR_FLAGS_TS_SEQNUM_INIT 0x4 +#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80 +#define BCMPCIE_CMNHDR_PHASE_BIT_INIT 0x80 + +/* IOCTL request message */ +typedef struct ioctl_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** ioctl command type */ + uint32 cmd; + /** ioctl transaction ID, to pair with a ioctl response */ + uint16 trans_id; + /** input arguments buffer len */ + uint16 input_buf_len; + /** expected output len */ + uint16 output_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 rsvd[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; + /* rsvd */ + uint32 rsvd1[2]; +} ioctl_req_msg_t; + +/** buffer post messages for device to use to return IOCTL responses, Events */ +typedef struct ioctl_resp_evt_buf_post_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** length of the host buffer supplied */ + uint16 host_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 reserved[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_buf_addr; + uint32 rsvd[4]; +} ioctl_resp_evt_buf_post_msg_t; + +/* buffer post messages for device to use to return dbg buffers */ +typedef ioctl_resp_evt_buf_post_msg_t info_buf_post_msg_t; + +#define DHD_INFOBUF_RX_BUFPOST_PKTSZ (2 * 1024) + +#define DHD_BTLOG_RX_BUFPOST_PKTSZ (2 * 1024) + +/* An infobuf host buffer starts with a 32 bit (LE) version. */ +#define PCIE_INFOBUF_V1 1 +/* Infobuf v1 type MSGTRACE's data is exactly the same as the MSGTRACE data that + * is wrapped previously/also in a WLC_E_TRACE event. See structure + * msgrace_hdr_t in msgtrace.h. +*/ +#define PCIE_INFOBUF_V1_TYPE_MSGTRACE 1 + +/* Infobuf v1 type LOGTRACE data is exactly the same as the LOGTRACE data that + * is wrapped previously/also in a WLC_E_TRACE event. See structure + * msgrace_hdr_t in msgtrace.h. 
(The only difference between a MSGTRACE + * and a LOGTRACE is the "trace type" field.) +*/ +#define PCIE_INFOBUF_V1_TYPE_LOGTRACE 2 + +/* An infobuf version 1 host buffer has a single TLV. The information on the + * version 1 types follow this structure definition. (int's LE) +*/ +typedef struct info_buf_payload_hdr_s { + uint16 type; + uint16 length; +} info_buf_payload_hdr_t; + +/* BT logs/memory to DMA directly from BT memory to host */ +typedef struct info_buf_btlog_s { + void (*status_cb)(void *ctx, void *p, int error); /* obsolete - to be removed */ + void *ctx; + dma64addr_t src_addr; + uint32 length; + bool (*pcie_status_cb)(osl_t *osh, void *p, int error); + uint32 bt_intstatus; + int error; +} info_buf_btlog_t; + +/** snapshot upload request message */ +typedef struct snapshot_upload_request_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** length of the snaphost buffer supplied */ + uint32 snapshot_buf_len; + /** type of snapshot */ + uint8 snapshot_type; + /** snapshot param */ + uint8 snapshot_param; + /** to align the host address on 8 byte boundary */ + uint8 reserved[2]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_buf_addr; + uint32 rsvd[4]; +} snapshot_upload_request_msg_t; + +/** snapshot types */ +typedef enum bcmpcie_snapshot_type { + SNAPSHOT_TYPE_BT = 0, /* Bluetooth SRAM and patch RAM */ + SNAPSHOT_TYPE_WLAN_SOCRAM = 1, /* WLAN SOCRAM */ + SNAPSHOT_TYPE_WLAN_HEAP = 2, /* WLAN HEAP */ + SNAPSHOT_TYPE_WLAN_REGISTER = 3 /* WLAN registers */ +} bcmpcie_snapshot_type_t; + +#define PCIE_DMA_XFER_FLG_D11_LPBK_MASK 0xF +#define PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT 2 +#define PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK 3 +#define PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT 0 + +typedef struct pcie_dma_xfer_params { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; + + /** always align on 8 byte boundary */ + bcm_addr64_t host_ouput_buf_addr; + + /** 
length of transfer */ + uint32 xfer_len; + /** delay before doing the src txfer */ + uint32 srcdelay; + /** delay before doing the dest txfer */ + uint32 destdelay; + uint8 rsvd[3]; + /* bit0: D11 DMA loopback flag */ + uint8 flags; +} pcie_dma_xfer_params_t; + +/** Complete msgbuf hdr for flow ring update from host to dongle */ +typedef struct tx_flowring_create_request { + cmn_msg_hdr_t msg; + uint8 da[ETHER_ADDR_LEN]; + uint8 sa[ETHER_ADDR_LEN]; + uint8 tid; + uint8 if_flags; + uint16 flow_ring_id; + uint8 tc; + /* priority_ifrmmask is to define core mask in ifrm mode. + * currently it is not used for priority. so uses solely for ifrm mask + */ + uint8 priority_ifrmmask; + uint16 int_vector; + uint16 max_items; + uint16 len_item; + bcm_addr64_t flow_ring_ptr; +} tx_flowring_create_request_t; + +typedef struct tx_flowring_delete_request { + cmn_msg_hdr_t msg; + uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_flowring_delete_request_t; + +typedef tx_flowring_delete_request_t d2h_ring_delete_req_t; +typedef tx_flowring_delete_request_t h2d_ring_delete_req_t; + +typedef struct tx_flowring_flush_request { + cmn_msg_hdr_t msg; + uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_flowring_flush_request_t; + +/** Subtypes for ring_config_req control message */ +typedef enum ring_config_subtype { + /** Default D2H PCIE doorbell override using ring_config_req msg */ + D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */ + D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2 /* MSI configuration */ +} ring_config_subtype_t; + +typedef struct ring_config_req { + cmn_msg_hdr_t msg; + uint16 subtype; + uint16 ring_id; + uint32 rsvd; + union { + uint32 data[6]; + /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */ + bcmpcie_soft_doorbell_t soft_doorbell; + /** D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL */ + bcmpcie_msi_offset_config_t msi_offset; + }; +} ring_config_req_t; + +/* data structure to use to create on the fly d2h rings */ +typedef struct 
d2h_ring_create_req { + cmn_msg_hdr_t msg; + uint16 ring_id; + uint16 ring_type; + uint32 flags; + bcm_addr64_t ring_ptr; + uint16 max_items; + uint16 len_item; + uint32 rsvd[3]; +} d2h_ring_create_req_t; + +/* data structure to use to create on the fly h2d rings */ +#define MAX_COMPLETION_RING_IDS_ASSOCIATED 4 +typedef struct h2d_ring_create_req { + cmn_msg_hdr_t msg; + uint16 ring_id; + uint8 ring_type; + uint8 n_completion_ids; + uint32 flags; + bcm_addr64_t ring_ptr; + uint16 max_items; + uint16 len_item; + uint16 completion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED]; + uint32 rsvd; +} h2d_ring_create_req_t; + +typedef struct d2h_ring_config_req { + cmn_msg_hdr_t msg; + uint16 d2h_ring_config_subtype; + uint16 d2h_ring_id; + uint32 d2h_ring_config_data[4]; + uint32 rsvd[3]; +} d2h_ring_config_req_t; + +typedef struct h2d_ring_config_req { + cmn_msg_hdr_t msg; + uint16 h2d_ring_config_subtype; + uint16 h2d_ring_id; + uint32 h2d_ring_config_data; + uint32 rsvd[6]; +} h2d_ring_config_req_t; + +typedef struct h2d_mailbox_data { + cmn_msg_hdr_t msg; + uint32 mail_box_data; + uint32 rsvd[7]; +} h2d_mailbox_data_t; +typedef struct host_timestamp_msg { + cmn_msg_hdr_t msg; + uint16 xt_id; /* transaction ID */ + uint16 input_data_len; /* data len at the host_buf_addr, data in TLVs */ + uint16 seqnum; /* number of times host captured the timestamp */ + uint16 rsvd; + /* always align on 8 byte boundary */ + bcm_addr64_t host_buf_addr; + /* rsvd */ + uint32 rsvd1[4]; +} host_timestamp_msg_t; + +/* buffer post message for timestamp events MSG_TYPE_TIMSTAMP_BUFPOST */ +typedef ioctl_resp_evt_buf_post_msg_t ts_buf_post_msg_t; + +typedef union ctrl_submit_item { + ioctl_req_msg_t ioctl_req; + ioctl_resp_evt_buf_post_msg_t resp_buf_post; + pcie_dma_xfer_params_t dma_xfer; + tx_flowring_create_request_t flow_create; + tx_flowring_delete_request_t flow_delete; + tx_flowring_flush_request_t flow_flush; + ring_config_req_t ring_config_req; + d2h_ring_create_req_t d2h_create; + 
h2d_ring_create_req_t h2d_create; + d2h_ring_config_req_t d2h_config; + h2d_ring_config_req_t h2d_config; + h2d_mailbox_data_t h2d_mailbox_data; + host_timestamp_msg_t host_ts; + ts_buf_post_msg_t ts_buf_post; + d2h_ring_delete_req_t d2h_delete; + h2d_ring_delete_req_t h2d_delete; + unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE]; +} ctrl_submit_item_t; + +typedef struct info_ring_submit_item { + info_buf_post_msg_t info_buf_post; + unsigned char check[H2DRING_INFO_BUFPOST_ITEMSIZE]; +} info_sumbit_item_t; + +/** Control Completion messages (20 bytes) */ +typedef struct compl_msg_hdr { + /** status for the completion */ + int16 status; + /** submisison flow ring id which generated this status */ + union { + uint16 ring_id; + uint16 flow_ring_id; + }; +} compl_msg_hdr_t; + +/** XOR checksum or a magic number to audit DMA done */ +typedef uint32 dma_done_t; + +#define MAX_CLKSRC_ID 0xF +#define TX_PKT_RETRY_CNT_0_MASK 0x000000FF +#define TX_PKT_RETRY_CNT_0_SHIFT 0 +#define TX_PKT_RETRY_CNT_1_MASK 0x0000FF00 +#define TX_PKT_RETRY_CNT_1_SHIFT 8 +#define TX_PKT_RETRY_CNT_2_MASK 0x00FF0000 +#define TX_PKT_RETRY_CNT_2_SHIFT 16 +#define TX_PKT_BAND_INFO 0x0F000000 +#define TX_PKT_BAND_INFO_SHIFT 24 +#define TX_PKT_VALID_INFO 0xF0000000 +#define TX_PKT_VALID_INFO_SHIFT 28 + +typedef struct ts_timestamp_srcid { + union { + uint32 ts_low; /* time stamp low 32 bits */ + uint32 rate_spec; /* use ratespec */ + }; + union { + uint32 ts_high; /* time stamp high 28 bits */ + union { + uint32 ts_high_ext :28; /* time stamp high 28 bits */ + uint32 clk_id_ext :3; /* clock ID source */ + uint32 phase :1; /* Phase bit */ + dma_done_t marker_ext; + }; + uint32 tx_pkt_band_retry_info; + }; +} ts_timestamp_srcid_t; + +typedef ts_timestamp_srcid_t ipc_timestamp_t; + +typedef struct ts_timestamp { + uint32 low; + uint32 high; +} ts_timestamp_t; + +typedef ts_timestamp_t tick_count_64_t; +typedef ts_timestamp_t ts_timestamp_ns_64_t; +typedef ts_timestamp_t ts_correction_m_t; +typedef 
ts_timestamp_t ts_correction_b_t; + +/* completion header status codes */ +#define BCMPCIE_SUCCESS 0 +#define BCMPCIE_NOTFOUND 1 +#define BCMPCIE_NOMEM 2 +#define BCMPCIE_BADOPTION 3 +#define BCMPCIE_RING_IN_USE 4 +#define BCMPCIE_RING_ID_INVALID 5 +#define BCMPCIE_PKT_FLUSH 6 +#define BCMPCIE_NO_EVENT_BUF 7 +#define BCMPCIE_NO_RX_BUF 8 +#define BCMPCIE_NO_IOCTLRESP_BUF 9 +#define BCMPCIE_MAX_IOCTLRESP_BUF 10 +#define BCMPCIE_MAX_EVENT_BUF 11 +#define BCMPCIE_BAD_PHASE 12 +#define BCMPCIE_INVALID_CPL_RINGID 13 +#define BCMPCIE_RING_TYPE_INVALID 14 +#define BCMPCIE_NO_TS_EVENT_BUF 15 +#define BCMPCIE_MAX_TS_EVENT_BUF 16 +#define BCMPCIE_PCIE_NO_BTLOG_BUF 17 +#define BCMPCIE_BT_DMA_ERR 18 +#define BCMPCIE_BT_DMA_DESCR_FETCH_ERR 19 +#define BCMPCIE_SNAPSHOT_ERR 20 +#define BCMPCIE_NOT_READY 21 +#define BCMPCIE_INVALID_DATA 22 +#define BCMPCIE_NO_RESPONSE 23 +#define BCMPCIE_NO_CLOCK 24 + +/** IOCTL completion response */ +typedef struct ioctl_compl_resp_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** response buffer len where a host buffer is involved */ + uint16 resp_len; + /** transaction id to pair with a request */ + uint16 trans_id; + /** cmd id */ + uint32 cmd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ioctl_comp_resp_msg_t; + +/** IOCTL request acknowledgement */ +typedef struct ioctl_req_ack_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** cmd id */ + uint32 cmd; + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ioctl_req_ack_msg_t; + +/** WL event message: send from device to host */ +typedef struct wlevent_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** event data len valid with the event buffer */ + uint16 event_data_len; 
+ /** sequence number */ + uint16 seqnum; + /** rsvd */ + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} wlevent_req_msg_t; + +/** dma xfer complete message */ +typedef struct pcie_dmaxfer_cmplt { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_dmaxfer_cmplt_t; + +/** general status message */ +typedef struct pcie_gen_status { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_gen_status_t; + +/** ring status message */ +typedef struct pcie_ring_status { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** message which firmware couldn't decode */ + uint16 write_idx; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_ring_status_t; + +typedef struct ring_create_response { + cmn_msg_hdr_t cmn_hdr; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ring_create_response_t; + +typedef ring_create_response_t tx_flowring_create_response_t; +typedef ring_create_response_t h2d_ring_create_response_t; +typedef ring_create_response_t d2h_ring_create_response_t; + +typedef struct tx_flowring_delete_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint16 read_idx; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_delete_response_t; + +typedef tx_flowring_delete_response_t h2d_ring_delete_response_t; +typedef tx_flowring_delete_response_t d2h_ring_delete_response_t; + +typedef struct tx_flowring_flush_response { + 
cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_flush_response_t; + +/** Common layout of all d2h control messages */ +typedef struct ctrl_compl_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ctrl_compl_msg_t; + +typedef struct ring_config_resp { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint16 subtype; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ring_config_resp_t; + +typedef struct d2h_mailbox_data { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 d2h_mailbox_data; + uint32 rsvd[1]; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} d2h_mailbox_data_t; + +/* dbg buf completion msg: send from device to host */ +typedef struct info_buf_resp { + /* common message header */ + cmn_msg_hdr_t cmn_hdr; + /* completion message header */ + compl_msg_hdr_t compl_hdr; + /* event data len valid with the event buffer */ + uint16 info_data_len; + /* sequence number */ + uint16 seqnum; + /* destination */ + uint8 dest; + /* rsvd */ + uint8 rsvd[3]; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} info_buf_resp_t; + +/* snapshot completion msg: send from device to host */ +typedef struct snapshot_resp { + /* common message header */ + cmn_msg_hdr_t cmn_hdr; + /* completion message header */ + compl_msg_hdr_t compl_hdr; + /* snapshot length uploaded */ + uint32 resp_len; + /* snapshot type */ + uint8 type; + /* rsvd */ + uint8 rsvd[3]; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} snapshot_resp_t; + +typedef struct info_ring_cpl_item { + info_buf_resp_t 
info_buf_post; + unsigned char check[D2HRING_INFO_BUFCMPLT_ITEMSIZE]; +} info_cpl_item_t; + +typedef struct host_timestamp_msg_cpl { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint16 xt_id; /* transaction ID */ + uint16 rsvd; + uint32 rsvd1; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} host_timestamp_msg_cpl_t; + +typedef struct fw_timestamp_event_msg { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + /* fw captures time stamp info and passed that to host in TLVs */ + uint16 buf_len; /* length of the time stamp data copied in host buf */ + uint16 seqnum; /* number of times fw captured time stamp */ + uint32 rsvd; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} fw_timestamp_event_msg_t; + +typedef union ctrl_completion_item { + ioctl_comp_resp_msg_t ioctl_resp; + wlevent_req_msg_t event; + ioctl_req_ack_msg_t ioct_ack; + pcie_dmaxfer_cmplt_t pcie_xfer_cmplt; + pcie_gen_status_t pcie_gen_status; + pcie_ring_status_t pcie_ring_status; + tx_flowring_create_response_t txfl_create_resp; + tx_flowring_delete_response_t txfl_delete_resp; + tx_flowring_flush_response_t txfl_flush_resp; + ctrl_compl_msg_t ctrl_compl; + ring_config_resp_t ring_config_resp; + d2h_mailbox_data_t d2h_mailbox_data; + info_buf_resp_t dbg_resp; + h2d_ring_create_response_t h2d_ring_create_resp; + d2h_ring_create_response_t d2h_ring_create_resp; + host_timestamp_msg_cpl_t host_ts_cpl; + fw_timestamp_event_msg_t fw_ts_event; + h2d_ring_delete_response_t h2d_ring_delete_resp; + d2h_ring_delete_response_t d2h_ring_delete_resp; + unsigned char ctrl_response[D2HRING_CTRL_CMPLT_ITEMSIZE]; +} ctrl_completion_item_t; + +/** H2D Rxpost ring work items */ +typedef struct host_rxbuf_post { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** provided meta data buffer len */ + uint16 metadata_buf_len; + /** provided data buffer len to receive data */ + uint16 data_buf_len; + /** alignment to make the host buffers start on 8 byte 
boundary */ + uint32 rsvd; + /** provided meta data buffer */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; +} host_rxbuf_post_t; + +typedef union rxbuf_submit_item { + host_rxbuf_post_t rxpost; + unsigned char check[H2DRING_RXPOST_ITEMSIZE]; +} rxbuf_submit_item_t; + +/* D2H Rxcompletion ring work items for IPC rev7 */ +typedef struct host_rxbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** filled up meta data len */ + uint16 metadata_len; + /** filled up buffer len to receive data */ + uint16 data_len; + /** offset in the host rx buffer where the data starts */ + uint16 data_offset; + /** offset in the host rx buffer where the data starts */ + uint16 flags; + /** rx status */ + uint32 rx_status_0; + uint32 rx_status_1; + /** XOR checksum or a magic number to audit DMA done */ + /* This is for rev6 only. For IPC rev7, this is a reserved field */ + dma_done_t marker; + /* timestamp */ + ipc_timestamp_t ts; +} host_rxbuf_cmpl_t; + +typedef union rxbuf_complete_item { + host_rxbuf_cmpl_t rxcmpl; + unsigned char check[D2HRING_RXCMPLT_ITEMSIZE]; +} rxbuf_complete_item_t; + +typedef struct host_txbuf_post { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** eth header */ + uint8 txhdr[ETHER_HDR_LEN]; + /** flags */ + uint8 flags; + /** number of segments */ + uint8 seg_cnt; + + /** provided meta data buffer for txstatus */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; + /** provided meta data buffer len */ + uint16 metadata_buf_len; + /** provided data buffer len to receive data */ + uint16 data_len; + union { + struct { + /** extended transmit flags */ + uint8 ext_flags; + uint8 rsvd1; + + /** user defined rate */ + uint8 rate; + uint8 rsvd2; + }; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; + }; +} 
host_txbuf_post_t; + +#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01 +#define BCMPCIE_PKT_FLAGS_FRAME_802_11 0x02 + +#define BCMPCIE_PKT_FLAGS_FRAME_NORETRY 0x01 /* Disable retry on this frame */ +#define BCMPCIE_PKT_FLAGS_FRAME_NOAGGR 0x02 /* Disable aggregation for this frame */ +#define BCMPCIE_PKT_FLAGS_FRAME_UDR 0x04 /* User defined rate for this frame */ +#define BCMPCIE_PKT_FLAGS_FRAME_ATTR_MASK 0x07 /* Attribute mask */ + +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */ +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */ + +#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5 +#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) +#define BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU 0x00 +#define BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT 0x01 +#define BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT 0x02 +#define BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT 0x03 +#define BCMPCIE_PKT_FLAGS_MONITOR_SHIFT 8 +#define BCMPCIE_PKT_FLAGS_MONITOR_MASK (3 << BCMPCIE_PKT_FLAGS_MONITOR_SHIFT) + +/* These are added to fix up compile issues */ +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3 +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11 +#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT +#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK + +/* H2D Txpost ring work items */ +typedef union txbuf_submit_item { + host_txbuf_post_t txpost; + unsigned char check[H2DRING_TXPOST_ITEMSIZE]; +} txbuf_submit_item_t; + +/* D2H Txcompletion ring work items - extended for IOC rev7 */ +typedef struct host_txbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + union { + struct { + union { + /** provided meta data len */ + uint16 metadata_len; + /** provided extended TX status */ + uint16 tx_status_ext; + }; + /** WLAN side txstatus */ + uint16 tx_status; + }; + /** XOR checksum or a magic number 
to audit DMA done */ + /* This is for rev6 only. For IPC rev7, this is not used */ + dma_done_t marker; + }; + /* timestamp */ + ipc_timestamp_t ts; + +} host_txbuf_cmpl_t; + +typedef union txbuf_complete_item { + host_txbuf_cmpl_t txcmpl; + unsigned char check[D2HRING_TXCMPLT_ITEMSIZE]; +} txbuf_complete_item_t; + +#define BCMPCIE_D2H_METADATA_HDRLEN 4 +#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4) + +/** ret buf struct */ +typedef struct ret_buf_ptr { + uint32 low_addr; + uint32 high_addr; +} ret_buf_t; + +#ifdef PCIE_API_REV1 + +/* ioctl specific hdr */ +typedef struct ioctl_hdr { + uint16 cmd; + uint16 retbuf_len; + uint32 cmd_id; +} ioctl_hdr_t; + +typedef struct ioctlptr_hdr { + uint16 cmd; + uint16 retbuf_len; + uint16 buflen; + uint16 rsvd; + uint32 cmd_id; +} ioctlptr_hdr_t; + +#else /* PCIE_API_REV1 */ + +typedef struct ioctl_req_hdr { + uint32 pkt_id; /**< Packet ID */ + uint32 cmd; /**< IOCTL ID */ + uint16 retbuf_len; + uint16 buflen; + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +} ioctl_req_hdr_t; + +#endif /* PCIE_API_REV1 */ + +/** Complete msgbuf hdr for ioctl from host to dongle */ +typedef struct ioct_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctl_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif // endif + ret_buf_t ret_buf; +} ioct_reqst_hdr_t; + +typedef struct ioctptr_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctlptr_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif // endif + ret_buf_t ret_buf; + ret_buf_t ioct_buf; +} ioctptr_reqst_hdr_t; + +/** ioctl response header */ +typedef struct ioct_resp_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + uint32 cmd_id; +#else + uint32 pkt_id; +#endif // endif + uint32 status; + uint32 ret_len; + uint32 inline_data; +#ifdef PCIE_API_REV1 +#else + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +#endif // endif +} ioct_resp_hdr_t; + +/* ioct resp header used in dongle */ +/* ret buf hdr will be stripped off 
inside dongle itself */ +typedef struct msgbuf_ioctl_resp { + ioct_resp_hdr_t ioct_hdr; + ret_buf_t ret_buf; /**< ret buf pointers */ +} msgbuf_ioct_resp_t; + +/** WL event hdr info */ +typedef struct wl_event_hdr { + cmn_msg_hdr_t msg; + uint16 event; + uint8 flags; + uint8 rsvd; + uint16 retbuf_len; + uint16 rsvd1; + uint32 rxbufid; +} wl_event_hdr_t; + +#define TXDESCR_FLOWID_PCIELPBK_1 0xFF +#define TXDESCR_FLOWID_PCIELPBK_2 0xFE + +typedef struct txbatch_lenptr_tup { + uint32 pktid; + uint16 pktlen; + uint16 rsvd; + ret_buf_t ret_buf; /**< ret buf pointers */ +} txbatch_lenptr_tup_t; + +typedef struct txbatch_cmn_msghdr { + cmn_msg_hdr_t msg; + uint8 priority; + uint8 hdrlen; + uint8 pktcnt; + uint8 flowid; + uint8 txhdr[ETHER_HDR_LEN]; + uint16 rsvd; +} txbatch_cmn_msghdr_t; + +typedef struct txbatch_msghdr { + txbatch_cmn_msghdr_t txcmn; + txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */ +} txbatch_msghdr_t; + +/* TX desc posting header */ +typedef struct tx_lenptr_tup { + uint16 pktlen; + uint16 rsvd; + ret_buf_t ret_buf; /**< ret buf pointers */ +} tx_lenptr_tup_t; + +typedef struct txdescr_cmn_msghdr { + cmn_msg_hdr_t msg; + uint8 priority; + uint8 hdrlen; + uint8 descrcnt; + uint8 flowid; + uint32 pktid; +} txdescr_cmn_msghdr_t; + +typedef struct txdescr_msghdr { + txdescr_cmn_msghdr_t txcmn; + uint8 txhdr[ETHER_HDR_LEN]; + uint16 rsvd; + tx_lenptr_tup_t tx_tup[0]; /**< Based on descriptor count */ +} txdescr_msghdr_t; + +/** Tx status header info */ +typedef struct txstatus_hdr { + cmn_msg_hdr_t msg; + uint32 pktid; +} txstatus_hdr_t; + +/** RX bufid-len-ptr tuple */ +typedef struct rx_lenptr_tup { + uint32 rxbufid; + uint16 len; + uint16 rsvd2; + ret_buf_t ret_buf; /**< ret buf pointers */ +} rx_lenptr_tup_t; + +/** Rx descr Post hdr info */ +typedef struct rxdesc_msghdr { + cmn_msg_hdr_t msg; + uint16 rsvd0; + uint8 rsvd1; + uint8 descnt; + rx_lenptr_tup_t rx_tup[0]; +} rxdesc_msghdr_t; + +/** RX complete tuples */ +typedef struct 
rxcmplt_tup { + uint16 retbuf_len; + uint16 data_offset; + uint32 rxstatus0; + uint32 rxstatus1; + uint32 rxbufid; +} rxcmplt_tup_t; + +/** RX complete messge hdr */ +typedef struct rxcmplt_hdr { + cmn_msg_hdr_t msg; + uint16 rsvd0; + uint16 rxcmpltcnt; + rxcmplt_tup_t rx_tup[0]; +} rxcmplt_hdr_t; + +typedef struct hostevent_hdr { + cmn_msg_hdr_t msg; + uint32 evnt_pyld; +} hostevent_hdr_t; + +typedef struct dma_xfer_params { + uint32 src_physaddr_hi; + uint32 src_physaddr_lo; + uint32 dest_physaddr_hi; + uint32 dest_physaddr_lo; + uint32 len; + uint32 srcdelay; + uint32 destdelay; +} dma_xfer_params_t; + +enum { + HOST_EVENT_CONS_CMD = 1 +}; + +/* defines for flags */ +#define MSGBUF_IOC_ACTION_MASK 0x1 + +#define MAX_SUSPEND_REQ 15 + +typedef struct tx_idle_flowring_suspend_request { + cmn_msg_hdr_t msg; + uint16 ring_id[MAX_SUSPEND_REQ]; /* ring Id's */ + uint16 num; /* number of flowid's to suspend */ +} tx_idle_flowring_suspend_request_t; + +typedef struct tx_idle_flowring_suspend_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_suspend_response_t; + +typedef struct tx_idle_flowring_resume_request { + cmn_msg_hdr_t msg; + uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_idle_flowring_resume_request_t; + +typedef struct tx_idle_flowring_resume_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_resume_response_t; + +/* timesync related additions */ + +typedef struct _bcm_xtlv { + uint16 id; /* TLV idenitifier */ + uint16 len; /* TLV length in bytes */ +} _bcm_xtlv_t; + +#define BCMMSGBUF_FW_CLOCK_INFO_TAG 0 +#define BCMMSGBUF_HOST_CLOCK_INFO_TAG 1 +#define BCMMSGBUF_HOST_CLOCK_SELECT_TAG 2 +#define BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG 3 +#define BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG 4 +#define BCMMSGBUF_MAX_TSYNC_TAG 5 + +/* Flags in fw clock info TLV */ +#define CAP_DEVICE_TS (1 << 0) +#define CAP_CORRECTED_TS (1 << 1) 
+#define TS_CLK_ACTIVE (1 << 2) + +typedef struct ts_fw_clock_info { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_FW_CLOCK_INFO_TAG */ + ts_timestamp_srcid_t ts; /* tick count */ + uchar clk_src[4]; /* clock source acronym ILP/AVB/TSF */ + uint32 nominal_clock_freq; + uint32 reset_cnt; + uint8 flags; + uint8 rsvd[3]; +} ts_fw_clock_info_t; + +typedef struct ts_host_clock_info { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */ + tick_count_64_t ticks; /* 64 bit host tick counter */ + ts_timestamp_ns_64_t ns; /* 64 bit host time in nano seconds */ +} ts_host_clock_info_t; + +typedef struct ts_host_clock_sel { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_SELECT_TAG */ + uint32 seqnum; /* number of times GPIO time sync toggled */ + uint8 min_clk_idx; /* clock idenitifer configured for packet tiem stamping */ + uint8 max_clk_idx; /* clock idenitifer configured for packet tiem stamping */ + uint16 rsvd[1]; +} ts_host_clock_sel_t; + +typedef struct ts_d2h_clock_correction { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */ + uint8 clk_id; /* clock source in the device */ + uint8 rsvd[3]; + ts_correction_m_t m; /* y = 'm' x + b */ + ts_correction_b_t b; /* y = 'm' x + 'c' */ +} ts_d2h_clock_correction_t; + +typedef struct ts_host_timestamping_config { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG */ + /* time period to capture the device time stamp and toggle WLAN_TIME_SYNC_GPIO */ + uint16 period_ms; + uint8 flags; + uint8 post_delay; + uint32 reset_cnt; +} ts_host_timestamping_config_t; + +/* Flags in host timestamping config TLV */ +#define FLAG_HOST_RESET (1 << 0) +#define IS_HOST_RESET(x) ((x) & FLAG_HOST_RESET) +#define CLEAR_HOST_RESET(x) ((x) & ~FLAG_HOST_RESET) + +#define FLAG_CONFIG_NODROP (1 << 1) +#define IS_CONFIG_NODROP(x) ((x) & FLAG_CONFIG_NODROP) +#define CLEAR_CONFIG_NODROP(x) ((x) & ~FLAG_CONFIG_NODROP) + +#endif /* _bcmmsgbuf_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmnvram.h b/bcmdhd.100.10.315.x/include/bcmnvram.h new file mode 
100644 index 0000000..5289244 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmnvram.h @@ -0,0 +1,328 @@ +/* + * NVRAM variable manipulation + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmnvram.h 655606 2016-08-22 17:16:11Z $ + */ + +#ifndef _bcmnvram_h_ +#define _bcmnvram_h_ + +#ifndef _LANGUAGE_ASSEMBLY + +#include +#include + +struct nvram_header { + uint32 magic; + uint32 len; + uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ + uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ + uint32 config_ncdl; /* ncdl values for memc */ +}; + +struct nvram_tuple { + char *name; + char *value; + struct nvram_tuple *next; +}; + +/* + * Get default value for an NVRAM variable + */ +extern char *nvram_default_get(const char *name); +/* + * validate/restore all per-interface related variables + */ +extern void nvram_validate_all(char *prefix, bool restore); + +/* + * restore specific per-interface variable + */ +extern void nvram_restore_var(char *prefix, char *name); + +/* + * Initialize NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern int nvram_init(void *sih); +extern int nvram_deinit(void *sih); + +extern int nvram_file_read(char **nvramp, int *nvraml); + +/* + * Append a chunk of nvram variables to the global list + */ +extern int nvram_append(void *si, char *vars, uint varsz); + +extern void nvram_get_global_vars(char **varlst, uint *varsz); + +/* + * Check for reset button press for restoring factory defaults. + */ +extern int nvram_reset(void *sih); + +/* + * Disable NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern void nvram_exit(void *sih); + +/* + * Get the value of an NVRAM variable. The pointer returned may be + * invalid after a set. + * @param name name of variable to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get(const char *name); + +/* + * Get the value of an NVRAM variable. The pointer returned may be + * invalid after a set. 
+ * @param name name of variable to get + * @param bit bit value to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get_bitflag(const char *name, const int bit); + +/* + * Read the reset GPIO value from the nvram and set the GPIO + * as input + */ +extern int nvram_resetgpio_init(void *sih); + +/* + * Get the value of an NVRAM variable. + * @param name name of variable to get + * @return value of variable or NUL if undefined + */ +static INLINE char * +nvram_safe_get(const char *name) +{ + char *p = nvram_get(name); + return p ? p : ""; +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match(const char *name, const char *match) +{ + const char *value = nvram_get(name); + + /* In nvramstubs.c builds, nvram_get() is defined as returning zero, + * so the return line below never executes the strcmp(), + * resulting in 'match' being an unused parameter. + * Make a ref to 'match' to quiet the compiler warning. + */ + + BCM_REFERENCE(match); + + return (value && !strcmp(value, match)); +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param bit bit value to get + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match_bitflag(const char *name, const int bit, const char *match) +{ + const char *value = nvram_get_bitflag(name, bit); + BCM_REFERENCE(match); + return (value && !strcmp(value, match)); +} + +/* + * Inversely match an NVRAM variable. 
+ * @param name name of variable to match + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is not string + * equal to invmatch or FALSE otherwise + */ +static INLINE int +nvram_invmatch(const char *name, const char *invmatch) +{ + const char *value = nvram_get(name); + + /* In nvramstubs.c builds, nvram_get() is defined as returning zero, + * so the return line below never executes the strcmp(), + * resulting in 'invmatch' being an unused parameter. + * Make a ref to 'invmatch' to quiet the compiler warning. + */ + + BCM_REFERENCE(invmatch); + + return (value && strcmp(value, invmatch)); +} + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set(const char *name, const char *value); + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param bit bit value to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set_bitflag(const char *name, const int bit, const int value); +/* + * Unset an NVRAM variable. Pointers to previously set values + * remain valid until a set. + * @param name name of variable to unset + * @return 0 on success and errno on failure + * NOTE: use nvram_commit to commit this change to flash. + */ +extern int nvram_unset(const char *name); + +/* + * Commit NVRAM variables to permanent storage. 
All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @param nvram_corrupt true to corrupt nvram, false otherwise. + * @return 0 on success and errno on failure + */ +extern int nvram_commit_internal(bool nvram_corrupt); + +/* + * Commit NVRAM variables to permanent storage. All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @return 0 on success and errno on failure + */ +extern int nvram_commit(void); + +/* + * Get all NVRAM variables (format name=value\0 ... \0\0). + * @param buf buffer to store variables + * @param count size of buffer in bytes + * @return 0 on success and errno on failure + */ +extern int nvram_getall(char *nvram_buf, int count); + +/* + * returns the crc value of the nvram + * @param nvh nvram header pointer + */ +uint8 nvram_calc_crc(struct nvram_header * nvh); + +extern int nvram_space; +#endif /* _LANGUAGE_ASSEMBLY */ + +/* The NVRAM version number stored as an NVRAM variable */ +#define NVRAM_SOFTWARE_VERSION "1" + +#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ +#define NVRAM_CLEAR_MAGIC 0x0 +#define NVRAM_INVALID_MAGIC 0xFFFFFFFF +#define NVRAM_VERSION 1 +#define NVRAM_HEADER_SIZE 20 +/* This definition is for precommit staging, and will be removed */ +#define NVRAM_SPACE 0x8000 +/* For CFE builds this gets passed in thru the makefile */ +#ifndef MAX_NVRAM_SPACE +#define MAX_NVRAM_SPACE 0x10000 +#endif // endif +#define DEF_NVRAM_SPACE 0x8000 +#define ROM_ENVRAM_SPACE 0x1000 +#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */ + +#define NVRAM_MAX_VALUE_LEN 255 +#define NVRAM_MAX_PARAM_LEN 64 + +#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */ +#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */ + +/* Offsets to embedded nvram area */ +#define NVRAM_START_COMPRESSED 0x400 +#define NVRAM_START 0x1000 + +#define BCM_JUMBO_NVRAM_DELIMIT '\n' +#define BCM_JUMBO_START "Broadcom Jumbo Nvram file" + +#if 
(defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \ + defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__)) +#define IMAGE_SIZE "image_size" +#define BOOTPARTITION "bootpartition" +#define IMAGE_BOOT BOOTPARTITION +#define PARTIALBOOTS "partialboots" +#define MAXPARTIALBOOTS "maxpartialboots" +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" +#define LINUX_FIRST "linux" +#define LINUX_SECOND "linux2" +#endif // endif + +#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \ + defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__)) +/* Shared by all: CFE, Linux Kernel, and Ap */ +#define IMAGE_BOOT "image_boot" +#define BOOTPARTITION IMAGE_BOOT +/* CFE variables */ +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_SIZE "image_size" + +/* CFE and Linux Kernel shared variables */ +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" + +/* Linux application variables */ +#define LINUX_FIRST "linux" +#define LINUX_SECOND "linux2" +#define POLICY_TOGGLE "toggle" +#define LINUX_PART_TO_FLASH "linux_to_flash" +#define LINUX_FLASH_POLICY "linux_flash_policy" + +#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */ + +#endif /* _bcmnvram_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmpcie.h b/bcmdhd.100.10.315.x/include/bcmpcie.h new file mode 100644 index 0000000..fdc1d6e --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmpcie.h @@ -0,0 +1,522 @@ +/* + * Broadcom PCIE + * Software-specific definitions shared between device and host side + * Explains the shared area between host and dongle + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmpcie.h 765083 2018-05-31 02:45:13Z $ + */ + +#ifndef _bcmpcie_h_ +#define _bcmpcie_h_ + +#include + +#define ADDR_64(x) (x.addr) +#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr)) +#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr)) + +typedef struct { + uint32 low_addr; + uint32 high_addr; +} sh_addr_t; + +/* May be overridden by 43xxxxx-roml.mk */ +#if !defined(BCMPCIE_MAX_TX_FLOWS) +#define BCMPCIE_MAX_TX_FLOWS 40 +#endif /* ! BCMPCIE_MAX_TX_FLOWS */ + +#define PCIE_SHARED_VERSION_7 0x00007 +#define PCIE_SHARED_VERSION_6 0x00006 /* rev6 is compatible with rev 5 */ +#define PCIE_SHARED_VERSION_5 0x00005 /* rev6 is compatible with rev 5 */ +/** + * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that + * is located in device memory. 
+ */ +#define PCIE_SHARED_VERSION_MASK 0x000FF +#define PCIE_SHARED_ASSERT_BUILT 0x00100 +#define PCIE_SHARED_ASSERT 0x00200 +#define PCIE_SHARED_TRAP 0x00400 +#define PCIE_SHARED_IN_BRPT 0x00800 +#define PCIE_SHARED_SET_BRPT 0x01000 +#define PCIE_SHARED_PENDING_BRPT 0x02000 +/* BCMPCIE_SUPPORT_TX_PUSH_RING 0x04000 obsolete */ +#define PCIE_SHARED_EVT_SEQNUM 0x08000 +#define PCIE_SHARED_DMA_INDEX 0x10000 + +/** + * There are host types where a device interrupt can 'race ahead' of data written by the device into + * host memory. The dongle can avoid this condition using a variety of techniques (read barrier, + * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately + * these techniques have drawbacks on router platforms. For these platforms, it was decided to not + * avoid the condition, but to detect the condition instead and act on it. + * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM + */ +#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000 +#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000 +#define PCIE_SHARED_D2H_SYNC_MODE_MASK \ + (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM) +#define PCIE_SHARED_IDLE_FLOW_RING 0x80000 +#define PCIE_SHARED_2BYTE_INDICES 0x100000 + +#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */ +#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002 +#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */ +#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */ +#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */ +#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */ +#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */ +#define PCIE_SHARED_FAST_DELETE_RING 0x00000020 /* Fast Delete Ring */ +#define PCIE_SHARED_EVENT_BUF_POOL_MAX 0x000000c0 /* event buffer pool max bits */ +#define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS 6 /* event buffer pool max 
bit position */ + +/* dongle supports fatal buf log collection */ +#define PCIE_SHARED_FATAL_LOGBUG_VALID 0x200000 + +/* Implicit DMA with corerev 19 and after */ +#define PCIE_SHARED_IDMA 0x400000 + +/* MSI support */ +#define PCIE_SHARED_D2H_MSI_MULTI_MSG 0x800000 + +/* IFRM with corerev 19 and after */ +#define PCIE_SHARED_IFRM 0x1000000 + +/** + * From Rev6 and above, suspend/resume can be done using two handshake methods. + * 1. Using ctrl post/ctrl cmpl messages (Default rev6) + * 2. Using Mailbox data (old method as used in rev5) + * This shared flag indicates whether to overide rev6 default method and use mailbox for + * suspend/resume. + */ +#define PCIE_SHARED_USE_MAILBOX 0x2000000 + +/* Firmware compiled for mfgbuild purposes */ +#define PCIE_SHARED_MFGBUILD_FW 0x4000000 + +/* Firmware could use DB0 value as host timestamp */ +#define PCIE_SHARED_TIMESTAMP_DB0 0x8000000 +/* Firmware could use Hostready (IPC rev7) */ +#define PCIE_SHARED_HOSTRDY_SUPPORT 0x10000000 + +/* When set, Firmwar does not support OOB Device Wake based DS protocol */ +#define PCIE_SHARED_NO_OOB_DW 0x20000000 + +/* When set, Firmwar supports Inband DS protocol */ +#define PCIE_SHARED_INBAND_DS 0x40000000 + +/* use DAR registers */ +#define PCIE_SHARED_DAR 0x80000000 + +/** + * Following are the shared2 flags. All bits in flags have been used. 
A flags2 + * field got added and the definition for these flags come here: + */ +/* WAR: D11 txstatus through unused status field of PCIe completion header */ +#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000 +#define PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000 + +#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 + +#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002 + +/* BT logging support */ +#define PCIE_SHARED2_BT_LOGGING 0x00000004 +/* BT/WLAN snapshot upload support */ +#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 +/* submission count WAR */ +#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 + +/* Fast Delete ring support */ +#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020 + +/* Host SCB support */ +#define PCIE_SHARED2_HSCB 0x00000800 + +#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09 +#define PCIE_SHARED_H2D_MAGIC 0x12345678 + +#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100 /* using flags2 to indicate + firmware support added to reuse + timesync to update PKT txstatus + */ +/* Support Enhanced Debug Lane */ +#define PCIE_SHARED2_EDL_RING 0x00001000 + +/** + * Message rings convey messages between host and device. They are unidirectional, and are located + * in host memory. 
+ * + * This is the minimal set of message rings, known as 'common message rings': + */ +#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0 +#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1 +#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2 +#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3 +#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4 +#define BCMPCIE_COMMON_MSGRING_MAX_ID 4 + +#define BCMPCIE_H2D_COMMON_MSGRINGS 2 +#define BCMPCIE_D2H_COMMON_MSGRINGS 3 +#define BCMPCIE_COMMON_MSGRINGS 5 + +#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows)) + +/* different ring types */ +#define BCMPCIE_H2D_RING_TYPE_CTRL_SUBMIT 0x1 +#define BCMPCIE_H2D_RING_TYPE_TXFLOW_RING 0x2 +#define BCMPCIE_H2D_RING_TYPE_RXBUFPOST 0x3 +#define BCMPCIE_H2D_RING_TYPE_TXSUBMIT 0x4 +#define BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT 0x5 +#define BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT 0x6 + +#define BCMPCIE_D2H_RING_TYPE_CTRL_CPL 0x1 +#define BCMPCIE_D2H_RING_TYPE_TX_CPL 0x2 +#define BCMPCIE_D2H_RING_TYPE_RX_CPL 0x3 +#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4 +#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5 +#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6 +#define BCMPCIE_D2H_RING_TYPE_EDL 0x7 + +/** + * H2D and D2H, WR and RD index, are maintained in the following arrays: + * - Array of all H2D WR Indices + * - Array of all H2D RD Indices + * - Array of all D2H WR Indices + * - Array of all D2H RD Indices + * + * The offset of the WR or RD indexes (for common rings) in these arrays are + * listed below. Arrays ARE NOT indexed by a ring's id. 
+ * + * D2H common rings WR and RD index start from 0, even though their ringids + * start from BCMPCIE_H2D_COMMON_MSGRINGS + */ + +#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id) + +enum h2dring_idx { + /* H2D common rings */ + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT), + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT), + + /* First TxPost's WR or RD index starts after all H2D common rings */ + BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS) +}; + +#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \ + ((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS) + +enum d2hring_idx { + /* D2H Common Rings */ + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE), + BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE), + BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE) +}; + +/** + * Macros for managing arrays of RD WR indices: + * rw_index_sz: + * - in dongle, rw_index_sz is known at compile time + * - in host/DHD, rw_index_sz is derived from advertized pci_shared flags + * + * ring_idx: See h2dring_idx and d2hring_idx + */ + +/** Offset of a RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \ + ((rw_index_sz) * (ring_idx)) + +/** Fetch the address of RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \ + (void *)((uint32)(indices_array_base) + \ + BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx))) + +/** H2D DMA Indices array size: given max flow rings */ +#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \ + ((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows)) + +/** D2H DMA Indices array size */ +#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \ + 
((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS) + +/** + * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used + * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated + * both in host as well as device memory. + */ +typedef struct ring_mem { + uint16 idx; /* ring id */ + uint8 type; + uint8 rsvd; + uint16 max_item; /* Max number of items in flow ring */ + uint16 len_items; /* Items are fixed size. Length in bytes of one item */ + sh_addr_t base_addr; /* 64 bits address, either in host or device memory */ +} ring_mem_t; + +/** + * Per flow ring, information is maintained in device memory, eg at what address the ringmem and + * ringstate are located. The flow ring itself can be instantiated in either host or device memory. + * + * Perhaps this type should be renamed to make clear that it resides in device memory only. + */ +typedef struct ring_info { + uint32 ringmem_ptr; /* ring mem location in dongle memory */ + + /* Following arrays are indexed using h2dring_idx and d2hring_idx, and not + * by a ringid. + */ + + /* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */ + uint32 h2d_w_idx_ptr; /* Array of all H2D ring's WR indices */ + uint32 h2d_r_idx_ptr; /* Array of all H2D ring's RD indices */ + uint32 d2h_w_idx_ptr; /* Array of all D2H ring's WR indices */ + uint32 d2h_r_idx_ptr; /* Array of all D2H ring's RD indices */ + + /* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host. + * Host may directly fetch WR and RD indices from these host-side arrays. + * + * 64bit ptr to arrays of WR or RD indices for all rings in host memory. 
+ */ + sh_addr_t h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */ + sh_addr_t h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */ + sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */ + sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */ + + uint16 max_tx_flowrings; /* maximum number of H2D rings: common + flow */ + uint16 max_submission_queues; /* maximum number of H2D rings: common + flow */ + uint16 max_completion_rings; /* maximum number of H2D rings: common + flow */ + uint16 max_vdevs; /* max number of virtual interfaces supported */ + + sh_addr_t ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */ +} ring_info_t; + +/** + * A structure located in TCM that is shared between host and device, primarily used during + * initialization. + */ +typedef struct { + /** shared area version captured at flags 7:0 */ + uint32 flags; + + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /**< Address of hnd_cons_t */ + + uint32 msgtrace_addr; + + uint32 fwid; + + /* Used for debug/flow control */ + uint16 total_lfrag_pkt_cnt; + uint16 max_host_rxbufs; /* rsvd in spec */ + + uint32 dma_rxoffset; /* rsvd in spec */ + + /** these will be used for sleep request/ack, d3 req/ack */ + uint32 h2d_mb_data_ptr; + uint32 d2h_mb_data_ptr; + + /* information pertinent to host IPC/msgbuf channels */ + /** location in the TCM memory which has the ring_info */ + uint32 rings_info_ptr; + + /** block of host memory for the scratch buffer */ + uint32 host_dma_scratch_buffer_len; + sh_addr_t host_dma_scratch_buffer; + + /* location in host memory for scb host offload structures */ + sh_addr_t host_scb_addr; + uint32 host_scb_size; + + /* anonymous union for overloading fields in structure */ + union { + uint32 buzz_dbg_ptr; /* BUZZZ state format strings and trace buffer */ + struct { + /* Host provided trap buffer length in words */ + uint16 
device_trap_debug_buffer_len; + uint16 rsvd2; + }; + }; + + /* rev6 compatible changes */ + uint32 flags2; + uint32 host_cap; + + /* location in the host address space to write trap indication. + * At this point for the current rev of the spec, firmware will + * support only indications to 32 bit host addresses. + * This essentially is device_trap_debug_buffer_addr + */ + sh_addr_t host_trap_addr; + + /* location for host fatal error log buffer start address */ + uint32 device_fatal_logbuf_start; + + /* location in host memory for offloaded modules */ + sh_addr_t hoffload_addr; +} pciedev_shared_t; + +/* Device F/W provides the following access function: + * pciedev_shared_t *hnd_get_pciedev_shared(void); + */ + +/* host capabilities */ +#define HOSTCAP_PCIEAPI_VERSION_MASK 0x000000FF +#define HOSTCAP_H2D_VALID_PHASE 0x00000100 +#define HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE 0x00000200 +#define HOSTCAP_H2D_ENABLE_HOSTRDY 0x00000400 +#define HOSTCAP_DB0_TIMESTAMP 0x00000800 +#define HOSTCAP_DS_NO_OOB_DW 0x00001000 +#define HOSTCAP_DS_INBAND_DW 0x00002000 +#define HOSTCAP_H2D_IDMA 0x00004000 +#define HOSTCAP_H2D_IFRM 0x00008000 +#define HOSTCAP_H2D_DAR 0x00010000 +#define HOSTCAP_EXTENDED_TRAP_DATA 0x00020000 +#define HOSTCAP_TXSTATUS_METADATA 0x00040000 +#define HOSTCAP_BT_LOGGING 0x00080000 +#define HOSTCAP_SNAPSHOT_UPLOAD 0x00100000 +#define HOSTCAP_FAST_DELETE_RING 0x00200000 +#define HOSTCAP_PKT_TXSTATUS 0x00400000 +#define HOSTCAP_UR_FW_NO_TRAP 0x00800000 /* Don't trap on UR */ +#define HOSTCAP_HSCB 0x02000000 +/* Host support for extended device trap debug buffer */ +#define HOSTCAP_EXT_TRAP_DBGBUF 0x04000000 +/* Host support for enhanced debug lane */ +#define HOSTCAP_EDL_RING 0x10000000 + +/* extended trap debug buffer allocation sizes. Note that this buffer can be used for + * other trap related purposes also. 
+ */ +#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN (64u * 1024u) +#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MAX (256u * 1024u) + +/** + * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware + * support. + */ + +/* H2D mail box Data */ +#define H2D_HOST_D3_INFORM 0x00000001 +#define H2D_HOST_DS_ACK 0x00000002 +#define H2D_HOST_DS_NAK 0x00000004 +#define H2D_HOST_D0_INFORM_IN_USE 0x00000008 +#define H2D_HOST_D0_INFORM 0x00000010 +#define H2DMB_DS_ACTIVE 0x00000020 +#define H2DMB_DS_DEVICE_WAKE 0x00000040 +#define H2D_HOST_IDMA_INITED 0x00000080 +#define H2D_HOST_ACK_NOINT 0x00010000 /* d2h_ack interrupt ignore */ +#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */ +#define H2D_FW_TRAP 0x20000000 /**< h2d force TRAP */ +#define H2DMB_DS_HOST_SLEEP_INFORM H2D_HOST_D3_INFORM +#define H2DMB_DS_DEVICE_SLEEP_ACK H2D_HOST_DS_ACK +#define H2DMB_DS_DEVICE_SLEEP_NAK H2D_HOST_DS_NAK +#define H2DMB_D0_INFORM_IN_USE H2D_HOST_D0_INFORM_IN_USE +#define H2DMB_D0_INFORM H2D_HOST_D0_INFORM +#define H2DMB_FW_TRAP H2D_FW_TRAP +#define H2DMB_HOST_CONS_INT H2D_HOST_CONS_INT +#define H2DMB_DS_DEVICE_WAKE_ASSERT H2DMB_DS_DEVICE_WAKE +#define H2DMB_DS_DEVICE_WAKE_DEASSERT H2DMB_DS_ACTIVE + +/* D2H mail box Data */ +#define D2H_DEV_D3_ACK 0x00000001 +#define D2H_DEV_DS_ENTER_REQ 0x00000002 +#define D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008 +#define D2H_DEV_IDMA_INITED 0x00000010 +#define D2H_DEV_FWHALT 0x10000000 +#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000 +#define D2H_DEV_EXT_TRAP_DATA 0x20000000 +#define D2H_DEV_TRAP_IN_TRAP 0x40000000 +#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000 +/* Indicates trap due to HMAP violation */ +#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000 +/* Indicates whether HMAP violation was Write */ +#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000 + +#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK +#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ +#define 
D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE +#define D2HMB_FWHALT D2H_DEV_FWHALT +#define D2HMB_TRAP_IN_TRAP D2H_DEV_TRAP_IN_TRAP +#define D2HMB_EXT_TRAP_DATA D2H_DEV_EXT_TRAP_DATA +#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */ +#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ + D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED | D2H_DEV_FWHALT | \ + D2H_FWTRAP_MASK | D2H_DEV_EXT_TRAP_DATA | D2H_DEV_TRAP_IN_TRAP) +#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) + +/* Size of Extended Trap data Buffer */ +#define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096 + +/** These macro's operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */ +#define PREVTXP(i, d) (((i) == 0) ? ((d) - 1) : ((i) - 1)) +#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1)) +#define NEXTNTXP(i, n, d) ((((i)+(n)) >= (d)) ? 0 : ((i)+(n))) +#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w))) +#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1) + +/* Function can be used to notify host of FW halt */ +#define READ_AVAIL_SPACE(w, r, d) ((w >= r) ? (uint32)(w - r) : (uint32)(d - r)) +#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w)) +#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1) +#define CHECK_WRITE_SPACE(r, w, d) ((r) > (w)) ? \ + (uint32)((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? 
\ + (uint32)((d) - (w) - 1) : (uint32)((d) - (w)) + +#define CHECK_NOWRITE_SPACE(r, w, d) \ + (((uint32)(r) == (uint32)((w) + 1)) || (((r) == 0) && ((w) == ((d) - 1)))) + +#define WRT_PEND(x) ((x)->wr_pending) +#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) /**< advanced by producer */ +#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a)) + +#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) /**< advanced by consumer */ +#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a)) + +#define MODULO_RING_IDX(x, y) ((x) % (y)->bitmap_size) + +#define RING_READ_PTR(x) ((x)->ringstate->r_offset) +#define RING_WRITE_PTR(x) ((x)->ringstate->w_offset) +#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) +#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) +#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) +#define HOST_RING_BASE(x) ((x)->dma_buf.va) +#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \ + ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x)))) + +/* Trap types copied in the pciedev_shared.trap_addr */ +#define FW_INITIATED_TRAP_TYPE (0x1 << 7) +#define HEALTHCHECK_NODS_TRAP_TYPE (0x1 << 6) + +#endif /* _bcmpcie_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmpcispi.h b/bcmdhd.100.10.315.x/include/bcmpcispi.h new file mode 100644 index 0000000..3ca211b --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmpcispi.h @@ -0,0 +1,181 @@ +/* + * Broadcom PCI-SPI Host Controller Register Definitions + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmpcispi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_PCI_SPI_H +#define _BCM_PCI_SPI_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +typedef volatile struct { + uint32 spih_ctrl; /* 0x00 SPI Control Register */ + uint32 spih_stat; /* 0x04 SPI Status Register */ + uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */ + uint32 spih_ext; /* 0x0C SPI Extension Register */ + uint32 PAD[4]; /* 0x10-0x1F PADDING */ + + uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */ + uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */ + uint32 PAD[6]; /* 0x28-0x3F PADDING */ + + uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */ + uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */ + /* 1=Active High) */ + uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */ + uint32 spih_int_status; /* 0x4C SPI Interrupt Status */ + uint32 PAD[4]; /* 0x50-0x5F PADDING */ + + uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */ + uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */ + uint32 PAD[1]; /* 0x68 PADDING */ + uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */ + uint32 PAD[4]; /* 0x70-0x7F PADDING */ + uint32 PAD[8]; /* 0x80-0x9F PADDING */ + uint32 PAD[8]; /* 0xA0-0xBF PADDING */ + uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */ + uint32 spih_pll_status; /* 0xC4 PLL Status Register */ + uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */ + uint32 spih_clk_count; /* 0xCC External Clock Count Register */ + +} spih_regs_t; + +typedef volatile struct { + uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */ + uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */ + + uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address 
register */ + uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */ + uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */ + uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */ + uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */ + uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */ + uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */ + uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */ + uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */ + uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */ + uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */ + uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */ + uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */ + uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */ + uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */ + uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */ + uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */ + uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */ + uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */ + uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */ + uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */ + uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */ + uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */ + uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */ + uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */ + uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */ + + uint32 PAD[5]; /* 0x16C-0x17F PADDING */ + + uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */ + uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */ + uint32 W_BA1; /* 0x188 32 
R/W WISHBONE Image1 Base Address register */ + uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */ + uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */ + uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */ + uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */ + uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */ + uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */ + uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */ + uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */ + uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */ + uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */ + uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */ + uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */ + uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */ + uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */ + uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */ + uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */ + uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */ + uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */ + uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */ + uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */ + uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */ + uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */ + uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */ + + uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */ + uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */ + uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */ +} spih_pciregs_t; + +/* + * PCI Core interrupt enable and status bit 
definitions. + */ + +/* PCI Core ICR Register bit definitions */ +#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */ +#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */ +#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */ +#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */ +#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */ +#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */ + +/* PCI Core ISR Register bit definitions */ +#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */ +#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */ +#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */ +#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */ +#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */ + +/* Registers on the Wishbone bus */ +#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */ +#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */ +#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */ + +/* GPIO Bit definitions */ +#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */ +#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */ +#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */ + +/* SPI Status Register Bit definitions */ +#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */ +#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */ +#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */ +#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */ +#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */ +#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */ + +#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */ + +#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. 
*/ +#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */ + +/* Spin bit loop bound check */ +#define SPI_SPIN_BOUND 0xf4240 /* 1 million */ + +#endif /* _BCM_PCI_SPI_H */ diff --git a/bcmdhd.100.10.315.x/include/bcmperf.h b/bcmdhd.100.10.315.x/include/bcmperf.h new file mode 100644 index 0000000..93cbdd3 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmperf.h @@ -0,0 +1,39 @@ +/* + * Performance counters software interface. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmperf.h 514727 2014-11-12 03:02:48Z $ + */ +/* essai */ +#ifndef _BCMPERF_H_ +#define _BCMPERF_H_ +/* get cache hits and misses */ +#define BCMPERF_ENABLE_INSTRCOUNT() +#define BCMPERF_ENABLE_ICACHE_MISS() +#define BCMPERF_ENABLE_ICACHE_HIT() +#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) +#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) +#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) +#endif /* _BCMPERF_H_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmsdbus.h b/bcmdhd.100.10.315.x/include/bcmsdbus.h new file mode 100644 index 0000000..ba857cc --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsdbus.h @@ -0,0 +1,179 @@ +/* + * Definitions for API from sdio common code (bcmsdh) to individual + * host controller drivers. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdbus.h 689948 2017-03-14 05:21:03Z $ + */ + +#ifndef _sdio_api_h_ +#define _sdio_api_h_ + +#if defined(BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ + +#define SDIOH_API_RC_SUCCESS (0x00) +#define SDIOH_API_RC_FAIL (0x01) +#define SDIOH_API_SUCCESS(status) (status == 0) + +#define SDIOH_READ 0 /* Read request */ +#define SDIOH_WRITE 1 /* Write request */ + +#define SDIOH_DATA_FIX 0 /* Fixed addressing */ +#define SDIOH_DATA_INC 1 /* Incremental addressing */ + +#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */ +#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */ +#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */ + +#define SDIOH_DATA_PIO 0 /* PIO mode */ +#define SDIOH_DATA_DMA 1 /* DMA mode */ + +/* Max number of glommed pkts */ +#ifdef CUSTOM_MAX_TXGLOM_SIZE +#define SDPCM_MAXGLOM_SIZE CUSTOM_MAX_TXGLOM_SIZE +#else +#define SDPCM_MAXGLOM_SIZE 36 +#endif /* CUSTOM_MAX_TXGLOM_SIZE */ + +#define SDPCM_TXGLOM_CPY 0 /* SDIO 2.0 should use copy mode */ +#define SDPCM_TXGLOM_MDESC 1 /* SDIO 3.0 should use multi-desc mode */ + +#ifdef CUSTOM_DEF_TXGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE CUSTOM_DEF_TXGLOM_SIZE +#else +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif /* CUSTOM_DEF_TXGLOM_SIZE */ + +#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE +#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!" 
+#undef SDPCM_DEFGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif // endif + +#ifdef PKT_STATICS +typedef struct pkt_statics { + uint16 event_count; + uint32 event_size; + uint16 ctrl_count; + uint32 ctrl_size; + uint32 data_count; + uint32 data_size; + uint32 glom_cnt[SDPCM_MAXGLOM_SIZE]; + uint16 glom_max; + uint16 glom_count; + uint32 glom_size; + uint16 test_count; + uint32 test_size; +} pkt_statics_t; +#endif + +typedef int SDIOH_API_RC; + +/* SDio Host structure */ +typedef struct sdioh_info sdioh_info_t; + +/* callback function, taking one arg */ +typedef void (*sdioh_cb_fn_t)(void *); +#if defined(BT_OVER_SDIO) +extern +void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func); +#endif /* defined (BT_OVER_SDIO) */ + +extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh); +extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si); + +/* query whether SD interrupt is enabled or not */ +extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff); + +/* enable or disable SD interrupt */ +extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable); + +#if defined(DHD_DEBUG) +extern bool sdioh_interrupt_pending(sdioh_info_t *si); +#endif // endif + +/* read or write one byte using cmd52 */ +extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte); + +/* read or write 2/4 bytes using cmd53 */ +extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc, + uint addr, uint32 *word, uint nbyte); + +/* read or write any buffer using cmd53 */ +extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc, + uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer, + void *pkt); + +/* get cis data */ +extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length); +extern SDIOH_API_RC 
sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset); + +extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); +extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); + +/* query number of io functions */ +extern uint sdioh_query_iofnum(sdioh_info_t *si); + +/* handle iovars */ +extern int sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Issue abort to the specified function and clear controller as needed */ +extern int sdioh_abort(sdioh_info_t *si, uint fnc); + +/* Start and Stop SDIO without re-enumerating the SD card. */ +extern int sdioh_start(sdioh_info_t *si, int stage); +extern int sdioh_stop(sdioh_info_t *si); + +/* Wait system lock free */ +extern int sdioh_waitlockfree(sdioh_info_t *si); + +/* Reset and re-initialize the device */ +extern int sdioh_sdio_reset(sdioh_info_t *si); + +#ifdef BCMSPI +/* Function to pass gSPI specific device-status bits to dhd. */ +extern uint32 sdioh_get_dstatus(sdioh_info_t *si); + +/* chipid and chiprev info for lower layers to control sw WAR's for hw bugs. 
*/ +extern void sdioh_chipinfo(sdioh_info_t *si, uint32 chip, uint32 chiprev); +extern void sdioh_dwordmode(sdioh_info_t *si, bool set); +#endif /* BCMSPI */ + +#if defined(BCMSDIOH_STD) + #define SDIOH_SLEEP_ENABLED +#endif // endif +extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab); + +/* GPIO support */ +extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd); +extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab); +extern uint sdioh_set_mode(sdioh_info_t *sd, uint mode); + +#endif /* _sdio_api_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmsdh.h b/bcmdhd.100.10.315.x/include/bcmsdh.h new file mode 100644 index 0000000..82edb27 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsdh.h @@ -0,0 +1,271 @@ +/* + * SDIO host client driver interface of Broadcom HNBU + * export functions to client drivers + * abstract OS and BUS specific details of SDIO + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh.h 727623 2017-10-21 01:00:32Z $ + */ + +/** + * @file bcmsdh.h + */ + +#ifndef _bcmsdh_h_ +#define _bcmsdh_h_ + +#define BCMSDH_ERROR_VAL 0x0001 /* Error */ +#define BCMSDH_INFO_VAL 0x0002 /* Info */ +extern const uint bcmsdh_msglevel; + +#define BCMSDH_ERROR(x) printf x +#define BCMSDH_INFO(x) + +#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || \ + defined(BCMSDIOH_SPI)) +#define BCMSDH_ADAPTER +#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */ + +/* forward declarations */ +typedef struct bcmsdh_info bcmsdh_info_t; +typedef void (*bcmsdh_cb_fn_t)(void *); + +#if defined(BT_OVER_SDIO) +typedef enum { + NO_HANG_STATE = 0, + HANG_START_STATE = 1, + HANG_RECOVERY_STATE = 2 +} dhd_hang_state_t; +#endif // endif + +extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva); +/** + * BCMSDH API context + */ +struct bcmsdh_info +{ + bool init_success; /* underlying driver successfully attached */ + void *sdioh; /* handler for sdioh */ + uint32 vendevid; /* Target Vendor and Device ID on SD bus */ + osl_t *osh; + bool regfail; /* Save status of last reg_read/reg_write call */ + uint32 sbwad; /* Save backplane window address */ + void *os_cxt; /* Pointer to per-OS private data */ + bool force_sbwad_calc; /* forces calculation of sbwad instead of using cached value */ +}; + +/* Detach - freeup resources allocated in attach */ +extern int bcmsdh_detach(osl_t *osh, void *sdh); + +/* Query if SD device interrupts are enabled */ +extern bool bcmsdh_intr_query(void *sdh); + +/* Enable/disable SD interrupt */ +extern int bcmsdh_intr_enable(void *sdh); +extern int bcmsdh_intr_disable(void *sdh); + +/* Register/deregister device interrupt handler. 
*/ +extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); +extern int bcmsdh_intr_dereg(void *sdh); +/* Enable/disable SD card interrupt forward */ +extern void bcmsdh_intr_forward(void *sdh, bool pass); + +#if defined(DHD_DEBUG) +/* Query pending interrupt status from the host controller */ +extern bool bcmsdh_intr_pending(void *sdh); +#endif // endif + +/* Register a callback to be called if and when bcmsdh detects + * device removal. No-op in the case of non-removable/hardwired devices. + */ +extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); + +/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). + * fn: function number + * addr: unmodified SDIO-space address + * data: data byte to write + * err: pointer to error code (or NULL) + */ +extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err); +extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err); + +/* Read/Write 4bytes from/to cfg space */ +extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err); +extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err); + +/* Read CIS content for specified function. + * fn: function whose CIS is being requested (0 is common CIS) + * cis: pointer to memory location to place results + * length: number of bytes to read + * Internally, this routine uses the values from the cis base regs (0x9-0xB) + * to form an SDIO-space address to read the data from. + */ +extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length); +extern int bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint offset); + +/* Synchronous access to device (client) core registers via CMD53 to F1. + * addr: backplane address (i.e. 
>= regsva from attach) + * size: register width in bytes (2 or 4) + * data: data for register write + */ +extern uint32 bcmsdh_reg_read(void *sdh, uintptr addr, uint size); +extern uint32 bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data); + +/* set sb address window */ +extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set); + +/* Indicate if last reg read/write failed */ +extern bool bcmsdh_regfail(void *sdh); + +/* Buffer transfer to/from device (client) core via cmd53. + * fn: function number + * addr: backplane address (i.e. >= regsva from attach) + * flags: backplane width, address increment, sync/async + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * pkt: pointer to packet associated with buf (if any) + * complete: callback function for command completion (async only) + * handle: handle for completion callback (first arg in callback) + * Returns 0 or error code. + * NOTE: Async operation is not currently supported. + */ +typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting); +extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); +extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); + +extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len); +extern void bcmsdh_glom_clear(void *sdh); +extern uint bcmsdh_set_mode(void *sdh, uint mode); +extern bool bcmsdh_glom_enabled(void); +/* Flags bits */ +#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */ +#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */ +#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. 
sync request) */ +#define SDIO_BYTE_MODE 0x8 /* Byte mode request(non-block mode) */ + +/* Pending (non-error) return code */ +#define BCME_PENDING 1 + +/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). + * rw: read or write (0/1) + * addr: direct SDIO address + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * Returns 0 or error code. + */ +extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes); + +/* Issue an abort to the specified function */ +extern int bcmsdh_abort(void *sdh, uint fn); + +/* Start SDIO Host Controller communication */ +extern int bcmsdh_start(void *sdh, int stage); + +/* Stop SDIO Host Controller communication */ +extern int bcmsdh_stop(void *sdh); + +/* Wait system lock free */ +extern int bcmsdh_waitlockfree(void *sdh); + +/* Returns the "Device ID" of target device on the SDIO bus. */ +extern int bcmsdh_query_device(void *sdh); + +/* Returns the number of IO functions reported by the device */ +extern uint bcmsdh_query_iofnum(void *sdh); + +/* Miscellaneous knob tweaker. 
*/ +extern int bcmsdh_iovar_op(void *sdh, const char *name, + void *params, uint plen, void *arg, uint len, bool set); + +/* Reset and reinitialize the device */ +extern int bcmsdh_reset(bcmsdh_info_t *sdh); + +/* helper functions */ + +/* callback functions */ +typedef struct { + /* probe the device */ + void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot, + uint16 func, uint bustype, void * regsva, osl_t * osh, + void * param); + /* remove the device */ + void (*remove)(void *context); + /* can we suspend now */ + int (*suspend)(void *context); + /* resume from suspend */ + int (*resume)(void *context); +} bcmsdh_driver_t; + +/* platform specific/high level functions */ +extern int bcmsdh_register(bcmsdh_driver_t *driver); +extern void bcmsdh_unregister(void); +extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device); +extern void bcmsdh_device_remove(void * sdh); + +extern int bcmsdh_reg_sdio_notify(void* semaphore); +extern void bcmsdh_unreg_sdio_notify(void); + +#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) +extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context); +extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh); +extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable); +#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ +extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh); +extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh); +extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh); + +int bcmsdh_suspend(bcmsdh_info_t *bcmsdh); +int bcmsdh_resume(bcmsdh_info_t *bcmsdh); + +/* Function to pass device-status bits to DHD. 
*/ +extern uint32 bcmsdh_get_dstatus(void *sdh); + +/* Function to return current window addr */ +extern uint32 bcmsdh_cur_sbwad(void *sdh); + +/* function to force sbwad calculation instead of using cached value */ +extern void bcmsdh_force_sbwad_calc(void *sdh, bool force); + +/* Function to pass chipid and rev to lower layers for controlling pr's */ +extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev); + +#ifdef BCMSPI +extern void bcmsdh_dwordmode(void *sdh, bool set); +#endif /* BCMSPI */ + +extern int bcmsdh_sleep(void *sdh, bool enab); + +/* GPIO support */ +extern int bcmsdh_gpio_init(void *sd); +extern bool bcmsdh_gpioin(void *sd, uint32 gpio); +extern int bcmsdh_gpioouten(void *sd, uint32 gpio); +extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab); + +#endif /* _bcmsdh_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h b/bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h new file mode 100644 index 0000000..4265bc6 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsdh_sdmmc.h @@ -0,0 +1,128 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh_sdmmc.h 753315 2018-03-21 04:10:12Z $ + */ + +#ifndef __BCMSDH_SDMMC_H__ +#define __BCMSDH_SDMMC_H__ + +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0) +#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0) +#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0) +#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0) +#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) +#define sd_cost(x) do { if (sd_msglevel & SDH_COST_VAL) printf x; } while (0) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SD4 2 +#define CLIENT_INTR 0x100 /* Get rid of this! 
*/ +#define SDIOH_SDMMC_MAX_SG_ENTRIES 64 + +struct sdioh_info { + osl_t *osh; /* osh handler */ + void *bcmsdh; /* upper layer handle */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + uint16 intmask; /* Current active interrupts */ + + int intrcount; /* Client interrupts */ + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + bool use_rxchain; + struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES]; + struct sdio_func fake_func0; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; + uint sd_clk_rate; + uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdh_sdmmc.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); + +/************************************************************** + * Internal interfaces: bcmsdh_sdmmc.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size); +extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ 
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq); +extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); + +#ifdef GLOBAL_SDMMC_INSTANCE +typedef struct _BCMSDH_SDMMC_INSTANCE { + sdioh_info_t *sd; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; +} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE; +#endif + +#endif /* __BCMSDH_SDMMC_H__ */ diff --git a/bcmdhd.100.10.315.x/include/bcmsdpcm.h b/bcmdhd.100.10.315.x/include/bcmsdpcm.h new file mode 100644 index 0000000..c5665f1 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsdpcm.h @@ -0,0 +1,304 @@ +/* + * Broadcom SDIO/PCMCIA + * Software-specific definitions shared between device and host side + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdpcm.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _bcmsdpcm_h_ +#define _bcmsdpcm_h_ + +/* + * Software allocation of To SB Mailbox resources + */ + +/* intstatus bits */ +#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */ +#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */ +#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */ +#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */ + +#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT) + +/* tosbmailbox bits corresponding to intstatus bits */ +#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */ +#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */ +#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */ +#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */ +#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ + +/* tosbmailboxdata */ + +#ifdef DS_PROT +/* Bit msgs for custom deep sleep protocol */ +#define SMB_DATA_D3INFORM 0x100 /* host announcing D3 entry */ +#define SMB_DATA_DSACK 0x200 /* host acking a deepsleep request */ +#define SMB_DATA_DSNACK 0x400 /* host nacking a deepsleep request */ +#endif /* DS_PROT */ +/* force a trap */ +#define SMB_DATA_TRAP 0x800 /* host forcing trap */ + +#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ +#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ + +/* + * Software allocation of To Host Mailbox resources + */ + +/* intstatus bits */ +#define I_HMB_INT_ACK I_HMB_SW0 /* To Host Mailbox Dev Interrupt ACK */ +#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ +#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ +#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ +#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */ + +#define 
I_TOHOSTMAIL (I_HMB_INT_ACK | I_HMB_FRAME_IND | I_HMB_HOST_INT) + +/* tohostmailbox bits corresponding to intstatus bits */ +#define HMB_INT_ACK (1 << 0) /* To Host Mailbox Dev Interrupt ACK */ +#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ +#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ +#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ + +/* tohostmailboxdata */ +#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */ +#define HMB_DATA_DEVREADY 0x02 /* we're ready to to talk to host after enable */ +#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */ +#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */ +#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */ + +#ifdef DS_PROT +/* Bit msgs for custom deep sleep protocol */ +#define HMB_DATA_DSREQ 0x100 /* firmware requesting deepsleep entry */ +#define HMB_DATA_DSEXIT 0x200 /* firmware announcing deepsleep exit */ +#define HMB_DATA_D3ACK 0x400 /* firmware acking a D3 notice from host */ +#define HMB_DATA_D3EXIT 0x800 /* firmware announcing D3 exit */ +#define HMB_DATA_DSPROT_MASK 0xf00 +#endif /* DS_PROT */ + +#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */ +#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */ + +#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */ +#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */ + +/* + * Software-defined protocol header + */ + +/* Current protocol version */ +#define SDPCM_PROT_VERSION 4 + +/* SW frame header */ +#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */ +#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */ + +#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */ +#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */ +#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) 
/* p starts w/SW Header */ + +#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */ +#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */ +#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */ + +/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */ +#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */ +#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */ +#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */ +#define SDPCM_NEXTLEN_OFFSET 2 + +/* Data Offset from SOF (HW Tag, SW Tag, Pad) */ +#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */ +#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff) +#define SDPCM_DOFFSET_MASK 0xff000000 +#define SDPCM_DOFFSET_SHIFT 24 + +#define SDPCM_FCMASK_OFFSET 4 /* Flow control */ +#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff) +#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */ +#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff) +#define SDPCM_VERSION_OFFSET 6 /* Version # */ +#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff) +#define SDPCM_UNUSED_OFFSET 7 /* Spare */ +#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff) + +#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */ + +/* logical channel numbers */ +#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */ +#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */ +#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */ +#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */ +#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */ +#define SDPCM_MAX_CHANNEL 15 + +#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */ + +#define SDPCM_FLAG_RESVD0 0x01 +#define SDPCM_FLAG_RESVD1 0x02 +#define SDPCM_FLAG_GSPI_TXENAB 0x04 +#define SDPCM_FLAG_GLOMDESC 
0x08 /* Superframe descriptor mask */ + +/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */ +#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT) + +#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80) + +/* For TEST_CHANNEL packets, define another 4-byte header */ +#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2); + * Semantics of Ext byte depend on command. + * Len is current or requested frame length, not + * including test header; sent little-endian. + */ +#define SDPCM_TEST_PKT_CNT_FLD_LEN 4 /* Packet count filed legth */ +#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */ +#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */ +#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */ +#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count + * (Backward compatabilty) Set frame count in a + * 4 byte filed adjacent to the HDR + */ +#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. 
Ext is boolean on/off + * Set frame count in a 4 byte filed adjacent to + * the HDR + */ + +/* Handy macro for filling in datagen packets with a pattern */ +#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno)) + +/* + * Software counters (first part matches hardware counters) + */ + +typedef volatile struct { + uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */ + uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */ + uint32 wroutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */ + uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */ + uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */ + uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */ + uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */ + uint32 rxdescuflo; /* receive descriptor underflows */ + uint32 rxfifooflo; /* receive fifo overflows */ + uint32 txfifouflo; /* transmit fifo underflows */ + uint32 runt; /* runt (too short) frames recv'd from bus */ + uint32 badlen; /* frame's rxh len does not match its hw tag len */ + uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */ + uint32 seqbreak; /* break in sequence # space from one rx frame to the next */ + uint32 rxfcrc; /* frame rx header indicates crc error */ + uint32 rxfwoos; /* frame rx header indicates write out of sync */ + uint32 rxfwft; /* frame rx header indicates write frame termination */ + uint32 rxfabort; /* frame rx header indicates frame aborted */ + uint32 woosint; /* write out of sync interrupt */ + uint32 roosint; /* read out of sync interrupt */ + uint32 rftermint; /* read frame terminate interrupt */ + uint32 
wftermint; /* write frame terminate interrupt */ +} sdpcmd_cnt_t; + +/* + * Register Access Macros + */ + +#define SDIODREV_IS(var, val) ((var) == (val)) +#define SDIODREV_GE(var, val) ((var) >= (val)) +#define SDIODREV_GT(var, val) ((var) > (val)) +#define SDIODREV_LT(var, val) ((var) < (val)) +#define SDIODREV_LE(var, val) ((var) <= (val)) + +#define SDIODDMAREG32(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv)) + +#define SDIODDMAREG64(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv)) + +#define SDIODDMAREG(h, dir, chnl) \ + (SDIODREV_LT((h)->corerev, 1) ? \ + SDIODDMAREG32((h), (dir), (chnl)) : \ + SDIODDMAREG64((h), (dir), (chnl))) + +#define PCMDDMAREG(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv)) + +#define SDPCMDMAREG(h, dir, chnl, coreid) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODDMAREG(h, dir, chnl) : \ + PCMDDMAREG(h, dir, chnl)) + +#define SDIODFIFOREG(h, corerev) \ + (SDIODREV_LT((corerev), 1) ? \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo))) + +#define PCMDFIFOREG(h) \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo)) + +#define SDPCMFIFOREG(h, coreid, corerev) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODFIFOREG(h, corerev) : \ + PCMDFIFOREG(h)) + +/* + * Shared structure between dongle and the host. + * The structure contains pointers to trap or assert information. 
+ */ +#define SDPCM_SHARED_VERSION 0x0001 +#define SDPCM_SHARED_VERSION_MASK 0x00FF +#define SDPCM_SHARED_ASSERT_BUILT 0x0100 +#define SDPCM_SHARED_ASSERT 0x0200 +#define SDPCM_SHARED_TRAP 0x0400 +#define SDPCM_SHARED_IN_BRPT 0x0800 +#define SDPCM_SHARED_SET_BRPT 0x1000 +#define SDPCM_SHARED_PENDING_BRPT 0x2000 +#define SDPCM_SHARED_FATAL_LOGBUF_VALID 0x100000 + +typedef struct { + uint32 flags; + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /* Address of hnd_cons_t */ + uint32 msgtrace_addr; + uint32 fwid; + uint32 device_fatal_logbuf_start; +} sdpcm_shared_t; + +/* Device F/W provides the following access function: + * sdpcm_shared_t *hnd_get_sdpcm_shared(void); + */ + +#endif /* _bcmsdpcm_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmsdspi.h b/bcmdhd.100.10.315.x/include/bcmsdspi.h new file mode 100644 index 0000000..338066d --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsdspi.h @@ -0,0 +1,138 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdspi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SD_SPI_H +#define _BCM_SD_SPI_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + + uint lockcount; /* nest count of sdspi_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 
local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + bool got_hcint; /* Host Controller interrupt. */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current register transfer size */ + uint32 cmd53_wr_data; /* Used to pass CMD53 write data */ + uint32 card_response; /* Used to pass back response status byte */ + uint32 card_rsp_data; /* Used to pass back response data word */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdspi.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmsdspi.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size); +extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size); + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void 
spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#endif /* _BCM_SD_SPI_H */ diff --git a/bcmdhd.100.10.315.x/include/bcmsdstd.h b/bcmdhd.100.10.315.x/include/bcmsdstd.h new file mode 100644 index 0000000..ab79205 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsdstd.h @@ -0,0 +1,281 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdstd.h 768214 2018-06-19 03:53:58Z $ + */ +#ifndef _BCM_SD_STD_H +#define _BCM_SD_STD_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#define sd_dma(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); +/* Allocate/init/free per-OS private data */ +extern int sdstd_osinit(sdioh_info_t *sd); +extern void sdstd_osfree(sdioh_info_t *sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 +#define SDIOH_MODE_SD1 1 +#define SDIOH_MODE_SD4 2 + +#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */ +#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */ + +#define SDIOH_TYPE_ARASAN_HDK 1 +#define SDIOH_TYPE_BCM27XX 2 +#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */ +#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */ +#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */ + +/* For linux, allow yielding for dongle */ +#define BCMSDYIELD + +/* Expected card status value for CMD7 */ +#define SDIOH_CMD7_EXP_STATUS 0x00001E00 + +#define RETRIES_LARGE 100000 +#define sdstd_os_yield(sd) do {} while (0) +#define RETRIES_SMALL 100 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +#define USE_FIFO 0x8 /* Fifo vs non-fifo */ + +#define CLIENT_INTR 0x100 /* Get rid of this! 
*/ + +#define HC_INTR_RETUNING 0x1000 + +#ifdef BCMSDIOH_TXGLOM +/* Total glom pkt can not exceed 64K + * need one more slot for glom padding packet + */ +#define SDIOH_MAXGLOM_SIZE (40+1) + +typedef struct glom_buf { + uint32 count; /* Total number of pkts queued */ + void *dma_buf_arr[SDIOH_MAXGLOM_SIZE]; /* Frame address */ + ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */ + uint16 nbytes[SDIOH_MAXGLOM_SIZE]; /* Size of each frame */ +} glom_buf_t; +#endif // endif + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint32 curr_caps; /* max current capabilities reg */ + + osl_t *osh; /* osh handler */ + volatile char *mem_space; /* pci device memory va */ + uint lockcount; /* nest count of sdstd_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint target_dev; /* Target device ID */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + void *bcmsdh; /* handler to upper layer stack (bcmsdh) */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + int local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; /* DMA Buffer virtual address */ + ulong dma_phys; /* DMA Buffer physical address */ + void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */ + ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */ + + /* adjustments needed to make the dma align properly */ + void *dma_start_buf; + ulong dma_start_phys; + uint alloced_dma_size; + void *adma2_dscr_start_buf; + ulong adma2_dscr_start_phys; + uint alloced_adma2_dscr_size; + + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + bool got_hcint; /* local interrupt flag */ + uint16 last_intrstatus; /* to cache intrstatus */ + int host_UHSISupported; /* whether UHSI is supported for HC. */ + int card_UHSI_voltage_Supported; /* whether UHSI is supported for + * Card in terms of Voltage [1.8 or 3.3]. + */ + int global_UHSI_Supp; /* type of UHSI support in both host and card. 
+ * HOST_SDR_UNSUPP: capabilities not supported/matched + * HOST_SDR_12_25: SDR12 and SDR25 supported + * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd + */ + volatile int sd3_dat_state; /* data transfer state used for retuning check */ + volatile int sd3_tun_state; /* tuning state used for retuning check */ + bool sd3_tuning_reqd; /* tuning requirement parameter */ + bool sd3_tuning_disable; /* tuning disable due to bus sleeping */ + uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */ +#ifdef BCMSDIOH_TXGLOM + glom_buf_t glom_info; /* pkt information used for glomming */ + uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */ +#endif // endif +}; + +#define DMA_MODE_NONE 0 +#define DMA_MODE_SDMA 1 +#define DMA_MODE_ADMA1 2 +#define DMA_MODE_ADMA2 3 +#define DMA_MODE_ADMA2_64 4 +#define DMA_MODE_AUTO -1 + +#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE)) + +/* States for Tuning and corr data */ +#define TUNING_IDLE 0 +#define TUNING_START 1 +#define TUNING_START_AFTER_DAT 2 +#define TUNING_ONGOING 3 + +#define DATA_TRANSFER_IDLE 0 +#define DATA_TRANSFER_ONGOING 1 + +#define CHECK_TUNING_PRE_DATA 1 +#define CHECK_TUNING_POST_DATA 2 + +#ifdef DHD_DEBUG +#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01 +#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00 +#endif // endif + +/************************************************************ + * Internal interfaces: per-port references into bcmsdstd.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdstd_devintr_on(sdioh_info_t *sd); +extern void sdstd_devintr_off(sdioh_info_t *sd); + +/* Enable/disable interrupts for local controller events */ +extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err); +extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err); + +/* Wait for 
specified interrupt and error bits to be set */ +extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err); + +/************************************************************** + * Internal interfaces: bcmsdstd.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdstd_reg_map(osl_t *osh, ulong addr, int size); +extern void sdstd_reg_unmap(osl_t *osh, ulong addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdstd_register_irq(sdioh_info_t *sd, uint irq); +extern void sdstd_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void sdstd_lock(sdioh_info_t *sd); +extern void sdstd_unlock(sdioh_info_t *sd); +extern void sdstd_waitlockfree(sdioh_info_t *sd); + +/* OS-specific wrappers for safe concurrent register access */ +extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags); +extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags); + +/* OS-specific wait-for-interrupt-or-status */ +extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits); + +/* used by bcmsdstd_linux [implemented in sdstd] */ +extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd); +extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd); +extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd); +extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param); +extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd); +extern int sdstd_3_get_tune_state(sdioh_info_t *sd); +extern int sdstd_3_get_data_state(sdioh_info_t *sd); +extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state); +extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state); +extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd); +extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd); +extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode); + +/* used by sdstd [implemented in 
bcmsdstd_linux/ndis] */ +extern void sdstd_3_start_tuning(sdioh_info_t *sd); +extern void sdstd_3_osinit_tuning(sdioh_info_t *sd); +extern void sdstd_3_osclean_tuning(sdioh_info_t *sd); + +extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); +#endif /* _BCM_SD_STD_H */ diff --git a/bcmdhd.100.10.315.x/include/bcmspi.h b/bcmdhd.100.10.315.x/include/bcmspi.h new file mode 100644 index 0000000..92a7da1 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmspi.h @@ -0,0 +1,43 @@ +/* + * Broadcom SPI Low-Level Hardware Driver API + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmspi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_H +#define _BCM_SPI_H + +extern void spi_devintr_off(sdioh_info_t *sd); +extern void spi_devintr_on(sdioh_info_t *sd); +extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); +extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); +extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); +extern bool spi_hw_attach(sdioh_info_t *sd); +extern bool spi_hw_detach(sdioh_info_t *sd); +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +extern void spi_spinbits(sdioh_info_t *sd); +extern void spi_waitbits(sdioh_info_t *sd, bool yield); + +#endif /* _BCM_SPI_H */ diff --git a/bcmdhd.100.10.315.x/include/bcmspibrcm.h b/bcmdhd.100.10.315.x/include/bcmspibrcm.h new file mode 100644 index 0000000..fe3a0d7 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmspibrcm.h @@ -0,0 +1,167 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmspibrcm.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_BRCM_H +#define _BCM_SPI_BRCM_H + +#ifndef SPI_MAX_IOFUNCS +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 +#endif // endif +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#if defined(DHD_DEBUG) +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0) +#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0) +#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0) +#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0) +#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) +#else +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#endif // endif + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_F1 64 +#define BLOCK_SIZE_F2 2048 +#define BLOCK_SIZE_F3 2048 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 +#define ERROR_UF 2 +#define ERROR_OF 3 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ +#ifndef BCMSPI_ANDROID + void *bar0; /* BAR0 for PCI Device */ +#endif /* !BCMSPI_ANDROID */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI 
Controller's private data struct */ + uint lockcount; /* nest count of spi_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 card_dstatus; /* 32bit device status */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SPI_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + uint32 wordlen; /* host processor 16/32bits */ + uint32 prev_fun; + uint32 chip; + uint32 chiprev; + bool resp_delay_all; + bool dwordmode; + bool resp_delay_new; + + struct spierrstats_t spierrstats; +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmspibrcm.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmspibrcm.c references to per-port code + */ + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */ +#define SPI_RW_FLAG_S 31 +#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */ +#define SPI_ACCESS_S 30 +#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */ +#define SPI_FUNCTION_S 28 +#define SPI_REG_ADDR_M 
BITFIELD_MASK(17) /* Bit [27:11] - Address */ +#define SPI_REG_ADDR_S 11 +#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */ +#define SPI_LEN_S 0 + +#endif /* _BCM_SPI_BRCM_H */ diff --git a/bcmdhd.100.10.315.x/include/bcmsrom_fmt.h b/bcmdhd.100.10.315.x/include/bcmsrom_fmt.h new file mode 100644 index 0000000..2bb66ab --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsrom_fmt.h @@ -0,0 +1,1013 @@ +/* + * SROM format definition. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsrom_fmt.h 688657 2017-03-07 10:12:56Z $ + */ + +#ifndef _bcmsrom_fmt_h_ +#define _bcmsrom_fmt_h_ + +#define SROM_MAXREV 16 /* max revision supported by driver */ + +/* Maximum srom: 16 Kilobits == 2048 bytes */ + +#define SROM_MAX 2048 +#define SROM_MAXW 1024 + +#ifdef LARGE_NVRAM_MAXSZ +#define VARS_MAX LARGE_NVRAM_MAXSZ +#else +#define LARGE_NVRAM_MAXSZ 8192 +#define VARS_MAX LARGE_NVRAM_MAXSZ +#endif /* LARGE_NVRAM_MAXSZ */ + +/* PCI fields */ +#define PCI_F0DEVID 48 + +#define SROM_WORDS 64 +#define SROM_SIGN_MINWORDS 128 +#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */ + +#define SROM_SSID 2 +#define SROM_SVID 3 + +#define SROM_WL1LHMAXP 29 + +#define SROM_WL1LPAB0 30 +#define SROM_WL1LPAB1 31 +#define SROM_WL1LPAB2 32 + +#define SROM_WL1HPAB0 33 +#define SROM_WL1HPAB1 34 +#define SROM_WL1HPAB2 35 + +#define SROM_MACHI_IL0 36 +#define SROM_MACMID_IL0 37 +#define SROM_MACLO_IL0 38 +#define SROM_MACHI_ET0 39 +#define SROM_MACMID_ET0 40 +#define SROM_MACLO_ET0 41 +#define SROM_MACHI_ET1 42 +#define SROM_MACMID_ET1 43 +#define SROM_MACLO_ET1 44 +#define SROM3_MACHI 37 +#define SROM3_MACMID 38 +#define SROM3_MACLO 39 + +#define SROM_BXARSSI2G 40 +#define SROM_BXARSSI5G 41 + +#define SROM_TRI52G 42 +#define SROM_TRI5GHL 43 + +#define SROM_RXPO52G 45 + +#define SROM2_ENETPHY 45 + +#define SROM_AABREV 46 +/* Fields in AABREV */ +#define SROM_BR_MASK 0x00ff +#define SROM_CC_MASK 0x0f00 +#define SROM_CC_SHIFT 8 +#define SROM_AA0_MASK 0x3000 +#define SROM_AA0_SHIFT 12 +#define SROM_AA1_MASK 0xc000 +#define SROM_AA1_SHIFT 14 + +#define SROM_WL0PAB0 47 +#define SROM_WL0PAB1 48 +#define SROM_WL0PAB2 49 + +#define SROM_LEDBH10 50 +#define SROM_LEDBH32 51 + +#define SROM_WL10MAXP 52 + +#define SROM_WL1PAB0 53 +#define SROM_WL1PAB1 54 +#define SROM_WL1PAB2 55 + +#define SROM_ITT 56 + +#define SROM_BFL 57 +#define SROM_BFL2 28 +#define SROM3_BFL2 61 + +#define SROM_AG10 58 + +#define SROM_CCODE 59 + +#define SROM_OPO 60 + +#define 
SROM3_LEDDC 62 + +#define SROM_CRCREV 63 + +/* SROM Rev 4: Reallocate the software part of the srom to accommodate + * MIMO features. It assumes up to two PCIE functions and 440 bytes + * of useable srom i.e. the useable storage in chips with OTP that + * implements hardware redundancy. + */ + +#define SROM4_WORDS 220 + +#define SROM4_SIGN 32 +#define SROM4_SIGNATURE 0x5372 + +#define SROM4_BREV 33 + +#define SROM4_BFL0 34 +#define SROM4_BFL1 35 +#define SROM4_BFL2 36 +#define SROM4_BFL3 37 +#define SROM5_BFL0 37 +#define SROM5_BFL1 38 +#define SROM5_BFL2 39 +#define SROM5_BFL3 40 + +#define SROM4_MACHI 38 +#define SROM4_MACMID 39 +#define SROM4_MACLO 40 +#define SROM5_MACHI 41 +#define SROM5_MACMID 42 +#define SROM5_MACLO 43 + +#define SROM4_CCODE 41 +#define SROM4_REGREV 42 +#define SROM5_CCODE 34 +#define SROM5_REGREV 35 + +#define SROM4_LEDBH10 43 +#define SROM4_LEDBH32 44 +#define SROM5_LEDBH10 59 +#define SROM5_LEDBH32 60 + +#define SROM4_LEDDC 45 +#define SROM5_LEDDC 45 + +#define SROM4_AA 46 +#define SROM4_AA2G_MASK 0x00ff +#define SROM4_AA2G_SHIFT 0 +#define SROM4_AA5G_MASK 0xff00 +#define SROM4_AA5G_SHIFT 8 + +#define SROM4_AG10 47 +#define SROM4_AG32 48 + +#define SROM4_TXPID2G 49 +#define SROM4_TXPID5G 51 +#define SROM4_TXPID5GL 53 +#define SROM4_TXPID5GH 55 + +#define SROM4_TXRXC 61 +#define SROM4_TXCHAIN_MASK 0x000f +#define SROM4_TXCHAIN_SHIFT 0 +#define SROM4_RXCHAIN_MASK 0x00f0 +#define SROM4_RXCHAIN_SHIFT 4 +#define SROM4_SWITCH_MASK 0xff00 +#define SROM4_SWITCH_SHIFT 8 + +/* Per-path fields */ +#define MAX_PATH_SROM 4 +#define SROM4_PATH0 64 +#define SROM4_PATH1 87 +#define SROM4_PATH2 110 +#define SROM4_PATH3 133 + +#define SROM4_2G_ITT_MAXP 0 +#define SROM4_2G_PA 1 +#define SROM4_5G_ITT_MAXP 5 +#define SROM4_5GLH_MAXP 6 +#define SROM4_5G_PA 7 +#define SROM4_5GL_PA 11 +#define SROM4_5GH_PA 15 + +/* Fields in the ITT_MAXP and 5GLH_MAXP words */ +#define B2G_MAXP_MASK 0xff +#define B2G_ITT_SHIFT 8 +#define B5G_MAXP_MASK 0xff +#define B5G_ITT_SHIFT 
8 +#define B5GH_MAXP_MASK 0xff +#define B5GL_MAXP_SHIFT 8 + +/* All the myriad power offsets */ +#define SROM4_2G_CCKPO 156 +#define SROM4_2G_OFDMPO 157 +#define SROM4_5G_OFDMPO 159 +#define SROM4_5GL_OFDMPO 161 +#define SROM4_5GH_OFDMPO 163 +#define SROM4_2G_MCSPO 165 +#define SROM4_5G_MCSPO 173 +#define SROM4_5GL_MCSPO 181 +#define SROM4_5GH_MCSPO 189 +#define SROM4_CDDPO 197 +#define SROM4_STBCPO 198 +#define SROM4_BW40PO 199 +#define SROM4_BWDUPPO 200 + +#define SROM4_CRCREV 219 + +/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6. + * This is a combined srom for both MIMO and SISO boards, usable in + * the .130 4Kilobit OTP with hardware redundancy. + */ + +#define SROM8_SIGN 64 + +#define SROM8_BREV 65 + +#define SROM8_BFL0 66 +#define SROM8_BFL1 67 +#define SROM8_BFL2 68 +#define SROM8_BFL3 69 + +#define SROM8_MACHI 70 +#define SROM8_MACMID 71 +#define SROM8_MACLO 72 + +#define SROM8_CCODE 73 +#define SROM8_REGREV 74 + +#define SROM8_LEDBH10 75 +#define SROM8_LEDBH32 76 + +#define SROM8_LEDDC 77 + +#define SROM8_AA 78 + +#define SROM8_AG10 79 +#define SROM8_AG32 80 + +#define SROM8_TXRXC 81 + +#define SROM8_BXARSSI2G 82 +#define SROM8_BXARSSI5G 83 +#define SROM8_TRI52G 84 +#define SROM8_TRI5GHL 85 +#define SROM8_RXPO52G 86 + +#define SROM8_FEM2G 87 +#define SROM8_FEM5G 88 +#define SROM8_FEM_ANTSWLUT_MASK 0xf800 +#define SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SROM8_FEM_TR_ISO_MASK 0x0700 +#define SROM8_FEM_TR_ISO_SHIFT 8 +#define SROM8_FEM_PDET_RANGE_MASK 0x00f8 +#define SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006 +#define SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SROM8_FEM_TSSIPOS_MASK 0x0001 +#define SROM8_FEM_TSSIPOS_SHIFT 0 + +#define SROM8_THERMAL 89 + +/* Temp sense related entries */ +#define SROM8_MPWR_RAWTS 90 +#define SROM8_TS_SLP_OPT_CORRX 91 +/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */ +#define SROM8_FOC_HWIQ_IQSWP 92 + +#define SROM8_EXTLNAGAIN 93 + +/* 
Temperature delta for PHY calibration */ +#define SROM8_PHYCAL_TEMPDELTA 94 + +/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */ +#define SROM8_MPWR_1_AND_2 95 + +/* Per-path offsets & fields */ +#define SROM8_PATH0 96 +#define SROM8_PATH1 112 +#define SROM8_PATH2 128 +#define SROM8_PATH3 144 + +#define SROM8_2G_ITT_MAXP 0 +#define SROM8_2G_PA 1 +#define SROM8_5G_ITT_MAXP 4 +#define SROM8_5GLH_MAXP 5 +#define SROM8_5G_PA 6 +#define SROM8_5GL_PA 9 +#define SROM8_5GH_PA 12 + +/* All the myriad power offsets */ +#define SROM8_2G_CCKPO 160 + +#define SROM8_2G_OFDMPO 161 +#define SROM8_5G_OFDMPO 163 +#define SROM8_5GL_OFDMPO 165 +#define SROM8_5GH_OFDMPO 167 + +#define SROM8_2G_MCSPO 169 +#define SROM8_5G_MCSPO 177 +#define SROM8_5GL_MCSPO 185 +#define SROM8_5GH_MCSPO 193 + +#define SROM8_CDDPO 201 +#define SROM8_STBCPO 202 +#define SROM8_BW40PO 203 +#define SROM8_BWDUPPO 204 + +/* SISO PA parameters are in the path0 spaces */ +#define SROM8_SISO 96 + +/* Legacy names for SISO PA parameters */ +#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP) +#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA) +#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1) +#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2) +#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP) +#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP) +#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA) +#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1) +#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2) +#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA) +#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1) +#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2) +#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA) +#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1) +#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2) + +#define SROM8_CRCREV 219 + +/* SROM REV 9 */ +#define SROM9_2GPO_CCKBW20 160 +#define SROM9_2GPO_CCKBW20UL 161 +#define 
SROM9_2GPO_LOFDMBW20 162 +#define SROM9_2GPO_LOFDMBW20UL 164 + +#define SROM9_5GLPO_LOFDMBW20 166 +#define SROM9_5GLPO_LOFDMBW20UL 168 +#define SROM9_5GMPO_LOFDMBW20 170 +#define SROM9_5GMPO_LOFDMBW20UL 172 +#define SROM9_5GHPO_LOFDMBW20 174 +#define SROM9_5GHPO_LOFDMBW20UL 176 + +#define SROM9_2GPO_MCSBW20 178 +#define SROM9_2GPO_MCSBW20UL 180 +#define SROM9_2GPO_MCSBW40 182 + +#define SROM9_5GLPO_MCSBW20 184 +#define SROM9_5GLPO_MCSBW20UL 186 +#define SROM9_5GLPO_MCSBW40 188 +#define SROM9_5GMPO_MCSBW20 190 +#define SROM9_5GMPO_MCSBW20UL 192 +#define SROM9_5GMPO_MCSBW40 194 +#define SROM9_5GHPO_MCSBW20 196 +#define SROM9_5GHPO_MCSBW20UL 198 +#define SROM9_5GHPO_MCSBW40 200 + +#define SROM9_PO_MCS32 202 +#define SROM9_PO_LOFDM40DUP 203 +#define SROM9_EU_EDCRSTH 204 +#define SROM10_EU_EDCRSTH 204 +#define SROM8_RXGAINERR_2G 205 +#define SROM8_RXGAINERR_5GL 206 +#define SROM8_RXGAINERR_5GM 207 +#define SROM8_RXGAINERR_5GH 208 +#define SROM8_RXGAINERR_5GU 209 +#define SROM8_SUBBAND_PPR 210 +#define SROM8_PCIEINGRESS_WAR 211 +#define SROM8_EU_EDCRSTH 212 +#define SROM9_SAR 212 + +#define SROM8_NOISELVL_2G 213 +#define SROM8_NOISELVL_5GL 214 +#define SROM8_NOISELVL_5GM 215 +#define SROM8_NOISELVL_5GH 216 +#define SROM8_NOISELVL_5GU 217 +#define SROM8_NOISECALOFFSET 218 + +#define SROM9_REV_CRC 219 + +#define SROM10_CCKPWROFFSET 218 +#define SROM10_SIGN 219 +#define SROM10_SWCTRLMAP_2G 220 +#define SROM10_CRCREV 229 + +#define SROM10_WORDS 230 +#define SROM10_SIGNATURE SROM4_SIGNATURE + +/* SROM REV 11 */ +#define SROM11_BREV 65 + +#define SROM11_BFL0 66 +#define SROM11_BFL1 67 +#define SROM11_BFL2 68 +#define SROM11_BFL3 69 +#define SROM11_BFL4 70 +#define SROM11_BFL5 71 + +#define SROM11_MACHI 72 +#define SROM11_MACMID 73 +#define SROM11_MACLO 74 + +#define SROM11_CCODE 75 +#define SROM11_REGREV 76 + +#define SROM11_LEDBH10 77 +#define SROM11_LEDBH32 78 + +#define SROM11_LEDDC 79 + +#define SROM11_AA 80 + +#define SROM11_AGBG10 81 +#define SROM11_AGBG2A0 82 +#define 
SROM11_AGA21 83 + +#define SROM11_TXRXC 84 + +#define SROM11_FEM_CFG1 85 +#define SROM11_FEM_CFG2 86 + +/* Masks and offsets for FEM_CFG */ +#define SROM11_FEMCTRL_MASK 0xf800 +#define SROM11_FEMCTRL_SHIFT 11 +#define SROM11_PAPDCAP_MASK 0x0400 +#define SROM11_PAPDCAP_SHIFT 10 +#define SROM11_TWORANGETSSI_MASK 0x0200 +#define SROM11_TWORANGETSSI_SHIFT 9 +#define SROM11_PDGAIN_MASK 0x01f0 +#define SROM11_PDGAIN_SHIFT 4 +#define SROM11_EPAGAIN_MASK 0x000e +#define SROM11_EPAGAIN_SHIFT 1 +#define SROM11_TSSIPOSSLOPE_MASK 0x0001 +#define SROM11_TSSIPOSSLOPE_SHIFT 0 +#define SROM11_GAINCTRLSPH_MASK 0xf800 +#define SROM11_GAINCTRLSPH_SHIFT 11 + +#define SROM11_THERMAL 87 +#define SROM11_MPWR_RAWTS 88 +#define SROM11_TS_SLP_OPT_CORRX 89 +#define SROM11_XTAL_FREQ 90 +#define SROM11_5GB0_4080_W0_A1 91 +#define SROM11_PHYCAL_TEMPDELTA 92 +#define SROM11_MPWR_1_AND_2 93 +#define SROM11_5GB0_4080_W1_A1 94 +#define SROM11_TSSIFLOOR_2G 95 +#define SROM11_TSSIFLOOR_5GL 96 +#define SROM11_TSSIFLOOR_5GM 97 +#define SROM11_TSSIFLOOR_5GH 98 +#define SROM11_TSSIFLOOR_5GU 99 + +/* Masks and offsets for Thermal parameters */ +#define SROM11_TEMPS_PERIOD_MASK 0xf0 +#define SROM11_TEMPS_PERIOD_SHIFT 4 +#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f +#define SROM11_TEMPS_HYSTERESIS_SHIFT 0 +#define SROM11_TEMPCORRX_MASK 0xfc +#define SROM11_TEMPCORRX_SHIFT 2 +#define SROM11_TEMPSENSE_OPTION_MASK 0x3 +#define SROM11_TEMPSENSE_OPTION_SHIFT 0 + +#define SROM11_PDOFF_2G_40M_A0_MASK 0x000f +#define SROM11_PDOFF_2G_40M_A0_SHIFT 0 +#define SROM11_PDOFF_2G_40M_A1_MASK 0x00f0 +#define SROM11_PDOFF_2G_40M_A1_SHIFT 4 +#define SROM11_PDOFF_2G_40M_A2_MASK 0x0f00 +#define SROM11_PDOFF_2G_40M_A2_SHIFT 8 +#define SROM11_PDOFF_2G_40M_VALID_MASK 0x8000 +#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15 + +#define SROM11_PDOFF_2G_40M 100 +#define SROM11_PDOFF_40M_A0 101 +#define SROM11_PDOFF_40M_A1 102 +#define SROM11_PDOFF_40M_A2 103 +#define SROM11_5GB0_4080_W2_A1 103 +#define SROM11_PDOFF_80M_A0 104 +#define 
SROM11_PDOFF_80M_A1 105 +#define SROM11_PDOFF_80M_A2 106 +#define SROM11_5GB1_4080_W0_A1 106 + +#define SROM11_SUBBAND5GVER 107 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_11 3 +#define SROM11_PATH0 108 +#define SROM11_PATH1 128 +#define SROM11_PATH2 148 + +#define SROM11_2G_MAXP 0 +#define SROM11_5GB1_4080_PA 0 +#define SROM11_2G_PA 1 +#define SROM11_5GB2_4080_PA 2 +#define SROM11_RXGAINS1 4 +#define SROM11_RXGAINS 5 +#define SROM11_5GB3_4080_PA 5 +#define SROM11_5GB1B0_MAXP 6 +#define SROM11_5GB3B2_MAXP 7 +#define SROM11_5GB0_PA 8 +#define SROM11_5GB1_PA 11 +#define SROM11_5GB2_PA 14 +#define SROM11_5GB3_PA 17 + +/* Masks and offsets for rxgains */ +#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS2GTRISOA_MASK 0x0078 +#define SROM11_RXGAINS2GTRISOA_SHIFT 3 +#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0 +#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GHTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078 +#define SROM11_RXGAINS5GMTRISOA_SHIFT 3 +#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0 + +/* Power per rate */ +#define SROM11_CCKBW202GPO 168 +#define SROM11_CCKBW20UL2GPO 169 +#define SROM11_MCSBW202GPO 170 +#define SROM11_MCSBW202GPO_1 171 +#define SROM11_MCSBW402GPO 172 +#define SROM11_MCSBW402GPO_1 173 +#define 
SROM11_DOT11AGOFDMHRBW202GPO 174 +#define SROM11_OFDMLRBW202GPO 175 + +#define SROM11_MCSBW205GLPO 176 +#define SROM11_MCSBW205GLPO_1 177 +#define SROM11_MCSBW405GLPO 178 +#define SROM11_MCSBW405GLPO_1 179 +#define SROM11_MCSBW805GLPO 180 +#define SROM11_MCSBW805GLPO_1 181 +#define SROM11_RPCAL_2G 182 +#define SROM11_RPCAL_5GL 183 +#define SROM11_MCSBW205GMPO 184 +#define SROM11_MCSBW205GMPO_1 185 +#define SROM11_MCSBW405GMPO 186 +#define SROM11_MCSBW405GMPO_1 187 +#define SROM11_MCSBW805GMPO 188 +#define SROM11_MCSBW805GMPO_1 189 +#define SROM11_RPCAL_5GM 190 +#define SROM11_RPCAL_5GH 191 +#define SROM11_MCSBW205GHPO 192 +#define SROM11_MCSBW205GHPO_1 193 +#define SROM11_MCSBW405GHPO 194 +#define SROM11_MCSBW405GHPO_1 195 +#define SROM11_MCSBW805GHPO 196 +#define SROM11_MCSBW805GHPO_1 197 +#define SROM11_RPCAL_5GU 198 +#define SROM11_PDOFF_2G_CCK 199 +#define SROM11_MCSLR5GLPO 200 +#define SROM11_MCSLR5GMPO 201 +#define SROM11_MCSLR5GHPO 202 + +#define SROM11_SB20IN40HRPO 203 +#define SROM11_SB20IN80AND160HR5GLPO 204 +#define SROM11_SB40AND80HR5GLPO 205 +#define SROM11_SB20IN80AND160HR5GMPO 206 +#define SROM11_SB40AND80HR5GMPO 207 +#define SROM11_SB20IN80AND160HR5GHPO 208 +#define SROM11_SB40AND80HR5GHPO 209 +#define SROM11_SB20IN40LRPO 210 +#define SROM11_SB20IN80AND160LR5GLPO 211 +#define SROM11_SB40AND80LR5GLPO 212 +#define SROM11_TXIDXCAP2G 212 +#define SROM11_SB20IN80AND160LR5GMPO 213 +#define SROM11_SB40AND80LR5GMPO 214 +#define SROM11_TXIDXCAP5G 214 +#define SROM11_SB20IN80AND160LR5GHPO 215 +#define SROM11_SB40AND80LR5GHPO 216 + +#define SROM11_DOT11AGDUPHRPO 217 +#define SROM11_DOT11AGDUPLRPO 218 + +/* MISC */ +#define SROM11_PCIEINGRESS_WAR 220 +#define SROM11_SAR 221 + +#define SROM11_NOISELVL_2G 222 +#define SROM11_NOISELVL_5GL 223 +#define SROM11_NOISELVL_5GM 224 +#define SROM11_NOISELVL_5GH 225 +#define SROM11_NOISELVL_5GU 226 + +#define SROM11_RXGAINERR_2G 227 +#define SROM11_RXGAINERR_5GL 228 +#define SROM11_RXGAINERR_5GM 229 +#define 
SROM11_RXGAINERR_5GH 230 +#define SROM11_RXGAINERR_5GU 231 + +#define SROM11_EU_EDCRSTH 232 +#define SROM12_EU_EDCRSTH 232 + +#define SROM11_SIGN 64 +#define SROM11_CRCREV 233 + +#define SROM11_WORDS 234 +#define SROM11_SIGNATURE 0x0634 + +/* SROM REV 12 */ +#define SROM12_SIGN 64 +#define SROM12_WORDS 512 +#define SROM12_SIGNATURE 0x8888 +#define SROM12_CRCREV 511 + +#define SROM12_BFL6 486 +#define SROM12_BFL7 487 + +#define SROM12_MCSBW205GX1PO 234 +#define SROM12_MCSBW205GX1PO_1 235 +#define SROM12_MCSBW405GX1PO 236 +#define SROM12_MCSBW405GX1PO_1 237 +#define SROM12_MCSBW805GX1PO 238 +#define SROM12_MCSBW805GX1PO_1 239 +#define SROM12_MCSLR5GX1PO 240 +#define SROM12_SB40AND80LR5GX1PO 241 +#define SROM12_SB20IN80AND160LR5GX1PO 242 +#define SROM12_SB20IN80AND160HR5GX1PO 243 +#define SROM12_SB40AND80HR5GX1PO 244 + +#define SROM12_MCSBW205GX2PO 245 +#define SROM12_MCSBW205GX2PO_1 246 +#define SROM12_MCSBW405GX2PO 247 +#define SROM12_MCSBW405GX2PO_1 248 +#define SROM12_MCSBW805GX2PO 249 +#define SROM12_MCSBW805GX2PO_1 250 +#define SROM12_MCSLR5GX2PO 251 +#define SROM12_SB40AND80LR5GX2PO 252 +#define SROM12_SB20IN80AND160LR5GX2PO 253 +#define SROM12_SB20IN80AND160HR5GX2PO 254 +#define SROM12_SB40AND80HR5GX2PO 255 + +/* MISC */ +#define SROM12_RXGAINS10 483 +#define SROM12_RXGAINS11 484 +#define SROM12_RXGAINS12 485 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_12 3 +#define SROM12_PATH0 256 +#define SROM12_PATH1 328 +#define SROM12_PATH2 400 + +#define SROM12_5GB42G_MAXP 0 +#define SROM12_2GB0_PA 1 +#define SROM12_2GB0_PA_W0 1 +#define SROM12_2GB0_PA_W1 2 +#define SROM12_2GB0_PA_W2 3 +#define SROM12_2GB0_PA_W3 4 + +#define SROM12_RXGAINS 5 +#define SROM12_5GB1B0_MAXP 6 +#define SROM12_5GB3B2_MAXP 7 + +#define SROM12_5GB0_PA 8 +#define SROM12_5GB0_PA_W0 8 +#define SROM12_5GB0_PA_W1 9 +#define SROM12_5GB0_PA_W2 10 +#define SROM12_5GB0_PA_W3 11 + +#define SROM12_5GB1_PA 12 +#define SROM12_5GB1_PA_W0 12 +#define SROM12_5GB1_PA_W1 13 +#define 
SROM12_5GB1_PA_W2 14 +#define SROM12_5GB1_PA_W3 15 + +#define SROM12_5GB2_PA 16 +#define SROM12_5GB2_PA_W0 16 +#define SROM12_5GB2_PA_W1 17 +#define SROM12_5GB2_PA_W2 18 +#define SROM12_5GB2_PA_W3 19 + +#define SROM12_5GB3_PA 20 +#define SROM12_5GB3_PA_W0 20 +#define SROM12_5GB3_PA_W1 21 +#define SROM12_5GB3_PA_W2 22 +#define SROM12_5GB3_PA_W3 23 + +#define SROM12_5GB4_PA 24 +#define SROM12_5GB4_PA_W0 24 +#define SROM12_5GB4_PA_W1 25 +#define SROM12_5GB4_PA_W2 26 +#define SROM12_5GB4_PA_W3 27 + +#define SROM12_2G40B0_PA 28 +#define SROM12_2G40B0_PA_W0 28 +#define SROM12_2G40B0_PA_W1 29 +#define SROM12_2G40B0_PA_W2 30 +#define SROM12_2G40B0_PA_W3 31 + +#define SROM12_5G40B0_PA 32 +#define SROM12_5G40B0_PA_W0 32 +#define SROM12_5G40B0_PA_W1 33 +#define SROM12_5G40B0_PA_W2 34 +#define SROM12_5G40B0_PA_W3 35 + +#define SROM12_5G40B1_PA 36 +#define SROM12_5G40B1_PA_W0 36 +#define SROM12_5G40B1_PA_W1 37 +#define SROM12_5G40B1_PA_W2 38 +#define SROM12_5G40B1_PA_W3 39 + +#define SROM12_5G40B2_PA 40 +#define SROM12_5G40B2_PA_W0 40 +#define SROM12_5G40B2_PA_W1 41 +#define SROM12_5G40B2_PA_W2 42 +#define SROM12_5G40B2_PA_W3 43 + +#define SROM12_5G40B3_PA 44 +#define SROM12_5G40B3_PA_W0 44 +#define SROM12_5G40B3_PA_W1 45 +#define SROM12_5G40B3_PA_W2 46 +#define SROM12_5G40B3_PA_W3 47 + +#define SROM12_5G40B4_PA 48 +#define SROM12_5G40B4_PA_W0 48 +#define SROM12_5G40B4_PA_W1 49 +#define SROM12_5G40B4_PA_W2 50 +#define SROM12_5G40B4_PA_W3 51 + +#define SROM12_5G80B0_PA 52 +#define SROM12_5G80B0_PA_W0 52 +#define SROM12_5G80B0_PA_W1 53 +#define SROM12_5G80B0_PA_W2 54 +#define SROM12_5G80B0_PA_W3 55 + +#define SROM12_5G80B1_PA 56 +#define SROM12_5G80B1_PA_W0 56 +#define SROM12_5G80B1_PA_W1 57 +#define SROM12_5G80B1_PA_W2 58 +#define SROM12_5G80B1_PA_W3 59 + +#define SROM12_5G80B2_PA 60 +#define SROM12_5G80B2_PA_W0 60 +#define SROM12_5G80B2_PA_W1 61 +#define SROM12_5G80B2_PA_W2 62 +#define SROM12_5G80B2_PA_W3 63 + +#define SROM12_5G80B3_PA 64 +#define SROM12_5G80B3_PA_W0 64 
+#define SROM12_5G80B3_PA_W1 65
+#define SROM12_5G80B3_PA_W2 66
+#define SROM12_5G80B3_PA_W3 67
+
+#define SROM12_5G80B4_PA 68
+#define SROM12_5G80B4_PA_W0 68
+#define SROM12_5G80B4_PA_W1 69
+#define SROM12_5G80B4_PA_W2 70
+#define SROM12_5G80B4_PA_W3 71
+
+/* PD offset */
+#define SROM12_PDOFF_2G_CCK 472
+
+#define SROM12_PDOFF_20in40M_5G_B0 473
+#define SROM12_PDOFF_20in40M_5G_B1 474
+#define SROM12_PDOFF_20in40M_5G_B2 475
+#define SROM12_PDOFF_20in40M_5G_B3 476
+#define SROM12_PDOFF_20in40M_5G_B4 477
+
+#define SROM12_PDOFF_40in80M_5G_B0 478
+#define SROM12_PDOFF_40in80M_5G_B1 479
+#define SROM12_PDOFF_40in80M_5G_B2 480
+#define SROM12_PDOFF_40in80M_5G_B3 481
+#define SROM12_PDOFF_40in80M_5G_B4 482
+
+/* NOTE(review): 20in80M words 488-492 are not contiguous with the
+ * 40in80M words 478-482 above; words 483-487 are assigned to
+ * SROM12_RXGAINS1x/BFL6/BFL7 elsewhere in this header.
+ */
+#define SROM12_PDOFF_20in80M_5G_B0 488
+#define SROM12_PDOFF_20in80M_5G_B1 489
+#define SROM12_PDOFF_20in80M_5G_B2 490
+#define SROM12_PDOFF_20in80M_5G_B3 491
+#define SROM12_PDOFF_20in80M_5G_B4 492
+
+#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */
+#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */
+
+/* SROM REV 13 */
+#define SROM13_SIGN 64
+#define SROM13_WORDS 590
+#define SROM13_SIGNATURE 0x4d55
+#define SROM13_CRCREV 589
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_13 4
+#define SROM13_PATH0 256
+#define SROM13_PATH1 328
+#define SROM13_PATH2 400
+#define SROM13_PATH3 512
+#define SROM13_RXGAINS 5
+
+#define SROM13_XTALFREQ 90
+
+#define SROM13_PDOFFSET20IN40M2G 94
+#define SROM13_PDOFFSET20IN40M2GCORE3 95
+#define SROM13_SB20IN40HRLRPOX 96
+
+#define SROM13_RXGAINS1CORE3 97
+
+#define SROM13_PDOFFSET20IN40M5GCORE3 98
+#define SROM13_PDOFFSET20IN40M5GCORE3_1 99
+
+#define SROM13_ANTGAIN_BANDBGA 100
+
+#define SROM13_PDOFFSET40IN80M5GCORE3 105
+#define SROM13_PDOFFSET40IN80M5GCORE3_1 106
+
+/* power per rate */
+#define SROM13_MCS1024QAM2GPO 108
+#define SROM13_MCS1024QAM5GLPO 109
+#define SROM13_MCS1024QAM5GLPO_1 110
+#define SROM13_MCS1024QAM5GMPO 111
+#define SROM13_MCS1024QAM5GMPO_1 112
+#define SROM13_MCS1024QAM5GHPO 113
+#define SROM13_MCS1024QAM5GHPO_1 114
+#define SROM13_MCS1024QAM5GX1PO 115
+#define SROM13_MCS1024QAM5GX1PO_1 116
+#define SROM13_MCS1024QAM5GX2PO 117
+#define SROM13_MCS1024QAM5GX2PO_1 118
+
+#define SROM13_MCSBW1605GLPO 119
+#define SROM13_MCSBW1605GLPO_1 120
+#define SROM13_MCSBW1605GMPO 121
+#define SROM13_MCSBW1605GMPO_1 122
+#define SROM13_MCSBW1605GHPO 123
+#define SROM13_MCSBW1605GHPO_1 124
+
+#define SROM13_MCSBW1605GX1PO 125
+#define SROM13_MCSBW1605GX1PO_1 126
+#define SROM13_MCSBW1605GX2PO 127
+#define SROM13_MCSBW1605GX2PO_1 128
+
+#define SROM13_ULBPPROFFS5GB0 129
+#define SROM13_ULBPPROFFS5GB1 130
+#define SROM13_ULBPPROFFS5GB2 131
+#define SROM13_ULBPPROFFS5GB3 132
+#define SROM13_ULBPPROFFS5GB4 133
+#define SROM13_ULBPPROFFS2G 134
+
+#define SROM13_MCS8POEXP 135
+#define SROM13_MCS8POEXP_1 136
+#define SROM13_MCS9POEXP 137
+#define SROM13_MCS9POEXP_1 138
+#define SROM13_MCS10POEXP 139
+#define SROM13_MCS10POEXP_1 140
+#define SROM13_MCS11POEXP 141
+#define SROM13_MCS11POEXP_1 142
+#define SROM13_ULBPDOFFS5GB0A0 143
+#define SROM13_ULBPDOFFS5GB0A1 144
+#define SROM13_ULBPDOFFS5GB0A2 145
+#define SROM13_ULBPDOFFS5GB0A3 146
+#define SROM13_ULBPDOFFS5GB1A0 147
+#define SROM13_ULBPDOFFS5GB1A1 148
+#define SROM13_ULBPDOFFS5GB1A2 149
+#define SROM13_ULBPDOFFS5GB1A3 150
+#define SROM13_ULBPDOFFS5GB2A0 151
+#define SROM13_ULBPDOFFS5GB2A1 152
+#define SROM13_ULBPDOFFS5GB2A2 153
+#define SROM13_ULBPDOFFS5GB2A3 154
+#define SROM13_ULBPDOFFS5GB3A0 155
+#define SROM13_ULBPDOFFS5GB3A1 156
+#define SROM13_ULBPDOFFS5GB3A2 157
+#define SROM13_ULBPDOFFS5GB3A3 158
+#define SROM13_ULBPDOFFS5GB4A0 159
+#define SROM13_ULBPDOFFS5GB4A1 160
+#define SROM13_ULBPDOFFS5GB4A2 161
+#define SROM13_ULBPDOFFS5GB4A3 162
+#define SROM13_ULBPDOFFS2GA0 163
+#define SROM13_ULBPDOFFS2GA1 164
+#define SROM13_ULBPDOFFS2GA2 165
+#define SROM13_ULBPDOFFS2GA3 166
+
+/* NOTE(review): RPCAL5GB4 lives at word 199 while the CORE3 RPCAL
+ * words are 101-103 — the offsets are intentionally non-contiguous;
+ * confirm against the SROM rev 13 map before reordering.
+ */
+#define SROM13_RPCAL5GB4 199
+#define SROM13_RPCAL2GCORE3 101
+#define SROM13_RPCAL5GB01CORE3 102
+#define SROM13_RPCAL5GB23CORE3 103
+
+#define SROM13_SW_TXRX_MASK 104 + +#define SROM13_EU_EDCRSTH 232 + +#define SROM13_SWCTRLMAP4_CFG 493 +#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0 494 +#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0 495 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0 496 +#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0 497 +#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0 498 +#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0 499 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0 500 +#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0 501 +#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4 502 +#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4 503 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4 504 +#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4 505 +#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4 506 +#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4 507 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4 508 +#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4 509 + +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 + +#define SROM13_NOISELVLCORE3 584 +#define SROM13_NOISELVLCORE3_1 585 +#define SROM13_RXGAINERRCORE3 586 +#define SROM13_RXGAINERRCORE3_1 587 + +#define SROM13_PDOFF_2G_CCK_20M 167 + +#define SROM15_CALDATA_WORDS 943 +#define SROM15_CAL_OFFSET_LOC 68 +#define MAX_IOCTL_TXCHUNK_SIZE 1500 +#define SROM15_MAX_CAL_SIZE 1886 +#define SROM15_SIGNATURE 0x110c +#define SROM15_WORDS 1024 +#define SROM15_MACHI 65 +#define SROM15_CRCREV 1023 +#define SROM15_BRDREV 69 +#define SROM15_CCODE 70 +#define SROM15_REGREV 71 +#define SROM15_SIGN 64 + +#define SROM16_SIGN 128 +#define SROM16_WORDS 1024 +#define SROM16_SIGNATURE 0x4357 +#define SROM16_CRCREV 1023 +#define SROM16_MACHI 129 +#define SROM16_CALDATA_OFFSET_LOC 132 +#define SROM16_BOARDREV 133 +#define SROM16_CCODE 134 +#define SROM16_REGREV 135 + +#define SROM_CALDATA_WORDS 832 + +#define SROM17_SIGN 64 +#define SROM17_BRDREV 65 +#define SROM17_MACADDR 66 +#define SROM17_CCODE 69 +#define SROM17_CALDATA 70 +#define SROM17_GCALTMP 71 + +#define SROM17_C0SRD202G 72 +#define SROM17_C0SRD202G_1 73 +#define SROM17_C0SRD205GL 
74 +#define SROM17_C0SRD205GL_1 75 +#define SROM17_C0SRD205GML 76 +#define SROM17_C0SRD205GML_1 77 +#define SROM17_C0SRD205GMU 78 +#define SROM17_C0SRD205GMU_1 79 +#define SROM17_C0SRD205GH 80 +#define SROM17_C0SRD205GH_1 81 + +#define SROM17_C1SRD202G 82 +#define SROM17_C1SRD202G_1 83 +#define SROM17_C1SRD205GL 84 +#define SROM17_C1SRD205GL_1 85 +#define SROM17_C1SRD205GML 86 +#define SROM17_C1SRD205GML_1 87 +#define SROM17_C1SRD205GMU 88 +#define SROM17_C1SRD205GMU_1 89 +#define SROM17_C1SRD205GH 90 +#define SROM17_C1SRD205GH_1 91 + +#define SROM17_TRAMMAGIC 92 +#define SROM17_TRAMMAGIC_1 93 +#define SROM17_TRAMDATA 94 + +#define SROM17_WORDS 256 +#define SROM17_CRCREV 255 +#define SROM17_CALDATA_WORDS 161 +#define SROM17_SIGNATURE 0x1103 /* 4355 in hex format */ + +typedef struct { + uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */ + uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */ + uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */ + uint8 triso; /* TR switch isolation */ + uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */ +} srom_fem_t; + +#endif /* _bcmsrom_fmt_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmsrom_tbl.h b/bcmdhd.100.10.315.x/include/bcmsrom_tbl.h new file mode 100644 index 0000000..30e47e8 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmsrom_tbl.h @@ -0,0 +1,1458 @@ +/* + * Table that encodes the srom formats for PCI/PCIe NICs. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsrom_tbl.h 700323 2017-05-18 16:12:11Z $ + */ + +#ifndef _bcmsrom_tbl_h_ +#define _bcmsrom_tbl_h_ + +#include "sbpcmcia.h" +#include "wlioctl.h" +#include + +typedef struct { + const char *name; + uint32 revmask; + uint32 flags; + uint16 off; + uint16 mask; +} sromvar_t; + +#define SRFL_MORE 1 /* value continues as described by the next entry */ +#define SRFL_NOFFS 2 /* value bits can't be all one's */ +#define SRFL_PRHEX 4 /* value is in hexdecimal format */ +#define SRFL_PRSIGN 8 /* value is in signed decimal format */ +#define SRFL_CCODE 0x10 /* value is in country code format */ +#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */ +#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */ +#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */ +#define SRFL_ARRAY 0x100 /* value is in an array. 
All elements EXCEPT FOR THE LAST + * ONE in the array should have this flag set. + */ +#define PRHEX_N_MORE (SRFL_PRHEX | SRFL_MORE) + +#define SROM_DEVID_PCIE 48 + +/** + * Assumptions: + * - Ethernet address spans across 3 consecutive words + * + * Table rules: + * - Add multiple entries next to each other if a value spans across multiple words + * (even multiple fields in the same word) with each entry except the last having + * it's SRFL_MORE bit set. + * - Ethernet address entry does not follow above rule and must not have SRFL_MORE + * bit set. Its SRFL_ETHADDR bit implies it takes multiple words. + * - The last entry's name field must be NULL to indicate the end of the table. Other + * entries must have non-NULL name. + */ +#if !defined(SROM15_MEMOPT) +static const sromvar_t pci_sromvars[] = { +/* name revmask flags off mask */ +#if defined(CABLECPE) + {"devid", 0xffffff00, SRFL_PRHEX, PCI_F0DEVID, 0xffff}, +#elif defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED) + {"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff}, +#else + {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff}, +#endif // endif + {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK}, + {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff}, + {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff}, + {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff}, + {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM_BFL2, 0xffff}, + {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM3_BFL2, 0xffff}, + {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff}, + {"", 0, 0, SROM4_BFL1, 0xffff}, + {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff}, + {"", 0, 0, SROM5_BFL1, 0xffff}, + {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff}, + {"", 0, 0, SROM8_BFL1, 0xffff}, + {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff}, + {"", 0, 0, 
SROM4_BFL3, 0xffff}, + {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff}, + {"", 0, 0, SROM5_BFL3, 0xffff}, + {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff}, + {"", 0, 0, SROM8_BFL3, 0xffff}, + {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff}, + {"subvid", 0xfffffffc, SRFL_PRHEX, SROM_SVID, 0xffff}, + {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff}, + {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff}, + {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff}, + {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff}, + {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff}, + {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK}, + {"regrev", 0x00000008, 0, SROM_OPO, 0xff00}, + {"regrev", 0x00000010, 0, SROM4_REGREV, 0xffff}, + {"regrev", 0x000000e0, 0, SROM5_REGREV, 0xffff}, + {"regrev", 0x00000700, 0, SROM8_REGREV, 0xffff}, + {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff}, + {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00}, + {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff}, + {"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00}, + {"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00}, + {"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00}, + {"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff}, + {"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00}, + {"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff}, + {"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00}, + {"ledbh0", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0xff00}, + {"ledbh2", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0xff00}, + {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff}, + {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff}, + {"pa0b2", 0x0000000e, SRFL_PRHEX, 
SROM_WL0PAB2, 0xffff}, + {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff}, + {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff}, + {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff}, + {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff}, + {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff}, + {"pa0itssit", 0x00000700, 0, SROM8_W0_ITTMAXP, 0xff00}, + {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff}, + {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff}, + {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff}, + {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK}, + {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff}, + {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff}, + {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK}, + {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00}, + {"aa5g", 0x00000700, 0, SROM8_AA, 0xff00}, + {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff}, + {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00}, + {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff}, + {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00}, + {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff}, + {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00}, + {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff}, + {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00}, + {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff}, + {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00}, + {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff}, + {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff}, + {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff}, + {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff}, + {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff}, + {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff}, + {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff}, + {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff}, + {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff}, + {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00}, + {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00}, + {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00}, + {"pa1himaxpwr", 
0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff}, + {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff}, + {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff}, + {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff}, + {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff}, + {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff}, + {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff}, + {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff}, + {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff}, + {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff}, + {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00}, + {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff}, + {"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00}, + {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff}, + {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f}, + {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f}, + {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f}, + {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f}, + {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff}, + {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00}, + {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00}, + {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff}, + {"tri5g", 
0x00000700, 0, SROM8_TRI52G, 0xff00}, + {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00}, + {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00}, + {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00}, + {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK}, + {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK}, + {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK}, + {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK}, + {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff}, + {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00}, + {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff}, + {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00}, + {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff}, + {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00}, + {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff}, + {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00}, + 
{"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff}, + {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00}, + {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff}, + {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00}, + {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff}, + {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00}, + {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff}, + {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00}, + + {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff}, + {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff}, + {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff}, + {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff}, + {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff}, + {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff}, + {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff}, + {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff}, + {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff}, + {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff}, + {"leddc", 0x00000700, SRFL_NOFFS|SRFL_LEDDC, SROM8_LEDDC, 0xffff}, + {"leddc", 0x000000e0, SRFL_NOFFS|SRFL_LEDDC, SROM5_LEDDC, 0xffff}, + {"leddc", 0x00000010, SRFL_NOFFS|SRFL_LEDDC, SROM4_LEDDC, 0xffff}, + {"leddc", 0x00000008, SRFL_NOFFS|SRFL_LEDDC, SROM3_LEDDC, 0xffff}, + + {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00}, + {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff}, + {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff}, + {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300}, + {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f}, + {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010}, + 
{"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020}, + {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff}, + {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00}, + {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80}, + + {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff}, + {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff}, + {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff}, + {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff}, + {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 
0x000000f0, 0, SROM4_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff}, + {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 
0xffff}, + {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff}, + {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff}, + {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff}, + {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff}, + {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff}, + {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff}, + {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff}, + {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff}, + {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff}, + + /* power per rate from sromrev 9 */ + {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff}, + {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff}, + {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff}, + {"", 0, 0, 
SROM9_2GPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff}, + {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff}, + {"", 0, 0, 
SROM9_5GMPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff}, + {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff}, + {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff}, + {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf}, + {"eu_edthresh2g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0xff00}, + {"eu_edthresh2g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0xff00}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800}, + {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f}, + {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0}, + {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800}, + {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f}, + {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0}, + {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800}, + {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f}, + {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0}, + {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800}, + {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800}, + {"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff}, + {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00}, + {"noiselvl2ga0", 
0x00000700, 0, SROM8_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00}, + {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f}, + {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0}, + {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00}, + {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f}, + {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0}, + {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00}, + {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f}, + {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0}, + {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00}, + {"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f}, + {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0}, + {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00}, + {"noisecaloffset", 0x00000300, 0, SROM8_NOISECALOFFSET, 0x00ff}, + {"noisecaloffset5g", 0x00000300, 0, SROM8_NOISECALOFFSET, 0xff00}, + {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7}, + + {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff}, + {"eu_edthresh2g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0xff00}, + /* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */ + {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, 
SROM10_SWCTRLMAP_2G + 7, 0xffff}, + {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff}, + + /* sromrev 11 */ + {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL4, 0xffff}, + {"", 0, 0, SROM11_BFL5, 0xffff}, + {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff}, + {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff}, + {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff}, + {"regrev", 0xfffff800, 0, SROM11_REGREV, 0xffff}, + {"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff}, + {"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00}, + {"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff}, + {"ledbh3", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0xff00}, + {"leddc", 0xfffff800, SRFL_NOFFS|SRFL_LEDDC, SROM11_LEDDC, 0xffff}, + {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff}, + {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00}, + {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0xff00}, + {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0x00ff}, + {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00}, + {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff}, + {"aga1", 0xfffff800, 0, SROM11_AGA21, 0xff00}, + {"aga2", 0xfffff800, 0, SROM11_AGA21, 0x00ff}, + {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK}, + + {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001}, + {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e}, + {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0}, + {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200}, + {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400}, + {"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800}, + + {"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001}, + {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e}, + {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0}, + {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200}, + {"papdcap5g", 
0xfffff800, 0, SROM11_FEM_CFG2, 0x0400}, + {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800}, + + {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00}, + {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff}, + {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff}, + {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300}, + {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff}, + {"txpwrbckof", 0x00000800, SRFL_PRHEX, SROM11_PATH0 + SROM11_2G_MAXP, 0xff00}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */ + {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff}, + {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 
0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80}, + {"tssifloor2g", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_2G, 0x03ff}, + {"tssifloor5g", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_5GU, 0x03ff}, + {"pdoffset2g40ma0", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x000f}, + {"pdoffset2g40ma1", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x00f0}, + {"pdoffset2g40ma2", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x0f00}, + {"pdoffset2g40mvalid", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x8000}, + {"pdoffset40ma0", 0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff}, + {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff}, + {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff}, + {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff}, + {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff}, + {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff}, + + {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff}, + {"paparambwver", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0xf000}, + {"rx5ggainwar", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0x2000}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */ + {"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + 
SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 40 MHz BW */ + {"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 5G Band, 80 MHz BW */ + {"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, 
SROM11_PATH2 + SROM11_5GB1_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff}, + /* Special PA Params for 4335 2G Band, CCK */ + {"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff}, + + /* power per rate */ + {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff}, + {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff}, + {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff}, + {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff}, + {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff}, + {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff}, + {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff}, + {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff}, + {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff}, + {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, 
SROM11_MCSBW205GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff}, + {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff}, + {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff}, + {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff}, + {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff}, + {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff}, + {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0x0fff}, + {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff}, + {"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff}, + {"sb20in40hrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff}, + {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff}, + {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff}, + {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff}, + {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff}, + {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff}, + {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff}, + {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff}, + {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff}, + {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff}, + {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff}, + {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff}, + {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff}, + {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff}, + {"dot11agduphrpo", 0xfffff800, 0, 
SROM11_DOT11AGDUPHRPO, 0xffff}, + {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff}, + + /* Misc */ + {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff}, + {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00}, + + {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00}, + {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f}, + {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0}, + {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00}, + {"eu_edthresh2g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0xff00}, + + {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800}, + {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5ga2", 
0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800}, + {"rpcal2g", 0xfffff800, 0, SROM11_RPCAL_2G, 0xffff}, + {"rpcal5gb0", 0xfffff800, 0, SROM11_RPCAL_5GL, 0xffff}, + {"rpcal5gb1", 0xfffff800, 0, SROM11_RPCAL_5GM, 0xffff}, + {"rpcal5gb2", 0xfffff800, 0, SROM11_RPCAL_5GH, 0xffff}, + {"rpcal5gb3", 0xfffff800, 0, SROM11_RPCAL_5GU, 0xffff}, + {"txidxcap2g", 0xfffff800, 0, SROM11_TXIDXCAP2G, 0x0ff0}, + {"txidxcap5g", 0xfffff800, 0, SROM11_TXIDXCAP5G, 0x0ff0}, + {"pdoffsetcckma0", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x000f}, + {"pdoffsetcckma1", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x00f0}, + {"pdoffsetcckma2", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x0f00}, + + /* sromrev 12 */ + {"boardflags4", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_BFL6, 0xffff}, + {"", 0, 0, SROM12_BFL7, 0xffff}, + {"pdoffsetcck", 0xfffff000, 0, SROM12_PDOFF_2G_CCK, 0xffff}, + {"pdoffset20in40m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B0, 0xffff}, + {"pdoffset20in40m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B1, 0xffff}, + {"pdoffset20in40m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B2, 0xffff}, + {"pdoffset20in40m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B3, 0xffff}, + {"pdoffset20in40m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B4, 0xffff}, + {"pdoffset40in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B0, 0xffff}, + {"pdoffset40in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B1, 0xffff}, + {"pdoffset40in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B2, 0xffff}, + {"pdoffset40in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B3, 0xffff}, + {"pdoffset40in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B4, 0xffff}, + {"pdoffset20in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B0, 0xffff}, + {"pdoffset20in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B1, 0xffff}, + {"pdoffset20in80m5gb2", 
0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B2, 0xffff}, + {"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff}, + {"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff}, + + /* power per rate */ + {"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff}, + {"mcsbw405gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX1PO_1, 0xffff}, + {"mcsbw805gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX1PO_1, 0xffff}, + {"mcsbw205gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX2PO_1, 0xffff}, + {"mcsbw405gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX2PO_1, 0xffff}, + {"mcsbw805gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX2PO_1, 0xffff}, + + {"sb20in80and160hr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160lr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX1PO, 0xffff}, + {"sb40and80lr5gx1po", 0xfffff000, 0, SROM12_SB40AND80LR5GX1PO, 0xffff}, + {"sb20in80and160hr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + {"sb20in80and160lr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX2PO, 0xffff}, + {"sb40and80lr5gx2po", 0xfffff000, 0, SROM12_SB40AND80LR5GX2PO, 0xffff}, + + {"rxgains5gmelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0007}, + {"rxgains5gmelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0007}, + {"rxgains5gmelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0007}, + {"rxgains5gmtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0078}, + {"rxgains5gmtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0078}, + 
{"rxgains5gmtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0078}, + {"rxgains5gmtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0080}, + {"rxgains5gmtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0080}, + {"rxgains5gmtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0080}, + {"rxgains5ghelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0700}, + {"rxgains5ghelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0700}, + {"rxgains5ghelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0700}, + {"rxgains5ghtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x7800}, + {"rxgains5ghtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x7800}, + {"rxgains5ghtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x7800}, + {"rxgains5ghtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x8000}, + {"rxgains5ghtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x8000}, + {"rxgains5ghtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x8000}, + {"eu_edthresh2g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0xff00}, + + {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff}, + {"", 0, 0, SROM12_GPDN_H, 0xffff}, + + {"rpcal2gcore3", 0xffffe000, 0, SROM13_RPCAL2GCORE3, 0x00ff}, + {"rpcal5gb0core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0x00ff}, + {"rpcal5gb1core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0xff00}, + {"rpcal5gb2core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0x00ff}, + {"rpcal5gb3core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0xff00}, + + {"sw_txchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x000f}, + {"sw_rxchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x00f0}, + + {"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00}, + + {"agbg3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0xff00}, + {"aga3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0x00ff}, + {"noiselvl2ga3", 0xffffe000, 0, SROM13_NOISELVLCORE3, 0x001f}, + {"noiselvl5ga3", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x03e0}, + {"", 
0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_NOISELVLCORE3_1, 0x03e0}, + {"rxgainerr2ga3", 0xffffe000, 0, SROM13_RXGAINERRCORE3, 0x001f}, + {"rxgainerr5ga3", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_RXGAINERRCORE3_1, 0x03e0}, + {"rxgains5gmelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0007}, + {"rxgains5gmtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0078}, + {"rxgains5gmtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0080}, + {"rxgains5ghelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0700}, + {"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800}, + {"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000}, + + /* pdoffset */ + {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff}, + {"pdoffsetcck20m", 0xffffe000, 0, SROM13_PDOFF_2G_CCK_20M, 0xffff}, + + /* power per rate */ + {"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff}, + {"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GLPO_1, 0xffff}, + {"mcs1024qam5gmpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GMPO, 
0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GMPO_1, 0xffff}, + {"mcs1024qam5ghpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GHPO_1, 0xffff}, + {"mcs1024qam5gx1po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX1PO_1, 0xffff}, + {"mcs1024qam5gx2po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX2PO_1, 0xffff}, + + {"mcsbw1605glpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GLPO_1, 0xffff}, + {"mcsbw1605gmpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GMPO_1, 0xffff}, + {"mcsbw1605ghpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GHPO_1, 0xffff}, + {"mcsbw1605gx1po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX1PO_1, 0xffff}, + {"mcsbw1605gx2po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX2PO_1, 0xffff}, + + {"ulbpproffs2g", 0xffffe000, 0, SROM13_ULBPPROFFS2G, 0xffff}, + + {"mcs8poexp", 0xffffe000, SRFL_MORE, SROM13_MCS8POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS8POEXP_1, 0xffff}, + {"mcs9poexp", 0xffffe000, SRFL_MORE, SROM13_MCS9POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS9POEXP_1, 0xffff}, + {"mcs10poexp", 0xffffe000, SRFL_MORE, SROM13_MCS10POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS10POEXP_1, 0xffff}, + {"mcs11poexp", 0xffffe000, SRFL_MORE, SROM13_MCS11POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS11POEXP_1, 0xffff}, + + {"ulbpdoffs5gb0a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A0, 0xffff}, + {"ulbpdoffs5gb0a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A1, 0xffff}, + {"ulbpdoffs5gb0a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A2, 0xffff}, + {"ulbpdoffs5gb0a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A3, 0xffff}, + {"ulbpdoffs5gb1a0", 0xffffe000, 0, 
SROM13_ULBPDOFFS5GB1A0, 0xffff}, + {"ulbpdoffs5gb1a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A1, 0xffff}, + {"ulbpdoffs5gb1a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A2, 0xffff}, + {"ulbpdoffs5gb1a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A3, 0xffff}, + {"ulbpdoffs5gb2a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A0, 0xffff}, + {"ulbpdoffs5gb2a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A1, 0xffff}, + {"ulbpdoffs5gb2a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A2, 0xffff}, + {"ulbpdoffs5gb2a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A3, 0xffff}, + {"ulbpdoffs5gb3a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A0, 0xffff}, + {"ulbpdoffs5gb3a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A1, 0xffff}, + {"ulbpdoffs5gb3a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A2, 0xffff}, + {"ulbpdoffs5gb3a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A3, 0xffff}, + {"ulbpdoffs5gb4a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A0, 0xffff}, + {"ulbpdoffs5gb4a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A1, 0xffff}, + {"ulbpdoffs5gb4a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A2, 0xffff}, + {"ulbpdoffs5gb4a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A3, 0xffff}, + {"ulbpdoffs2ga0", 0xffffe000, 0, SROM13_ULBPDOFFS2GA0, 0xffff}, + {"ulbpdoffs2ga1", 0xffffe000, 0, SROM13_ULBPDOFFS2GA1, 0xffff}, + {"ulbpdoffs2ga2", 0xffffe000, 0, SROM13_ULBPDOFFS2GA2, 0xffff}, + {"ulbpdoffs2ga3", 0xffffe000, 0, SROM13_ULBPDOFFS2GA3, 0xffff}, + + {"rpcal5gb4", 0xffffe000, 0, SROM13_RPCAL5GB4, 0xffff}, + + {"sb20in40hrlrpox", 0xffffe000, 0, SROM13_SB20IN40HRLRPOX, 0xffff}, + + {"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff}, + {"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff}, + 
{"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff}, + {"swctrlmap4_TX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff}, + {NULL, 0, 0, 0, 0} +}; +#endif /* !defined(SROM15_MEMOPT) */ + +static const sromvar_t pci_srom15vars[] = { + {"macaddr", 0x00008000, SRFL_ETHADDR, SROM15_MACHI, 0xffff}, + {"caldata_offset", 0x00008000, 0, SROM15_CAL_OFFSET_LOC, 0xffff}, + {"boardrev", 0x00008000, SRFL_PRHEX, SROM15_BRDREV, 0xffff}, + {"ccode", 0x00008000, SRFL_CCODE, SROM15_CCODE, 0xffff}, + {"regrev", 0x00008000, 0, SROM15_REGREV, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t pci_srom16vars[] = { + {"macaddr", 0x00010000, SRFL_ETHADDR, SROM16_MACHI, 0xffff}, + {"caldata_offset", 0x00010000, 0, SROM16_CALDATA_OFFSET_LOC, 0xffff}, + {"boardrev", 0x00010000, 0, SROM16_BOARDREV, 0xffff}, + {"ccode", 0x00010000, 0, SROM16_CCODE, 0xffff}, + {"regrev", 0x00010000, 0, SROM16_REGREV, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t pci_srom17vars[] = { + {"boardrev", 0x00020000, SRFL_PRHEX, SROM17_BRDREV, 0xffff}, + {"macaddr", 0x00020000, SRFL_ETHADDR, SROM17_MACADDR, 0xffff}, + {"ccode", 0x00020000, SRFL_CCODE, 
SROM17_CCODE, 0xffff}, + {"caldata_offset", 0x00020000, 0, SROM17_CALDATA, 0xffff}, + {"gain_cal_temp", 0x00020000, SRFL_PRHEX, SROM17_GCALTMP, 0xffff}, + {"rssi_delta_2gb0_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD202G, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD202G_1, 0xffff}, + {"rssi_delta_5gl_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GL, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GL_1, 0xffff}, + {"rssi_delta_5gml_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GML, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GML_1, 0xffff}, + {"rssi_delta_5gmu_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GMU, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GMU_1, 0xffff}, + {"rssi_delta_5gh_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GH, 0xffff}, + {"", 0x00020000, 0, SROM17_C0SRD205GH_1, 0xffff}, + {"rssi_delta_2gb0_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD202G, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD202G_1, 0xffff}, + {"rssi_delta_5gl_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GL, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GL_1, 0xffff}, + {"rssi_delta_5gml_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GML, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GML_1, 0xffff}, + {"rssi_delta_5gmu_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GMU, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GMU_1, 0xffff}, + {"rssi_delta_5gh_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GH, 0xffff}, + {"", 0x00020000, 0, SROM17_C1SRD205GH_1, 0xffff}, + {"txpa_trim_magic", 0x00020000, PRHEX_N_MORE, SROM17_TRAMMAGIC, 0xffff}, + {"", 0x00020000, 0, SROM17_TRAMMAGIC_1, 0xffff}, + {"txpa_trim_data", 0x00020000, SRFL_PRHEX, SROM17_TRAMDATA, 0xffff}, + {NULL, 0, 0, 0, 0x00} +}; + +static const sromvar_t perpath_pci_sromvars[] = { + {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff}, + {"pa2gw1a", 0x000000f0, 
SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff}, + {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff}, + {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff}, + {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff}, + {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff}, + {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff}, + {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff}, + {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff}, + {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff}, + {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff}, + {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff}, + {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff}, + {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff}, + {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff}, + {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff}, + {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x00000700, SRFL_PRHEX, 
SROM8_5GL_PA + 2, 0xffff}, + {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff}, + + /* sromrev 11 */ + {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff}, + {"pa2ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff}, + {"rxgains5gmelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0007}, + {"rxgains5gmtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x0078}, + {"rxgains5gmtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x0080}, + {"rxgains5ghelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0700}, + {"rxgains5ghtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x7800}, + {"rxgains5ghtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x8000}, + {"rxgains2gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x8000}, + {"maxp5ga", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff}, + {"", 0x00000800, 0, SROM11_5GB3B2_MAXP, 0xff00}, + {"pa5ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, 
SROM11_5GB2_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff}, + + /* sromrev 12 */ + {"maxp5gb4a", 0xfffff000, 0, SROM12_5GB42G_MAXP, 0x00ff00}, + {"pa2ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2GB0_PA_W3, 0x00ffff}, + + {"pa2g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2G40B0_PA_W3, 0x00ffff}, + {"maxp5gb0a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff}, + {"maxp5gb1a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff00}, + {"maxp5gb2a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff}, + {"maxp5gb3a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff00}, + + {"pa5ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | 
SRFL_ARRAY, SROM12_5GB2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5GB4_PA_W3, 0x00ffff}, + + {"pa5g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W3, 
0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G40B4_PA_W3, 0x00ffff}, + + {"pa5g80a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G80B4_PA_W3, 0x00ffff}, + /* sromrev 13 */ + {"rxgains2gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 
0xffffe000, 0, SROM13_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x8000}, + {NULL, 0, 0, 0, 0} +}; + +#if !defined(PHY_TYPE_N) +#define PHY_TYPE_N 4 /* N-Phy value */ +#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) */ +#if !defined(PHY_TYPE_AC) +#define PHY_TYPE_AC 11 /* AC-Phy value */ +#endif /* !defined(PHY_TYPE_AC) */ +#if !defined(PHY_TYPE_LCN20) +#define PHY_TYPE_LCN20 12 /* LCN20-Phy value */ +#endif /* !defined(PHY_TYPE_LCN20) */ +#if !defined(PHY_TYPE_NULL) +#define PHY_TYPE_NULL 0xf /* Invalid Phy value */ +#endif /* !defined(PHY_TYPE_NULL) */ + +typedef struct { + uint16 phy_type; + uint16 bandrange; + uint16 chain; + const char *vars; +} pavars_t; + +static const pavars_t pavars[] = { + /* NPHY */ + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"}, + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5ga2"}, + /* LCN20PHY */ + {PHY_TYPE_LCN20, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +static const pavars_t 
pavars_SROM12[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +static const pavars_t pavars_SROM13[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 3, "pa2g40a3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 3, "pa5ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 3, "pa5g40a3"}, + {PHY_TYPE_AC, 
WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 3, "pa5g80a3"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 1 */ +static const pavars_t pavars_bwver_1[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5gbw40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw80a0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 2 */ +static const pavars_t pavars_bwver_2[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 3 */ +static const pavars_t pavars_bwver_3[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2gccka1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +typedef struct { + uint16 phy_type; + uint16 bandrange; + const char *vars; +} povars_t; + +static const povars_t povars[] = { + /* NPHY */ + 
{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 " + "mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 " + "mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 " + "mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 " + "mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"}, + {PHY_TYPE_NULL, 0, ""} +}; + +typedef struct { + uint8 tag; /* Broadcom subtag name */ + uint32 revmask; /* Supported cis_sromrev bitmask. Some of the parameters in + * different tuples have the same name. Therefore, the MFGc tool + * needs to know which tuple to generate when seeing these + * parameters (given that we know sromrev from user input, like the + * nvram file). + */ + uint8 len; /* Length field of the tuple, note that it includes the + * subtag name (1 byte): 1 + tuple content length + */ + const char *params; +} cis_tuple_t; + +#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */ +#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */ +#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */ +#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */ + +/** this array is used by CIS creating/writing applications */ +static const cis_tuple_t cis_hnbuvars[] = { +/* tag revmask len params */ + {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */ + {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */ + {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */ + /* Unified OTP: tupple to embed USB manfid inside SDIO CIS */ + {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"}, + {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"}, + /* NOTE: subdevid is also written to boardtype. + * Need to write HNBU_BOARDTYPE to change it if it is different. 
+ */ + {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"}, + {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"}, + {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"}, + {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"}, + {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */ + {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"}, + {HNBU_BOARDFLAGS, 0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 " + "4boardflags4 4boardflags5 "}, + {HNBU_LEDS, 0xffffffff, 17, "1ledbh0 1ledbh1 1ledbh2 1ledbh3 1ledbh4 1ledbh5 " + "1ledbh6 1ledbh7 1ledbh8 1ledbh9 1ledbh10 1ledbh11 1ledbh12 1ledbh13 1ledbh14 1ledbh15"}, + {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"}, + {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"}, + {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"}, + {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 " + "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit " + "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"}, + {HNBU_RDLID, 0xffffffff, 3, "2rdlid"}, + {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g " + "0rssisav2g 0bxa2g"}, /* special case */ + {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g " + "0rssisav5g 0bxa5g"}, /* special case */ + {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"}, + {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"}, + {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"}, + {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"}, + {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"}, + {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"}, + {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */ + {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"}, + {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"}, + {HNBU_LEDDC, 0xffffffff, 3, "2leddc"}, + {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"}, + {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"}, + {HNBU_REGREV, 0xffffffff, 3, "2regrev"}, + {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g " + "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 
0tssipos5g"}, /* special case */ + {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 " + "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 " + "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"}, + {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 " + "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 " + "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"}, + {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo " + "4ofdm5ghpo"}, + {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 " + "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"}, + {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 " + "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"}, + {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 " + "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 " + "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 " + "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"}, + {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"}, + {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"}, + {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"}, + {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"}, + {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"}, + {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"}, + {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"}, + {HNBU_USBFS, 0xffffffff, 2, "1usbfs"}, + {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"}, + {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"}, + {HNBU_CUSTOM1, 0xffffffff, 5, "4customvar1"}, + {OTP_RAW, 0xffffffff, 0, ""}, /* special case */ + {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"}, + {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"}, + {HNBU_CCKBW202GPO, 0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"}, + {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"}, + {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo " + 
"4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"}, + {HNBU_MCS2GPO, 0xffffffff, 17, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"}, + {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"}, + {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"}, + {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"}, + {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"}, + {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"}, + {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis " + "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option " + "1phycal_tempdelta"}, /* special case */ + {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"}, + {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g " + "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g " + "0tssiposslope5g"}, /* special case */ + {HNBU_ACPA_C0, 0x00001800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 " + "1*4maxp5ga0 2*12pa5ga0"}, + {HNBU_ACPA_C1, 0x00001800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"}, + {HNBU_ACPA_C2, 0x00001800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"}, + {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"}, + {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 " + "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"}, + {HNBU_ACPPR_2GPO, 0xfffff800, 13, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo " + "2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo " + "2sb20in80ofdmlrbw202gpo"}, + {HNBU_ACPPR_5GPO, 0xfffff800, 59, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo " + "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo " + "4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po " + "2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"}, + {HNBU_MCS5Gx1PO, 0xfffff800, 9, "4mcsbw205gx1po 
4mcsbw405gx1po"}, + {HNBU_ACPPR_SBPO, 0xfffff800, 49, "2sb20in40hrpo 2sb20in80and160hr5glpo " + "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo " + "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo " + "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo " + "4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo " + "2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po " + }, + {HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo " + "2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo " + "2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo " + "2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo " + "2sb20in80p80lr5gpo 2dot11agduppo"}, + {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 " + "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"}, + {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 " + "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"}, + {HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"}, + {HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"}, + {HNBU_UUID, 0xffffffff, 17, "16uuid"}, + {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"}, + {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 " + "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 " + "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 " + "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */ + {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 " + "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 " + "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 " + "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case 
*/ + {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 " + "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 " + "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 " + "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */ + {HNBU_TXDUTY, 0xfffff800, 9, "2tx_duty_cycle_ofdm_40_5g " + "2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"}, + {HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 " + "0pdoffset2g40ma2 0pdoffset2g40mvalid"}, + {HNBU_ACPA_CCK_C0, 0xfffff800, 7, "2*3pa2gccka0"}, + {HNBU_ACPA_CCK_C1, 0xfffff800, 7, "2*3pa2gccka1"}, + {HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"}, + {HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"}, + {HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"}, + {HNBU_ACPA_4X4C0, 0xffffe000, 23, "1maxp2ga0 2*4pa2ga0 2*4pa2g40a0 " + "1maxp5gb0a0 1maxp5gb1a0 1maxp5gb2a0 1maxp5gb3a0 1maxp5gb4a0"}, + {HNBU_ACPA_4X4C1, 0xffffe000, 23, "1maxp2ga1 2*4pa2ga1 2*4pa2g40a1 " + "1maxp5gb0a1 1maxp5gb1a1 1maxp5gb2a1 1maxp5gb3a1 1maxp5gb4a1"}, + {HNBU_ACPA_4X4C2, 0xffffe000, 23, "1maxp2ga2 2*4pa2ga2 2*4pa2g40a2 " + "1maxp5gb0a2 1maxp5gb1a2 1maxp5gb2a2 1maxp5gb3a2 1maxp5gb4a2"}, + {HNBU_ACPA_4X4C3, 0xffffe000, 23, "1maxp2ga3 2*4pa2ga3 2*4pa2g40a3 " + "1maxp5gb0a3 1maxp5gb1a3 1maxp5gb2a3 1maxp5gb3a3 1maxp5gb4a3"}, + {HNBU_ACPA_BW20_4X4C0, 0xffffe000, 41, "2*20pa5ga0"}, + {HNBU_ACPA_BW40_4X4C0, 0xffffe000, 41, "2*20pa5g40a0"}, + {HNBU_ACPA_BW80_4X4C0, 0xffffe000, 41, "2*20pa5g80a0"}, + {HNBU_ACPA_BW20_4X4C1, 0xffffe000, 41, "2*20pa5ga1"}, + {HNBU_ACPA_BW40_4X4C1, 0xffffe000, 41, "2*20pa5g40a1"}, + {HNBU_ACPA_BW80_4X4C1, 0xffffe000, 41, "2*20pa5g80a1"}, + {HNBU_ACPA_BW20_4X4C2, 0xffffe000, 41, "2*20pa5ga2"}, + {HNBU_ACPA_BW40_4X4C2, 0xffffe000, 41, "2*20pa5g40a2"}, + {HNBU_ACPA_BW80_4X4C2, 0xffffe000, 41, "2*20pa5g80a2"}, + {HNBU_ACPA_BW20_4X4C3, 0xffffe000, 41, "2*20pa5ga3"}, + 
{HNBU_ACPA_BW40_4X4C3, 0xffffe000, 41, "2*20pa5g40a3"}, + {HNBU_ACPA_BW80_4X4C3, 0xffffe000, 41, "2*20pa5g80a3"}, + {HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"}, + {HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"}, + {HNBU_TXBFRPCALS, 0xfffff800, 11, + "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */ + {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"}, + {HNBU_MACADDR2, 0xffffffff, 7, "6macaddr2"}, /* special case */ + {0xFF, 0xffffffff, 0, ""} +}; + +#endif /* _bcmsrom_tbl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmstdlib_s.h b/bcmdhd.100.10.315.x/include/bcmstdlib_s.h new file mode 100644 index 0000000..4c38259 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmstdlib_s.h @@ -0,0 +1,44 @@ +/* + * Broadcom Secure Standard Library. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * * $Id $ + */ + +#ifndef _bcmstdlib_s_h_ +#define _bcmstdlib_s_h_ + +#if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \ + defined(__STDC_WANT_LIB_EXT1__)) +extern int memmove_s(void *dest, size_t destsz, const void *src, size_t n); +extern int memcpy_s(void *dest, size_t destsz, const void *src, size_t n); +extern int memset_s(void *dest, size_t destsz, int c, size_t n); +#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */ +#if !defined(FREEBSD) && !defined(BCM_USE_PLATFORM_STRLCPY) +extern size_t strlcpy(char *dest, const char *src, size_t size); +#endif // endif +extern size_t strlcat_s(char *dest, const char *src, size_t size); + +#endif /* _bcmstdlib_s_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmtcp.h b/bcmdhd.100.10.315.x/include/bcmtcp.h new file mode 100644 index 0000000..dd8b9c9 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmtcp.h @@ -0,0 +1,92 @@ +/* + * Fundamental constants relating to TCP Protocol + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmtcp.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _bcmtcp_h_ +#define _bcmtcp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. */ +#include + +#define TCP_SRC_PORT_OFFSET 0 /* TCP source port offset */ +#define TCP_DEST_PORT_OFFSET 2 /* TCP dest port offset */ +#define TCP_SEQ_NUM_OFFSET 4 /* TCP sequence number offset */ +#define TCP_ACK_NUM_OFFSET 8 /* TCP acknowledgement number offset */ +#define TCP_HLEN_OFFSET 12 /* HLEN and reserved bits offset */ +#define TCP_FLAGS_OFFSET 13 /* FLAGS and reserved bits offset */ +#define TCP_CHKSUM_OFFSET 16 /* TCP body checksum offset */ + +#define TCP_PORT_LEN 2 /* TCP port field length */ + +/* 8bit TCP flag field */ +#define TCP_FLAG_URG 0x20 +#define TCP_FLAG_ACK 0x10 +#define TCP_FLAG_PSH 0x08 +#define TCP_FLAG_RST 0x04 +#define TCP_FLAG_SYN 0x02 +#define TCP_FLAG_FIN 0x01 + +#define TCP_HLEN_MASK 0xf000 +#define TCP_HLEN_SHIFT 12 + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint32 seq_num; /* TCP Sequence Number */ + uint32 ack_num; /* TCP Sequence Number */ + uint16 hdrlen_rsvd_flags; /* Header length, reserved bits and flags */ + uint16 tcpwin; /* TCP window */ + uint16 chksum; /* Segment checksum with pseudoheader */ + uint16 urg_ptr; /* Points to seq-num of byte following urg data */ +} BWL_POST_PACKED_STRUCT; + +#define TCP_MIN_HEADER_LEN 20 + +#define TCP_HDRLEN_MASK 0xf0 +#define TCP_HDRLEN_SHIFT 4 +#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT) + +#define TCP_FLAGS_MASK 0x1f +#define TCP_FLAGS(hdrlen) ((hdrlen) & 
TCP_FLAGS_MASK) + +/* This marks the end of a packed structure section. */ +#include + +/* To address round up by 32bit. */ +#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31)) /* a >= b */ +#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31)) /* a =< b */ +#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b) /* a > b */ +#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b) /* a < b */ + +#endif /* #ifndef _bcmtcp_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmtlv.h b/bcmdhd.100.10.315.x/include/bcmtlv.h new file mode 100644 index 0000000..0218765 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmtlv.h @@ -0,0 +1,334 @@ +/* + * TLV and XTLV support + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * <> + * + * $Id: $ + */ + +#ifndef _bcmtlv_h_ +#define _bcmtlv_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* begin tlvs - used in 802.11 IEs etc. 
*/ + +/* type(aka id)/length/value buffer triple */ +typedef struct bcm_tlv { + uint8 id; + uint8 len; + uint8 data[1]; +} bcm_tlv_t; + +/* size of tlv including data */ +#define BCM_TLV_SIZE(_tlv) ((_tlv) ? (OFFSETOF(bcm_tlv_t, data) + (_tlv)->len) : 0) + +/* get next tlv - no length checks */ +#define BCM_TLV_NEXT(_tlv) (bcm_tlv_t *)((uint8 *)(_tlv)+ BCM_TLV_SIZE(_tlv)) + +/* tlv length is restricted to 1 byte */ +#define BCM_TLV_MAX_DATA_SIZE (255) + +/* tlv header - two bytes */ +#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data)) + +/* Check that bcm_tlv_t fits into the given buffer len */ +#define bcm_valid_tlv(elt, buflen) (\ + ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \ + ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len))) + +/* type(aka id)/length/ext/value buffer */ +typedef struct bcm_tlv_ext { + uint8 id; + uint8 len; + uint8 ext; + uint8 data[1]; +} bcm_tlv_ext_t; + +/* get next tlv_ext - no length checks */ +#define BCM_TLV_EXT_NEXT(_tlv_ext) \ + (bcm_tlv_ext_t *)((uint8 *)(_tlv_ext)+ BCM_TLV_EXT_SIZE(_tlv_ext)) + +/* tlv_ext length is restricted to 1 byte */ +#define BCM_TLV_EXT_MAX_DATA_SIZE (254) + +/* tlv_ext header - three bytes */ +#define BCM_TLV_EXT_HDR_SIZE (OFFSETOF(bcm_tlv_ext_t, data)) + +/* size of tlv_ext including data */ +#define BCM_TLV_EXT_SIZE(_tlv_ext) (BCM_TLV_EXT_HDR_SIZE + (_tlv_ext)->len) + +/* find the next tlv */ +bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen); + +/* find the tlv for a given id */ +bcm_tlv_t *bcm_parse_tlvs(const void *buf, uint buflen, uint key); + +/* + * Traverse tlvs and return pointer to the first tlv that + * matches the key. 
Return NULL if not found or tlv len < min_bodylen + */ +bcm_tlv_t *bcm_parse_tlvs_min_bodylen(const void *buf, int buflen, uint key, int min_bodylen); + +/* parse tlvs for dot11 - same as parse_tlvs but supports 802.11 id extension */ +bcm_tlv_t *bcm_parse_tlvs_dot11(const void *buf, int buflen, uint key, bool id_ext); + +/* same as parse_tlvs, but stops when found id > key */ +const bcm_tlv_t *bcm_parse_ordered_tlvs(const void *buf, int buflen, uint key); + +/* find a tlv with DOT11_MNG_PROPR_ID as id, and the given oui and type */ + bcm_tlv_t *bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui, + uint8 *type, uint type_len); + +/* write tlv at dst and return next tlv ptr */ +uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst); + +/* write tlv_ext at dst and return next tlv ptr */ +uint8 *bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst); + +/* write tlv at dst if space permits and return next tlv ptr */ +uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, + int dst_maxlen); + +/* copy a tlv and return next tlv ptr */ +uint8 *bcm_copy_tlv(const void *src, uint8 *dst); + +/* copy a tlv if space permits and return next tlv ptr */ +uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen); + +/* end tlvs */ + +/* begin xtlv - used for iovars, nan attributes etc. */ + +/* bcm type(id), length, value with w/16 bit id/len. The structure below + * is nominal, and is used to support variable length id and type. See + * xtlv options below. + */ +typedef struct bcm_xtlv { + uint16 id; + uint16 len; + uint8 data[1]; +} bcm_xtlv_t; + +/* xtlv options */ +#define BCM_XTLV_OPTION_NONE 0x0000 +#define BCM_XTLV_OPTION_ALIGN32 0x0001 /* 32bit alignment of type.len.data */ +#define BCM_XTLV_OPTION_IDU8 0x0002 /* shorter id */ +#define BCM_XTLV_OPTION_LENU8 0x0004 /* shorted length */ +typedef uint16 bcm_xtlv_opts_t; + +/* header size. depends on options. 
Macros names ending w/ _EX are where + * options are explcitly specified that may be less common. The ones + * without use default values that correspond to ...OPTION_NONE + */ + +/* xtlv header size depends on options */ +#define BCM_XTLV_HDR_SIZE 4 +#define BCM_XTLV_HDR_SIZE_EX(_opts) bcm_xtlv_hdr_size(_opts) + +/* note: xtlv len only stores the value's length without padding */ +#define BCM_XTLV_LEN(_elt) ltoh16_ua(&(_elt)->len) +#define BCM_XTLV_LEN_EX(_elt, _opts) bcm_xtlv_len(_elt, _opts) + +#define BCM_XTLV_ID(_elt) ltoh16_ua(&(_elt)->id) +#define BCM_XTLV_ID_EX(_elt, _opts) bcm_xtlv_id(_elt, _opts) + +/* entire size of the XTLV including header, data, and optional padding */ +#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts) +#define BCM_XTLV_SIZE_EX(_elt, _opts) bcm_xtlv_size(_elt, _opts) + +/* max xtlv data size */ +#define BCM_XTLV_MAX_DATA_SIZE 65535 +#define BCM_XTLV_MAX_DATA_SIZE_EX(_opts) ((_opts & BCM_XTLV_OPTION_LENU8) ? \ + 255 : 65535) + +/* descriptor of xtlv data, packing(src) and unpacking(dst) support */ +typedef struct { + uint16 type; + uint16 len; + void *ptr; /* ptr to memory location */ +} xtlv_desc_t; + +/* xtlv buffer - packing/unpacking support */ +struct bcm_xtlvbuf { + bcm_xtlv_opts_t opts; + uint16 size; + uint8 *head; /* point to head of buffer */ + uint8 *buf; /* current position of buffer */ + /* allocated buffer may follow, but not necessarily */ +}; +typedef struct bcm_xtlvbuf bcm_xtlvbuf_t; + +/* valid xtlv ? */ +bool bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts); + +/* return the next xtlv element, and update buffer len (remaining). Buffer length + * updated includes padding as specified by options + */ +bcm_xtlv_t *bcm_next_xtlv(const bcm_xtlv_t *elt, int *buf_len, bcm_xtlv_opts_t opts); + +/* initialize an xtlv buffer. Use options specified for packing/unpacking using + * the buffer. Caller is responsible for allocating both buffers. 
+ */ +int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, + bcm_xtlv_opts_t opts); + +/* length of data in the xtlv buffer */ +uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf); + +/* remaining space in the xtlv buffer */ +uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf); + +/* write ptr */ +uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf); + +/* head */ +uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf); + +/* put a data buffer into xtlv */ +int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n); + +/* put one or more u16 elts into xtlv */ +int bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n); + +/* put one or more u32 elts into xtlv */ +int bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n); + +/* put one or more u64 elts into xtlv */ +int bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n); + +/* note: there are no get equivalent of integer unpacking, becasuse bcmendian.h + * can be used directly using pointers returned in the buffer being processed. + */ + +/* unpack a single xtlv entry, advances buffer and copies data to dst_data on match + * type and length match must be exact + */ +int bcm_unpack_xtlv_entry(const uint8 **buf, uint16 expected_type, uint16 expected_len, + uint8 *dst_data, bcm_xtlv_opts_t opts); + +/* packs an xtlv into buffer, and advances buffer, decreements buffer length. 
+ * buffer length is checked and must be >= size of xtlv - otherwise BCME_BADLEN + */ +int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len, + const uint8 *src_data, bcm_xtlv_opts_t opts); + +/* accessors and lengths for element given options */ +int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); +int bcm_xtlv_hdr_size(bcm_xtlv_opts_t opts); +int bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); +int bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); +int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts); + +/* compute size needed for number of tlvs whose total data len is given */ +#define BCM_XTLV_SIZE_FOR_TLVS(_data_len, _num_tlvs, _opts) (\ + bcm_xtlv_size_for_data(_data_len, _opts) + (\ + (_num_tlvs) * BCM_XTLV_HDR_SIZE_EX(_opts))) + +/* unsafe copy xtlv */ +#define BCM_XTLV_BCOPY(_src, _dst, _opts) \ + bcm_xtlv_bcopy(_src, _dst, BCM_XTLV_MAX_DATA_SIZE_EX(_opts), \ + BCM_XTLV_MAX_DATA_SIZE_EX(_opts), _opts) + +/* copy xtlv - note: src->dst bcopy order - to be compatible w/ tlv version */ +bcm_xtlv_t* bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst, + int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts); + +/* callback for unpacking xtlv from a buffer into context. 
*/ +typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, const uint8 *buf, + uint16 type, uint16 len); + +/* unpack a tlv buffer using buffer, options, and callback */ +int bcm_unpack_xtlv_buf(void *ctx, const uint8 *buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn); + +/* unpack a set of tlvs from the buffer using provided xtlv descriptors */ +int bcm_unpack_xtlv_buf_to_mem(uint8 *buf, int *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* pack a set of tlvs into buffer using provided xtlv descriptors */ +int bcm_pack_xtlv_buf_from_mem(uint8 **buf, uint16 *buflen, + const xtlv_desc_t *items, bcm_xtlv_opts_t opts); + +/* return data pointer and data length of a given id from xtlv buffer + * data_len may be NULL + */ +const uint8* bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen, + uint16 id, uint16 *datalen, bcm_xtlv_opts_t opts); + +/* callback to return next tlv id and len to pack, if there is more tlvs to come and + * options e.g. alignment + */ +typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len); + +/* callback to pack the tlv into length validated buffer */ +typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx, + uint16 tlv_id, uint16 tlv_len, uint8* buf); + +/* pack a set of tlvs into buffer using get_next to interate */ +int bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next, + bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen); + +/* pack an xtlv. does not do any error checking. 
if data is not NULL + * data of given length is copied to buffer (xtlv) + */ +void bcm_xtlv_pack_xtlv(bcm_xtlv_t *xtlv, uint16 type, uint16 len, + const uint8 *data, bcm_xtlv_opts_t opts); + +/* unpack an xtlv and return ptr to data, and data length */ +void bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len, + const uint8 **data, bcm_xtlv_opts_t opts); + +/* end xtlvs */ + +/* length value pairs */ +struct bcm_xlv { + uint16 len; + uint8 data[1]; +}; +typedef struct bcm_xlv bcm_xlv_t; + +struct bcm_xlvp { + uint16 len; + uint8 *data; +}; +typedef struct bcm_xlvp bcm_xlvp_t; + +struct bcm_const_xlvp { + uint16 len; + const uint8 *data; +}; +typedef struct bcm_const_xlvp bcm_const_xlvp_t; + +/* end length value pairs */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _bcmtlv_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmudp.h b/bcmdhd.100.10.315.x/include/bcmudp.h new file mode 100644 index 0000000..dc0d488 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmudp.h @@ -0,0 +1,60 @@ +/* + * Fundamental constants relating to UDP Protocol + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmudp.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _bcmudp_h_ +#define _bcmudp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. */ +#include + +/* UDP header */ +#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */ +#define UDP_LEN_OFFSET 4 /* UDP length offset */ +#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */ + +#define UDP_HDR_LEN 8 /* UDP header length */ +#define UDP_PORT_LEN 2 /* UDP port length */ + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmudp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint16 len; /* Number of bytes in datagram including header */ + uint16 chksum; /* entire datagram checksum with pseudoheader */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* #ifndef _bcmudp_h_ */ diff --git a/bcmdhd.100.10.315.x/include/bcmutils.h b/bcmdhd.100.10.315.x/include/bcmutils.h new file mode 100644 index 0000000..e750f25 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/bcmutils.h @@ -0,0 +1,1329 @@ +/* + * Misc useful os-independent macros and functions. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmutils.h 769659 2018-06-27 05:22:10Z $ + */ + +#ifndef _bcmutils_h_ +#define _bcmutils_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif // endif + +#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) +#ifdef FREEBSD +#define bcm_strncat_s(dst, noOfElements, src, count) strcat((dst), (src)) +#else +#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count)) +#endif /* FREEBSD */ +#define bcm_snprintf_s snprintf +#define bcm_sprintf_s snprintf + +/* + * #define bcm_strcpy_s(dst, count, src) strncpy((dst), (src), (count)) + * Use bcm_strcpy_s instead as it is a safer option + * bcm_strcat_s: Use bcm_strncat_s as a safer option + * + */ + +#define BCM_BIT(x) (1 << (x)) + +/* ctype replacement */ +#define _BCM_U 0x01 /* upper */ +#define _BCM_L 0x02 /* lower */ +#define _BCM_D 0x04 /* digit */ +#define _BCM_C 0x08 /* cntrl */ +#define _BCM_P 0x10 /* punct */ +#define _BCM_S 0x20 /* white space (space/lf/tab) */ +#define _BCM_X 0x40 /* hex digit */ +#define _BCM_SP 0x80 /* hard space (0x20) */ + +extern const unsigned char bcm_ctype[]; +#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)]) + +#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0) +#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0) +#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0) +#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0) +#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0) +#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0) +#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0) +#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0) +#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0) +#define bcm_tolower(c) (bcm_isupper((c)) ? 
((c) + 'a' - 'A') : (c)) +#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c)) + +#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx) + +#define KB(bytes) (((bytes) + 1023) / 1024) + +/* Buffer structure for collecting string-formatted data +* using bcm_bprintf() API. +* Use bcm_binit() to initialize before use +*/ + +struct bcmstrbuf { + char *buf; /* pointer to current position in origbuf */ + unsigned int size; /* current (residual) size in bytes */ + char *origbuf; /* unmodified pointer to orignal buffer */ + unsigned int origsize; /* unmodified orignal buffer size in bytes */ +}; + +#define BCMSTRBUF_LEN(b) (b->size) +#define BCMSTRBUF_BUF(b) (b->buf) + +/* ** driver-only section ** */ +#ifdef BCMDRIVER +#include +#include +#include + +#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */ + +/* + * Spin at most 'us' microseconds while 'exp' is true. + * Caller should explicitly test 'exp' when this completes + * and take appropriate error action if 'exp' is still true. 
+ */ +#ifndef SPINWAIT_POLL_PERIOD +#define SPINWAIT_POLL_PERIOD 10U +#endif // endif + +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1U); \ + while (((exp) != 0) && (uint)(countdown >= SPINWAIT_POLL_PERIOD)) { \ + OSL_DELAY(SPINWAIT_POLL_PERIOD); \ + countdown -= SPINWAIT_POLL_PERIOD; \ + } \ +} + +/* forward definition of ether_addr structure used by some function prototypes */ + +struct ether_addr; + +extern int ether_isbcast(const void *ea); +extern int ether_isnulladdr(const void *ea); + +#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */ +#define CORE_SLAVE_PORT_0 0 +#define CORE_SLAVE_PORT_1 1 +#define CORE_BASE_ADDR_0 0 +#define CORE_BASE_ADDR_1 1 + +/* externs */ +/* packet */ +extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pkttotlen(osl_t *osh, void *p); +extern void *pktlast(osl_t *osh, void *p); +extern uint pktsegcnt(osl_t *osh, void *p); +extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset); +extern void *pktoffset(osl_t *osh, void *p, uint offset); +/* Add to adjust 802.1x priority */ +extern void pktset8021xprio(void *pkt, int prio); + +/* Get priority from a packet and pass it back in scb (or equiv) */ +#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */ +#define PKTPRIO_VLAN 0x200 /* VLAN prio found */ +#define PKTPRIO_UPD 0x400 /* DSCP used to update VLAN prio */ +#define PKTPRIO_DSCP 0x800 /* DSCP prio found */ + +/* DSCP type definitions (RFC4594) */ +/* AF1x: High-Throughput Data (RFC2597) */ +#define DSCP_AF11 0x0A +#define DSCP_AF12 0x0C +#define DSCP_AF13 0x0E +/* AF2x: Low-Latency Data (RFC2597) */ +#define DSCP_AF21 0x12 +#define DSCP_AF22 0x14 +#define DSCP_AF23 0x16 +/* AF3x: Multimedia Streaming (RFC2597) */ +#define DSCP_AF31 0x1A +#define DSCP_AF32 0x1C +#define DSCP_AF33 0x1E +/* EF: Telephony (RFC3246) */ +#define DSCP_EF 
0x2E + +extern uint pktsetprio(void *pkt, bool update_vtag); +extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag); +extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp); + +/* ethernet address */ +extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); +extern int bcm_ether_atoe(const char *p, struct ether_addr *ea); + +/* ip address */ +struct ipv4_addr; +extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf); +extern char *bcm_ipv6_ntoa(void *ipv6, char *buf); +extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip); + +/* delay */ +extern void bcm_mdelay(uint ms); +/* variable access */ +#if defined(BCM_RECLAIM) +extern bool _nvram_reclaim_enb; +#define NVRAM_RECLAIM_ENAB() (_nvram_reclaim_enb) +#define NVRAM_RECLAIM_CHECK(name) \ + if (NVRAM_RECLAIM_ENAB() && (bcm_attach_part_reclaimed == TRUE)) { \ + *(char*) 0 = 0; /* TRAP */ \ + return NULL; \ + } +#else /* BCM_RECLAIM */ +#define NVRAM_RECLAIM_CHECK(name) +#endif /* BCM_RECLAIM */ + +extern char *getvar(char *vars, const char *name); +extern int getintvar(char *vars, const char *name); +extern int getintvararray(char *vars, const char *name, int index); +extern int getintvararraysize(char *vars, const char *name); + +/* Read an array of values from a possibly slice-specific nvram string */ +extern int get_uint8_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor, + const char* name, uint8* dest_array, uint dest_size); +extern int get_int16_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor, + const char* name, int16* dest_array, uint dest_size); +/* Prepend a slice-specific accessor to an nvram string name */ +extern int get_slicespecific_var_name(osl_t *osh, char *vars_table_accessor, + const char *name, char **name_out); + +extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); +#define bcm_perf_enable() +#define bcmstats(fmt) +#define bcmlog(fmt, a1, a2) +#define bcmdumplog(buf, size) *buf = '\0' 
+#define bcmdumplogent(buf, idx) -1 + +#define TSF_TICKS_PER_MS 1000 +#define TS_ENTER 0xdeadbeef /* Timestamp profiling enter */ +#define TS_EXIT 0xbeefcafe /* Timestamp profiling exit */ + +#define bcmtslog(tstamp, fmt, a1, a2) +#define bcmprinttslogs() +#define bcmprinttstamp(us) +#define bcmdumptslog(b) + +extern char *bcm_nvram_vars(uint *length); +extern int bcm_nvram_cache(void *sih); + +/* Support for sharing code across in-driver iovar implementations. + * The intent is that a driver use this structure to map iovar names + * to its (private) iovar identifiers, and the lookup function to + * find the entry. Macros are provided to map ids and get/set actions + * into a single number space for a switch statement. + */ + +/* iovar structure */ +typedef struct bcm_iovar { + const char *name; /* name for lookup and display */ + uint16 varid; /* id for switch */ + uint16 flags; /* driver-specific flag bits */ + uint8 flags2; /* driver-specific flag bits */ + uint8 type; /* base type of argument */ + uint16 minlen; /* min length for buffer vars */ +} bcm_iovar_t; + +/* varid definitions are per-driver, may use these get/set bits */ + +/* IOVar action bits for id mapping */ +#define IOV_GET 0 /* Get an iovar */ +#define IOV_SET 1 /* Set an iovar */ + +/* Varid to actionid mapping */ +#define IOV_GVAL(id) ((id) * 2) +#define IOV_SVAL(id) ((id) * 2 + IOV_SET) +#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET) +#define IOV_ID(actionid) (actionid >> 1) + +/* flags are per-driver based on driver attributes */ + +extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); +extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set); + +/* ioctl structure */ +typedef struct wlc_ioctl_cmd { + uint16 cmd; /**< IOCTL command */ + uint16 flags; /**< IOCTL command flags */ + int16 min_len; /**< IOCTL command minimum argument len (in bytes) */ +} wlc_ioctl_cmd_t; + +#if defined(WLTINYDUMP) || 
defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); +#endif // endif +#endif /* BCMDRIVER */ + +/* string */ +extern int bcm_atoi(const char *s); +extern ulong bcm_strtoul(const char *cp, char **endp, uint base); +extern uint64 bcm_strtoull(const char *cp, char **endp, uint base); +extern char *bcmstrstr(const char *haystack, const char *needle); +extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len); +extern char *bcmstrcat(char *dest, const char *src); +extern char *bcmstrncat(char *dest, const char *src, uint size); +extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); +char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); +int bcmstricmp(const char *s1, const char *s2); +int bcmstrnicmp(const char* s1, const char* s2, int cnt); + +/* Base type definitions */ +#define IOVT_VOID 0 /* no value (implictly set only) */ +#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */ +#define IOVT_INT8 2 /* integer values are range-checked */ +#define IOVT_UINT8 3 /* unsigned int 8 bits */ +#define IOVT_INT16 4 /* int 16 bits */ +#define IOVT_UINT16 5 /* unsigned int 16 bits */ +#define IOVT_INT32 6 /* int 32 bits */ +#define IOVT_UINT32 7 /* unsigned int 32 bits */ +#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */ +#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER) + +/* Initializer for IOV type strings */ +#define BCM_IOV_TYPE_INIT { \ + "void", \ + "bool", \ + "int8", \ + "uint8", \ + "int16", \ + "uint16", \ + "int32", \ + "uint32", \ + "buffer", \ + "" } + +#define BCM_IOVT_IS_INT(type) (\ + (type == IOVT_BOOL) || \ + (type == IOVT_INT8) || \ + (type == IOVT_UINT8) || \ + (type == IOVT_INT16) || \ + (type == IOVT_UINT16) || \ + (type == IOVT_INT32) || \ + (type == IOVT_UINT32)) + +/* ** driver/apps-shared section ** */ + +#define 
BCME_STRLEN 64 /* Max string length for BCM errors */ +#define VALID_BCMERROR(e) valid_bcmerror(e) + +#ifdef DBG_BUS +/** tracks non typical execution paths, use gdb with arm sim + firmware dump to read counters */ +#define DBG_BUS_INC(s, cnt) ((s)->dbg_bus->cnt++) +#else +#define DBG_BUS_INC(s, cnt) +#endif /* DBG_BUS */ + +/* + * error codes could be added but the defined ones shouldn't be changed/deleted + * these error codes are exposed to the user code + * when ever a new error code is added to this list + * please update errorstring table with the related error string and + * update osl files with os specific errorcode map +*/ + +#define BCME_OK 0 /* Success */ +#define BCME_ERROR -1 /* Error generic */ +#define BCME_BADARG -2 /* Bad Argument */ +#define BCME_BADOPTION -3 /* Bad option */ +#define BCME_NOTUP -4 /* Not up */ +#define BCME_NOTDOWN -5 /* Not down */ +#define BCME_NOTAP -6 /* Not AP */ +#define BCME_NOTSTA -7 /* Not STA */ +#define BCME_BADKEYIDX -8 /* BAD Key Index */ +#define BCME_RADIOOFF -9 /* Radio Off */ +#define BCME_NOTBANDLOCKED -10 /* Not band locked */ +#define BCME_NOCLK -11 /* No Clock */ +#define BCME_BADRATESET -12 /* BAD Rate valueset */ +#define BCME_BADBAND -13 /* BAD Band */ +#define BCME_BUFTOOSHORT -14 /* Buffer too short */ +#define BCME_BUFTOOLONG -15 /* Buffer too long */ +#define BCME_BUSY -16 /* Busy */ +#define BCME_NOTASSOCIATED -17 /* Not Associated */ +#define BCME_BADSSIDLEN -18 /* Bad SSID len */ +#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */ +#define BCME_BADCHAN -20 /* Bad Channel */ +#define BCME_BADADDR -21 /* Bad Address */ +#define BCME_NORESOURCE -22 /* Not Enough Resources */ +#define BCME_UNSUPPORTED -23 /* Unsupported */ +#define BCME_BADLEN -24 /* Bad length */ +#define BCME_NOTREADY -25 /* Not Ready */ +#define BCME_EPERM -26 /* Not Permitted */ +#define BCME_NOMEM -27 /* No Memory */ +#define BCME_ASSOCIATED -28 /* Associated */ +#define BCME_RANGE -29 /* Not In Range */ +#define 
BCME_NOTFOUND -30 /* Not Found */ +#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */ +#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */ +#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */ +#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */ +#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */ +#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */ +#define BCME_VERSION -37 /* Incorrect version */ +#define BCME_TXFAIL -38 /* TX failure */ +#define BCME_RXFAIL -39 /* RX failure */ +#define BCME_NODEVICE -40 /* Device not present */ +#define BCME_NMODE_DISABLED -41 /* NMODE disabled */ +#define BCME_HOFFLOAD_RESIDENT -42 /* offload resident */ +#define BCME_SCANREJECT -43 /* reject scan request */ +#define BCME_USAGE_ERROR -44 /* WLCMD usage error */ +#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */ +#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */ +#define BCME_DISABLED -47 /* Disabled in this build */ +#define BCME_DECERR -48 /* Decrypt error */ +#define BCME_ENCERR -49 /* Encrypt error */ +#define BCME_MICERR -50 /* Integrity/MIC error */ +#define BCME_REPLAY -51 /* Replay */ +#define BCME_IE_NOTFOUND -52 /* IE not found */ +#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */ +#define BCME_NOT_GC -54 /* expecting a group client */ +#define BCME_PRS_REQ_FAILED -55 /* GC presence req failed to sent */ +#define BCME_NO_P2P_SE -56 /* Could not find P2P-Subelement */ +#define BCME_NOA_PND -57 /* NoA pending, CB shuld be NULL */ +#define BCME_FRAG_Q_FAILED -58 /* queueing 80211 frag failedi */ +#define BCME_GET_AF_FAILED -59 /* Get p2p AF pkt failed */ +#define BCME_MSCH_NOTREADY -60 /* scheduler not ready */ +#define BCME_IOV_LAST_CMD -61 /* last batched iov sub-command */ +#define BCME_MINIPMU_CAL_FAIL -62 /* MiniPMU cal failed */ +#define BCME_RCAL_FAIL -63 /* Rcal failed */ +#define BCME_LPF_RCCAL_FAIL -64 /* RCCAL failed */ +#define BCME_DACBUF_RCCAL_FAIL -65 /* RCCAL failed */ +#define BCME_VCOCAL_FAIL 
-66 /* VCOCAL failed */ +#define BCME_BANDLOCKED -67 /* interface is restricted to a band */ +#define BCME_DNGL_DEVRESET -68 /* dongle re-attach during DEVRESET */ +#define BCME_LAST BCME_DNGL_DEVRESET + +#define BCME_NOTENABLED BCME_DISABLED + +/* This error code is *internal* to the driver, and is not propogated to users. It should + * only be used by IOCTL patch handlers as an indication that it did not handle the IOCTL. + * (Since the error code is internal, an entry in 'BCMERRSTRINGTABLE' is not required, + * nor does it need to be part of any OSL driver-to-OS error code mapping). + */ +#define BCME_IOCTL_PATCH_UNSUPPORTED -9999 +#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED) + #error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED" +#endif // endif + +/* These are collection of BCME Error strings */ +#define BCMERRSTRINGTABLE { \ + "OK", \ + "Undefined error", \ + "Bad Argument", \ + "Bad Option", \ + "Not up", \ + "Not down", \ + "Not AP", \ + "Not STA", \ + "Bad Key Index", \ + "Radio Off", \ + "Not band locked", \ + "No clock", \ + "Bad Rate valueset", \ + "Bad Band", \ + "Buffer too short", \ + "Buffer too long", \ + "Busy", \ + "Not Associated", \ + "Bad SSID len", \ + "Out of Range Channel", \ + "Bad Channel", \ + "Bad Address", \ + "Not Enough Resources", \ + "Unsupported", \ + "Bad length", \ + "Not Ready", \ + "Not Permitted", \ + "No Memory", \ + "Associated", \ + "Not In Range", \ + "Not Found", \ + "WME Not Enabled", \ + "TSPEC Not Found", \ + "ACM Not Supported", \ + "Not WME Association", \ + "SDIO Bus Error", \ + "Dongle Not Accessible", \ + "Incorrect version", \ + "TX Failure", \ + "RX Failure", \ + "Device Not Present", \ + "NMODE Disabled", \ + "Host Offload in device", \ + "Scan Rejected", \ + "WLCMD usage error", \ + "WLCMD ioctl error", \ + "RWL serial port error", \ + "Disabled", \ + "Decrypt error", \ + "Encrypt error", \ + "MIC error", \ + "Replay", \ + "IE not found", \ + "Data not found", \ + "NOT GC", \ + "PRS REQ FAILED", \ + "NO P2P 
SubElement", \ + "NOA Pending", \ + "FRAG Q FAILED", \ + "GET ActionFrame failed", \ + "scheduler not ready", \ + "Last IOV batched sub-cmd", \ + "Mini PMU Cal failed", \ + "R-cal failed", \ + "LPF RC Cal failed", \ + "DAC buf RC Cal failed", \ + "VCO Cal failed", \ + "band locked", \ + "Dongle Devreset", \ +} + +#ifndef ABS +#define ABS(a) (((a) < 0) ? -(a) : (a)) +#endif /* ABS */ + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif /* MIN */ + +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif /* MAX */ + +/* limit to [min, max] */ +#ifndef LIMIT_TO_RANGE +#define LIMIT_TO_RANGE(x, min, max) \ + ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x))) +#endif /* LIMIT_TO_RANGE */ + +/* limit to max */ +#ifndef LIMIT_TO_MAX +#define LIMIT_TO_MAX(x, max) \ + (((x) > (max) ? (max) : (x))) +#endif /* LIMIT_TO_MAX */ + +/* limit to min */ +#ifndef LIMIT_TO_MIN +#define LIMIT_TO_MIN(x, min) \ + (((x) < (min) ? (min) : (x))) +#endif /* LIMIT_TO_MIN */ + +#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \ + (0xffffffff - (prev) + (curr) + 1)) +#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) +#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define ROUNDDN(p, align) ((p) & ~((align) - 1)) +#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0) +#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \ + & ~((boundary) - 1)) +#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \ + & ~((boundary) - 1)) +#define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0) +#define VALID_MASK(mask) !((mask) & ((mask) + 1)) + +#ifndef OFFSETOF +#ifdef __ARMCC_VERSION +/* + * The ARM RVCT compiler complains when using OFFSETOF where a constant + * expression is expected, such as an initializer for a static object. + * offsetof from the runtime library doesn't have that problem. 
+ */ +#include <stddef.h> +#define OFFSETOF(type, member) offsetof(type, member) +#else +# if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8)) +/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */ +# define OFFSETOF(type, member) __builtin_offsetof(type, member) +# else +# define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member) +# endif /* GCC 4.8 or newer */ +#endif /* __ARMCC_VERSION */ +#endif /* OFFSETOF */ + +#ifndef CONTAINEROF +#define CONTAINEROF(ptr, type, member) ((type *)((char *)(ptr) - OFFSETOF(type, member))) +#endif /* CONTAINEROF */ + +/* substruct size up to and including a member of the struct */ +#ifndef STRUCT_SIZE_THROUGH +#define STRUCT_SIZE_THROUGH(sptr, fname) \ + (((uint8*)&((sptr)->fname) - (uint8*)(sptr)) + sizeof((sptr)->fname)) +#endif // endif + +/* Extracting the size of element in a structure */ +#define SIZE_OF(type, field) sizeof(((type *)0)->field) + +#ifndef ARRAYSIZE +#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0])) +#endif // endif + +#ifndef ARRAYLAST /* returns pointer to last array element */ +#define ARRAYLAST(a) (&a[ARRAYSIZE(a)-1]) +#endif // endif + +/* Calculates the required pad size. 
This is mainly used in register structures */ +#define PADSZ(start, end) ((((end) - (start)) / 4) + 1) + +/* Reference a function; used to prevent a static function from being optimized out */ +extern void *_bcmutils_dummy_fn; +#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f)) + +/* bit map related macros */ +#ifndef setbit +#ifndef NBBY /* the BSD family defines NBBY */ +#define NBBY 8 /* 8 bits per byte */ +#endif /* #ifndef NBBY */ +#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS +extern void setbit(void *array, uint bit); +extern void clrbit(void *array, uint bit); +extern bool isset(const void *array, uint bit); +extern bool isclr(const void *array, uint bit); +#else +#define setbit(a, i) (((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY)) +#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY))) +#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) +#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0) +#endif // endif +#endif /* setbit */ + +/* read/write/clear field in a consecutive bits in an octet array. + * 'addr' is the octet array's start byte address + * 'size' is the octet array's byte size + * 'stbit' is the value's start bit offset + * 'nbits' is the value's bit size + * This set of utilities are for convenience. Don't use them + * in time critical/data path as there's a great overhead in them. 
+ */ +void setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val); +uint32 getbits(const uint8 *addr, uint size, uint stbit, uint nbits); +#define clrbits(addr, size, stbit, nbits) setbits(addr, size, stbit, nbits, 0) + +extern void set_bitrange(void *array, uint start, uint end, uint maxbit); +extern int bcm_find_fsb(uint32 num); + +#define isbitset(a, i) (((a) & (1 << (i))) != 0) + +#define NBITS(type) (sizeof(type) * 8) +#define NBITVAL(nbits) (1 << (nbits)) +#define MAXBITVAL(nbits) ((1 << (nbits)) - 1) +#define NBITMASK(nbits) MAXBITVAL(nbits) +#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8) + +extern void bcm_bitprint32(const uint32 u32); + +/* + * ---------------------------------------------------------------------------- + * Multiword map of 2bits, nibbles + * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val) + * getbit2 getbit4 (void *ptr, uint32 ix) + * ---------------------------------------------------------------------------- + */ + +#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK) \ +static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val) \ +{ \ + uint32 *addr = (uint32 *)ptr; \ + uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */ \ + uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */ \ + uint32 mask = (MSK << pos); \ + uint32 tmp = *a & ~mask; \ + *a = tmp | (val << pos); \ +} \ +static INLINE uint32 getbit##NB(void *ptr, uint32 ix) \ +{ \ + uint32 *addr = (uint32 *)ptr; \ + uint32 *a = addr + (ix >> RSH); \ + uint32 pos = (ix & OFF) << LSH; \ + return ((*a >> pos) & MSK); \ +} + +DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */ +DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */ +DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */ + +/* basic mux operation - can be optimized on several architectures */ +#define MUX(pred, true, false) ((pred) ? 
(true) : (false)) + +/* modulo inc/dec - assumes x E [0, bound - 1] */ +#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1) +#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1) + +/* modulo inc/dec, bound = 2^k */ +#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1)) +#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1)) + +/* modulo add/sub - assumes x, y E [0, bound - 1] */ +#define MODADD(x, y, bound) \ + MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y)) +#define MODSUB(x, y, bound) \ + MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y)) + +/* module add/sub, bound = 2^k */ +#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1)) +#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1)) + +/* crc defines */ +#define CRC8_INIT_VALUE 0xff /* Initial CRC8 checksum value */ +#define CRC8_GOOD_VALUE 0x9f /* Good final CRC8 checksum value */ +#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */ +#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */ +#define CRC32_INIT_VALUE 0xffffffff /* Initial CRC32 checksum value */ +#define CRC32_GOOD_VALUE 0xdebb20e3 /* Good final CRC32 checksum value */ + +/* use for direct output of MAC address in printf etc */ +#define MACF "%02x:%02x:%02x:%02x:%02x:%02x" +#define ETHERP_TO_MACF(ea) ((struct ether_addr *) (ea))->octet[0], \ + ((struct ether_addr *) (ea))->octet[1], \ + ((struct ether_addr *) (ea))->octet[2], \ + ((struct ether_addr *) (ea))->octet[3], \ + ((struct ether_addr *) (ea))->octet[4], \ + ((struct ether_addr *) (ea))->octet[5] + +#define CONST_ETHERP_TO_MACF(ea) ((const struct ether_addr *) (ea))->octet[0], \ + ((const struct ether_addr *) (ea))->octet[1], \ + ((const struct ether_addr *) (ea))->octet[2], \ + ((const struct ether_addr *) (ea))->octet[3], \ + ((const struct ether_addr *) (ea))->octet[4], \ + ((const struct ether_addr *) (ea))->octet[5] +#define ETHER_TO_MACF(ea) (ea).octet[0], \ + (ea).octet[1], \ + 
(ea).octet[2], \ + (ea).octet[3], \ + (ea).octet[4], \ + (ea).octet[5] +#if !defined(SIMPLE_MAC_PRINT) +#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC2STRDBG(ea) CONST_ETHERP_TO_MACF(ea) +#else +#define MACDBG "%02x:xx:xx:xx:x%x:%02x" +#define MAC2STRDBG(ea) ((uint8*)(ea))[0], (((uint8*)(ea))[4] & 0xf), ((uint8*)(ea))[5] +#endif /* SIMPLE_MAC_PRINT */ + +#define MACOUIDBG "%02x:%x:%02x" +#define MACOUI2STRDBG(ea) ((uint8*)(ea))[0], ((uint8*)(ea))[1] & 0xf, ((uint8*)(ea))[2] + +#define MACOUI "%02x:%02x:%02x" +#define MACOUI2STR(ea) ((uint8*)(ea))[0], ((uint8*)(ea))[1], ((uint8*)(ea))[2] + +/* bcm_format_flags() bit description structure */ +typedef struct bcm_bit_desc { + uint32 bit; + const char* name; +} bcm_bit_desc_t; + +/* bcm_format_field */ +typedef struct bcm_bit_desc_ex { + uint32 mask; + const bcm_bit_desc_t *bitfield; +} bcm_bit_desc_ex_t; + +/* buffer length for ethernet address from bcm_ether_ntoa() */ +#define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */ + +static INLINE uint32 /* 32bit word aligned xor-32 */ +bcm_compute_xor32(volatile uint32 *u32_val, int num_u32) +{ + int idx; + uint32 xor32 = 0; + for (idx = 0; idx < num_u32; idx++) + xor32 ^= *(u32_val + idx); + return xor32; +} + +/* crypto utility function */ +/* 128-bit xor: *dst = *src1 xor *src2. dst1, src1 and src2 may have any alignment */ +static INLINE void +xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst) +{ + if ( +#ifdef __i386__ + 1 || +#endif // endif + (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) { + /* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */ + /* x86 supports unaligned. This version runs 6x-9x faster on x86. 
*/ + ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0]; + ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1]; + ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2]; + ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3]; + } else { + /* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */ + int k; + for (k = 0; k < 16; k++) + dst[k] = src1[k] ^ src2[k]; + } +} + +/* externs */ +/* crc */ +uint8 hndcrc8(const uint8 *p, uint nbytes, uint8 crc); +uint16 hndcrc16(const uint8 *p, uint nbytes, uint16 crc); +uint32 hndcrc32(const uint8 *p, uint nbytes, uint32 crc); + +/* format/print */ +#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \ + defined(WLMSG_ASSOC) +/* print out the value a field has: fields may have 1-32 bits and may hold any value */ +extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len); +/* print out which bits in flags are set */ +extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len); +/* print out whcih bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset. 
*/ +int bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz, + const uint8 *addr, uint size, char *buf, int len); +#endif // endif + +extern int bcm_format_hex(char *str, const void *bytes, int len); + +extern const char *bcm_crypto_algo_name(uint algo); +extern char *bcm_chipname(uint chipid, char *buf, uint len); +extern char *bcm_brev_str(uint32 brev, char *buf); +extern void printbig(char *buf); +extern void prhex(const char *msg, const uchar *buf, uint len); + +/* bcmerror */ +extern const char *bcmerrorstr(int bcmerror); + +extern int wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie); + +/* multi-bool data type: set of bools, mbool is true if any is set */ +typedef uint32 mbool; +#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */ +#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */ +#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* TRUE if one bool is set */ +#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val))) + +/* generic datastruct to help dump routines */ +struct fielddesc { + const char *nameandfmt; + uint32 offset; + uint32 len; +}; + +extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size); +extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, + const uint8 *buf, int len); + +extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount); +extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes); +extern void bcm_print_bytes(const char *name, const uchar *cdata, int len); + +typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset); +extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str, + char *buf, uint32 bufsize); +extern uint bcm_bitcount(uint8 *bitmap, uint bytelength); + +extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); + +/* power conversion */ +extern uint16 bcm_qdbm_to_mw(uint8 qdbm); +extern uint8 bcm_mw_to_qdbm(uint16 mw); +extern uint bcm_mkiovar(const char 
*name, const char *data, uint datalen, char *buf, uint len); + +unsigned int process_nvram_vars(char *varbuf, unsigned int len); +extern bool replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable, + unsigned int *datalen); + +/* trace any object allocation / free, with / without features (flags) set to the object */ + +#define BCM_OBJDBG_ADD 1 +#define BCM_OBJDBG_REMOVE 2 +#define BCM_OBJDBG_ADD_PKT 3 + +/* object feature: set or clear flags */ +#define BCM_OBJECT_FEATURE_FLAG 1 +#define BCM_OBJECT_FEATURE_PKT_STATE 2 +/* object feature: flag bits */ +#define BCM_OBJECT_FEATURE_0 (1 << 0) +#define BCM_OBJECT_FEATURE_1 (1 << 1) +#define BCM_OBJECT_FEATURE_2 (1 << 2) +/* object feature: clear flag bits field set with this flag */ +#define BCM_OBJECT_FEATURE_CLEAR (1 << 31) +#ifdef BCM_OBJECT_TRACE +#define bcm_pkt_validate_chk(obj) do { \ + void * pkttag; \ + bcm_object_trace_chk(obj, 0, 0, \ + __FUNCTION__, __LINE__); \ + if ((pkttag = PKTTAG(obj))) { \ + bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \ + __FUNCTION__, __LINE__); \ + } \ +} while (0) +extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line); +extern void bcm_object_trace_upd(void *obj, void *obj_new); +extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn, + const char *caller, int line); +extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value); +extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value); +extern void bcm_object_trace_init(void); +extern void bcm_object_trace_deinit(void); +#else +#define bcm_pkt_validate_chk(obj) +#define bcm_object_trace_opr(a, b, c, d) +#define bcm_object_trace_upd(a, b) +#define bcm_object_trace_chk(a, b, c, d, e) +#define bcm_object_feature_set(a, b, c) +#define bcm_object_feature_get(a, b, c) +#define bcm_object_trace_init() +#define bcm_object_trace_deinit() +#endif /* BCM_OBJECT_TRACE */ + +/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson 
*/ + +/* Table driven count set bits. */ +static const uint8 /* Table only for use by bcm_cntsetbits */ +_CSBTBL[256] = +{ +# define B2(n) n, n + 1, n + 1, n + 2 +# define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2) +# define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2) + B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2) +}; + +static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */ +bcm_cntsetbits(const uint32 u32arg) +{ + /* function local scope declaration of const _CSBTBL[] */ + const uint8 * p = (const uint8 *)&u32arg; + return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]); +} + +static INLINE int /* C equivalent count of leading 0's in a u32 */ +C_bcm_count_leading_zeros(uint32 u32arg) +{ + int shifts = 0; + while (u32arg) { + shifts++; u32arg >>= 1; + } + return (32U - shifts); +} + +/* the format of current TCM layout during boot + * + * Code Unused memory Random numbers Random number Magic number NVRAM NVRAM + * byte Count 0xFEEDC0DE Size + * |<-----Variable---->|<---Variable--->|<-----4 bytes-->|<---4 bytes---->|<---V--->|<--4B--->| + * |<------------- BCM_ENTROPY_HOST_MAXSIZE --------->| + */ + +/* The HOST need to provided 64 bytes (512 bits) entropy for the bcm SW RNG */ +#define BCM_ENTROPY_MAGIC_SIZE 4u +#define BCM_ENTROPY_COUNT_SIZE 4u +#define BCM_ENTROPY_MIN_NBYTES 64u +#define BCM_ENTROPY_MAX_NBYTES 512u +#define BCM_ENTROPY_HOST_NBYTES 128u +#define BCM_ENTROPY_HOST_MAXSIZE \ + (BCM_ENTROPY_MAGIC_SIZE + BCM_ENTROPY_COUNT_SIZE + BCM_ENTROPY_MAX_NBYTES) + +/* Keep BCM MAX_RAND NUMBERS definition for the current dongle image. It will be + * removed after the dongle image is updated to use the bcm RNG. 
+ */ +#define BCM_MAX_RAND_NUMBERS 2u + +/* Constants for calculating the location of host entropy input */ +#define BCM_NVRAM_OFFSET_TCM 4u +#define BCM_NVRAM_IMG_COMPRS_FACTOR 4u +#define BCM_NVRAM_RNG_SIGNATURE 0xFEEDC0DEu + +typedef struct bcm_rand_metadata { + uint32 count; /* number of random numbers in bytes */ + uint32 signature; /* host fills it in, FW verifies before reading rand */ +} bcm_rand_metadata_t; + +#ifdef BCMDRIVER +/* + * Assembly instructions: Count Leading Zeros + * "clz" : MIPS, ARM + * "cntlzw" : PowerPC + * "BSF" : x86 + * "lzcnt" : AMD, SPARC + */ + +#if defined(__arm__) +#if defined(__ARM_ARCH_7M__) /* Cortex M3 */ +#define __USE_ASM_CLZ__ +#endif /* __ARM_ARCH_7M__ */ +#if defined(__ARM_ARCH_7R__) /* Cortex R4 */ +#define __USE_ASM_CLZ__ +#endif /* __ARM_ARCH_7R__ */ +#endif /* __arm__ */ + +static INLINE int +bcm_count_leading_zeros(uint32 u32arg) +{ +#if defined(__USE_ASM_CLZ__) + int zeros; + __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32arg)); + return zeros; +#else /* C equivalent */ + return C_bcm_count_leading_zeros(u32arg); +#endif /* C equivalent */ +} + +/* + * Macro to count leading zeroes + * + */ +#if defined(__GNUC__) +#define CLZ(x) __builtin_clzl(x) +#elif defined(__arm__) +#define CLZ(x) __clz(x) +#else +#define CLZ(x) bcm_count_leading_zeros(x) +#endif /* __GNUC__ */ + +/* INTERFACE: Multiword bitmap based small id allocator. 
*/ +struct bcm_mwbmap; /* forward declaration for use as an opaque mwbmap handle */ + +#define BCM_MWBMAP_INVALID_HDL ((struct bcm_mwbmap *)NULL) +#define BCM_MWBMAP_INVALID_IDX ((uint32)(~0U)) + +/* Incarnate a multiword bitmap based small index allocator */ +extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max); + +/* Free up the multiword bitmap index allocator */ +extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl); + +/* Allocate a unique small index using a multiword bitmap index allocator */ +extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl); + +/* Force an index at a specified position to be in use */ +extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix); + +/* Free a previously allocated index back into the multiword bitmap allocator */ +extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix); + +/* Fetch the total number of free indices in the multiword bitmap allocator */ +extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl); + +/* Determine whether an index is inuse or free */ +extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix); + +/* Debug dump a multiword bitmap allocator */ +extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl); + +extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl); +/* End - Multiword bitmap based small Id allocator. */ + +/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */ + +#define ID8_INVALID 0xFFu +#define ID16_INVALID 0xFFFFu +#define ID32_INVALID 0xFFFFFFFFu +#define ID16_UNDEFINED ID16_INVALID + +/* + * Construct a 16bit id allocator, managing 16bit ids in the range: + * [start_val16 .. start_val16+total_ids) + * Note: start_val16 is inclusive. + * Returns an opaque handle to the 16bit id allocator. 
+ */ +extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16); +extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl); +extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16); + +/* Allocate a unique 16bit id */ +extern uint16 id16_map_alloc(void * id16_map_hndl); + +/* Free a 16bit id value into the id16 allocator */ +extern void id16_map_free(void * id16_map_hndl, uint16 val16); + +/* Get the number of failures encountered during id allocation. */ +extern uint32 id16_map_failures(void * id16_map_hndl); + +/* Audit the 16bit id allocator state. */ +extern bool id16_map_audit(void * id16_map_hndl); +/* End - Simple 16bit Id Allocator. */ +#endif /* BCMDRIVER */ + +#define MASK_32_BITS (~0) +#define MASK_8_BITS ((1 << 8) - 1) + +#define EXTRACT_LOW32(num) (uint32)(num & MASK_32_BITS) +#define EXTRACT_HIGH32(num) (uint32)(((uint64)num >> 32) & MASK_32_BITS) + +#define MAXIMUM(a, b) ((a > b) ? a : b) +#define MINIMUM(a, b) ((a < b) ? a : b) +#define LIMIT(x, min, max) ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x))) + +/* calculate checksum for ip header, tcp / udp header / data */ +uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum); + +#ifndef _dll_t_ +#define _dll_t_ +/* + * ----------------------------------------------------------------------------- + * Double Linked List Macros + * ----------------------------------------------------------------------------- + * + * All dll operations must be performed on a pre-initialized node. + * Inserting an uninitialized node into a list effectively initialized it. + * + * When a node is deleted from a list, you may initialize it to avoid corruption + * incurred by double deletion. You may skip initialization if the node is + * immediately inserted into another list. + * + * By placing a dll_t element at the start of a struct, you may cast a dll_t * + * to the struct or vice versa. 
+ * + * Example of declaring an initializing someList and inserting nodeA, nodeB + * + * typedef struct item { + * dll_t node; + * int someData; + * } Item_t; + * Item_t nodeA, nodeB, nodeC; + * nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333; + * + * dll_t someList; + * dll_init(&someList); + * + * dll_append(&someList, (dll_t *) &nodeA); + * dll_prepend(&someList, &nodeB.node); + * dll_insert((dll_t *)&nodeC, &nodeA.node); + * + * dll_delete((dll_t *) &nodeB); + * + * Example of a for loop to walk someList of node_p + * + * extern void mydisplay(Item_t * item_p); + * + * dll_t * item_p, * next_p; + * for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p); + * item_p = next_p) + * { + * next_p = dll_next_p(item_p); + * ... use item_p at will, including removing it from list ... + * mydisplay((PItem_t)item_p); + * } + * + * ----------------------------------------------------------------------------- + */ +typedef struct dll { + struct dll * next_p; + struct dll * prev_p; +} dll_t; + +static INLINE void +dll_init(dll_t *node_p) +{ + node_p->next_p = node_p; + node_p->prev_p = node_p; +} +/* dll macros returing a pointer to dll_t */ + +static INLINE dll_t * +dll_head_p(dll_t *list_p) +{ + return list_p->next_p; +} + +static INLINE dll_t * +dll_tail_p(dll_t *list_p) +{ + return (list_p)->prev_p; +} + +static INLINE dll_t * +dll_next_p(dll_t *node_p) +{ + return (node_p)->next_p; +} + +static INLINE dll_t * +dll_prev_p(dll_t *node_p) +{ + return (node_p)->prev_p; +} + +static INLINE bool +dll_empty(dll_t *list_p) +{ + return ((list_p)->next_p == (list_p)); +} + +static INLINE bool +dll_end(dll_t *list_p, dll_t * node_p) +{ + return (list_p == node_p); +} + +/* inserts the node new_p "after" the node at_p */ +static INLINE void +dll_insert(dll_t *new_p, dll_t * at_p) +{ + new_p->next_p = at_p->next_p; + new_p->prev_p = at_p; + at_p->next_p = new_p; + (new_p->next_p)->prev_p = new_p; +} + +static INLINE void +dll_append(dll_t *list_p, 
dll_t *node_p) +{ + dll_insert(node_p, dll_tail_p(list_p)); +} + +static INLINE void +dll_prepend(dll_t *list_p, dll_t *node_p) +{ + dll_insert(node_p, list_p); +} + +/* deletes a node from any list that it "may" be in, if at all. */ +static INLINE void +dll_delete(dll_t *node_p) +{ + node_p->prev_p->next_p = node_p->next_p; + node_p->next_p->prev_p = node_p->prev_p; +} +#endif /* ! defined(_dll_t_) */ + +/* Elements managed in a double linked list */ + +typedef struct dll_pool { + dll_t free_list; + uint16 free_count; + uint16 elems_max; + uint16 elem_size; + dll_t elements[1]; +} dll_pool_t; + +dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size); +void * dll_pool_alloc(dll_pool_t * dll_pool_p); +void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p); +void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p); +typedef void (* dll_elem_dump)(void * elem_p); +void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size); + +int valid_bcmerror(int e); + +/* calculate IPv4 header checksum + * - input ip points to IP header in network order + * - output cksum is in network order + */ +uint16 ipv4_hdr_cksum(uint8 *ip, int ip_len); + +/* calculate IPv4 TCP header checksum + * - input ip and tcp points to IP and TCP header in network order + * - output cksum is in network order + */ +uint16 ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len); + +/* calculate IPv6 TCP header checksum + * - input ipv6 and tcp points to IPv6 and TCP header in network order + * - output cksum is in network order + */ +uint16 ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len); + +#ifdef __cplusplus + } +#endif // endif + +/* #define DEBUG_COUNTER */ +#ifdef DEBUG_COUNTER +#define CNTR_TBL_MAX 10 +typedef struct _counter_tbl_t { + char name[16]; /* name of this counter table */ + uint32 prev_log_print; /* Internal use. 
Timestamp of the previous log print */ + uint log_print_interval; /* Desired interval to print logs in ms */ + uint needed_cnt; /* How many counters need to be used */ + uint32 cnt[CNTR_TBL_MAX]; /* Counting entries to increase at desired places */ + bool enabled; /* Whether to enable printing log */ +} counter_tbl_t; + +void counter_printlog(counter_tbl_t *ctr_tbl); +#endif /* DEBUG_COUNTER */ + +#if defined(__GNUC__) +#define CALL_SITE __builtin_return_address(0) +#else +#define CALL_SITE ((void*) 0) +#endif // endif +#ifdef SHOW_LOGTRACE +#define TRACE_LOG_BUF_MAX_SIZE 1700 +#define BUF_NOT_AVAILABLE 0 +#define NEXT_BUF_NOT_AVAIL 1 +#define NEXT_BUF_AVAIL 2 + +typedef struct trace_buf_info { + int availability; + int size; + char buf[TRACE_LOG_BUF_MAX_SIZE]; +} trace_buf_info_t; +#endif /* SHOW_LOGTRACE */ + +enum dump_dongle_e { + DUMP_DONGLE_COREREG = 0, + DUMP_DONGLE_D11MEM +}; + +typedef struct { + uint32 type; /**< specifies e.g dump of d11 memory, use enum dump_dongle_e */ + uint32 index; /**< iterator1, specifies core index or d11 memory index */ + uint32 offset; /**< iterator2, byte offset within register set or memory */ +} dump_dongle_in_t; + +typedef struct { + uint32 address; /**< e.g. backplane address of register */ + uint32 id; /**< id, e.g. core id */ + uint32 rev; /**< rev, e.g. 
core rev */ + uint32 n_bytes; /**< nbytes in array val[] */ + uint32 val[1]; /**< out: values that were read out of registers or memory */ +} dump_dongle_out_t; + +extern uint32 sqrt_int(uint32 value); + +#ifdef BCMDRIVER +/* structures and routines to process variable sized data */ +typedef struct var_len_data { + uint32 vlen; + uint8 *vdata; +} var_len_data_t; + +int bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size); +int bcm_vdata_free(osl_t *osh, var_len_data_t *vld); +#endif /* BCMDRIVER */ + +/* Count the number of elements in an array that do not match the given value */ +extern int array_value_mismatch_count(uint8 value, uint8 *array, int array_size); +/* Count the number of non-zero elements in an uint8 array */ +extern int array_nonzero_count(uint8 *array, int array_size); +/* Count the number of non-zero elements in an int16 array */ +extern int array_nonzero_count_int16(int16 *array, int array_size); +/* Count the number of zero elements in an uint8 array */ +extern int array_zero_count(uint8 *array, int array_size); +/* Validate a uint8 ordered array. Assert if invalid. */ +extern int verify_ordered_array_uint8(uint8 *array, int array_size, uint8 range_lo, uint8 range_hi); +/* Validate a int16 configuration array that need not be zero-terminated. Assert if invalid. */ +extern int verify_ordered_array_int16(int16 *array, int array_size, int16 range_lo, int16 range_hi); +/* Validate all values in an array are in range */ +extern int verify_array_values(uint8 *array, int array_size, + int range_lo, int range_hi, bool zero_terminated); + +#endif /* _bcmutils_h_ */ diff --git a/bcmdhd.100.10.315.x/include/brcm_nl80211.h b/bcmdhd.100.10.315.x/include/brcm_nl80211.h new file mode 100644 index 0000000..eb04883 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/brcm_nl80211.h @@ -0,0 +1,77 @@ +/* + * Definitions for nl80211 vendor command/event access to host driver + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: brcm_nl80211.h 768773 2018-06-21 08:38:23Z $ + * + */ + +#ifndef _brcm_nl80211_h_ +#define _brcm_nl80211_h_ + +#define OUI_BRCM 0x001018 +#define OUI_GOOGLE 0x001A11 + +enum wl_vendor_subcmd { + BRCM_VENDOR_SCMD_UNSPEC, + BRCM_VENDOR_SCMD_PRIV_STR, + BRCM_VENDOR_SCMD_BCM_STR, + BRCM_VENDOR_SCMD_BCM_PSK +}; + +enum brcm_nl80211_vendor_events { + BRCM_VENDOR_EVENT_UNSPEC, + BRCM_VENDOR_EVENT_PRIV_STR, + BRCM_VENDOR_EVENT_HANGED = 33, + BRCM_VENDOR_EVENT_SAE_KEY = 34, + BRCM_VENDOR_EVENT_BEACON_RECV = 35 +}; + +struct bcm_nlmsg_hdr { + uint cmd; /* common ioctl definition */ + int len; /* expected return buffer length */ + uint offset; /* user buffer offset */ + uint set; /* get or set request optional */ + uint magic; /* magic number for verification */ +}; + +enum bcmnl_attrs { + BCM_NLATTR_UNSPEC, + + BCM_NLATTR_LEN, + BCM_NLATTR_DATA, + + __BCM_NLATTR_AFTER_LAST, + BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1 +}; + +struct nl_prv_data { + int err; /* return result */ + void *data; /* ioctl return buffer pointer */ + uint len; /* ioctl return buffer length */ + struct bcm_nlmsg_hdr *nlioc; /* bcm_nlmsg_hdr header pointer */ +}; + +#endif /* _brcm_nl80211_h_ */ diff --git a/bcmdhd.100.10.315.x/include/dbus.h b/bcmdhd.100.10.315.x/include/dbus.h new file mode 100644 index 0000000..e8bd5d7 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/dbus.h @@ -0,0 +1,598 @@ +/* + * Dongle BUS interface Abstraction layer + * target serial buses like USB, SDIO, SPI, etc. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dbus.h 686618 2017-02-23 07:20:43Z $ + */ + +#ifndef __DBUS_H__ +#define __DBUS_H__ + +#include "typedefs.h" +#include + +extern uint dbus_msglevel; +#define DBUS_ERROR_VAL 0x0001 +#define DBUS_TRACE_VAL 0x0002 +#define DBUS_INFO_VAL 0x0004 + +#if defined(DHD_DEBUG) +#define DBUSERR(args) do {if (dbus_msglevel & DBUS_ERROR_VAL) printf args;} while (0) +#define DBUSTRACE(args) do {if (dbus_msglevel & DBUS_TRACE_VAL) printf args;} while (0) +#define DBUSINFO(args) do {if (dbus_msglevel & DBUS_INFO_VAL) printf args;} while (0) +#else /* defined(DHD_DEBUG) */ +#define DBUSERR(args) +#define DBUSTRACE(args) +#define DBUSINFO(args) +#endif + +enum { + DBUS_OK = 0, + DBUS_ERR = -200, + DBUS_ERR_TIMEOUT, + DBUS_ERR_DISCONNECT, + DBUS_ERR_NODEVICE, + DBUS_ERR_UNSUPPORTED, + DBUS_ERR_PENDING, + DBUS_ERR_NOMEM, + DBUS_ERR_TXFAIL, + DBUS_ERR_TXTIMEOUT, + DBUS_ERR_TXDROP, + DBUS_ERR_RXFAIL, + DBUS_ERR_RXDROP, + DBUS_ERR_TXCTLFAIL, + DBUS_ERR_RXCTLFAIL, + DBUS_ERR_REG_PARAM, + DBUS_STATUS_CANCELLED, + DBUS_ERR_NVRAM, + DBUS_JUMBO_NOMATCH, + DBUS_JUMBO_BAD_FORMAT, + DBUS_NVRAM_NONTXT, + DBUS_ERR_RXZLP +}; + +#define BCM_OTP_SIZE_43236 84 /* number of 16 bit values */ +#define BCM_OTP_SW_RGN_43236 24 /* start offset of SW config region */ +#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */ + +#define ERR_CBMASK_TXFAIL 0x00000001 +#define ERR_CBMASK_RXFAIL 0x00000002 +#define ERR_CBMASK_ALL 0xFFFFFFFF + +#define DBUS_CBCTL_WRITE 0 +#define DBUS_CBCTL_READ 1 +#if defined(INTR_EP_ENABLE) +#define DBUS_CBINTR_POLL 2 +#endif /* defined(INTR_EP_ENABLE) */ + +#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */ +#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */ + +#define DBUS_BUFFER_SIZE_TX 32000 +#define DBUS_BUFFER_SIZE_RX 24000 + +#define DBUS_BUFFER_SIZE_TX_NOAGG 2048 +#define DBUS_BUFFER_SIZE_RX_NOAGG 2048 + +/** DBUS types */ +enum { + DBUS_USB, + DBUS_SDIO, + DBUS_SPI, + DBUS_UNKNOWN +}; + +enum 
dbus_state { + DBUS_STATE_DL_PENDING, + DBUS_STATE_DL_DONE, + DBUS_STATE_UP, + DBUS_STATE_DOWN, + DBUS_STATE_PNP_FWDL, + DBUS_STATE_DISCONNECT, + DBUS_STATE_SLEEP, + DBUS_STATE_DL_NEEDED +}; + +enum dbus_pnp_state { + DBUS_PNP_DISCONNECT, + DBUS_PNP_SLEEP, + DBUS_PNP_RESUME +}; + +enum dbus_file { + DBUS_FIRMWARE, + DBUS_NVFILE +}; + +typedef enum _DEVICE_SPEED { + INVALID_SPEED = -1, + LOW_SPEED = 1, /**< USB 1.1: 1.5 Mbps */ + FULL_SPEED, /**< USB 1.1: 12 Mbps */ + HIGH_SPEED, /**< USB 2.0: 480 Mbps */ + SUPER_SPEED, /**< USB 3.0: 4.8 Gbps */ +} DEVICE_SPEED; + +typedef struct { + int bustype; + int vid; + int pid; + int devid; + int chiprev; /**< chip revsion number */ + int mtu; + int nchan; /**< Data Channels */ + int has_2nd_bulk_in_ep; +} dbus_attrib_t; + +/* FIX: Account for errors related to DBUS; + * Let upper layer account for packets/bytes + */ +typedef struct { + uint32 rx_errors; + uint32 tx_errors; + uint32 rx_dropped; + uint32 tx_dropped; +} dbus_stats_t; + +/** + * Configurable BUS parameters + */ +enum { + DBUS_CONFIG_ID_RXCTL_DEFERRES = 1, + DBUS_CONFIG_ID_AGGR_LIMIT, + DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET +}; + +typedef struct { + uint32 config_id; + union { + uint32 general_param; + bool rxctl_deferrespok; + struct { + int maxrxsf; + int maxrxsize; + int maxtxsf; + int maxtxsize; + } aggr_param; + }; +} dbus_config_t; + +/** + * External Download Info + */ +typedef struct dbus_extdl { + uint8 *fw; + int fwlen; + uint8 *vars; + int varslen; +} dbus_extdl_t; + +struct dbus_callbacks; +struct exec_parms; + +typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, + uint16 bus_no, uint16 slot, uint32 hdrlen); +typedef void (*disconnect_cb_t)(void *arg); +typedef void *(*exec_cb_t)(struct exec_parms *args); + +/** Client callbacks registered during dbus_attach() */ +typedef struct dbus_callbacks { + void (*send_complete)(void *cbarg, void *info, int status); + void (*recv_buf)(void *cbarg, uint8 *buf, int len); + void (*recv_pkt)(void 
*cbarg, void *pkt); + void (*txflowcontrol)(void *cbarg, bool onoff); + void (*errhandler)(void *cbarg, int err); + void (*ctl_complete)(void *cbarg, int type, int status); + void (*state_change)(void *cbarg, int state); + void *(*pktget)(void *cbarg, uint len, bool send); + void (*pktfree)(void *cbarg, void *p, bool send); +} dbus_callbacks_t; + +struct dbus_pub; +struct bcmstrbuf; +struct dbus_irb; +struct dbus_irb_rx; +struct dbus_irb_tx; +struct dbus_intf_callbacks; + +typedef struct { + void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs); + void (*detach)(struct dbus_pub *pub, void *bus); + + int (*up)(void *bus); + int (*down)(void *bus); + int (*send_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb); + int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*send_ctl)(void *bus, uint8 *buf, int len); + int (*recv_ctl)(void *bus, uint8 *buf, int len); + int (*get_stats)(void *bus, dbus_stats_t *stats); + int (*get_attrib)(void *bus, dbus_attrib_t *attrib); + + int (*pnp)(void *bus, int evnt); + int (*remove)(void *bus); + int (*resume)(void *bus); + int (*suspend)(void *bus); + int (*stop)(void *bus); + int (*reset)(void *bus); + + /* Access to bus buffers directly */ + void *(*pktget)(void *bus, int len); + void (*pktfree)(void *bus, void *pkt); + + int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len, + bool set); + void (*dump)(void *bus, struct bcmstrbuf *strbuf); + int (*set_config)(void *bus, dbus_config_t *config); + int (*get_config)(void *bus, dbus_config_t *config); + + bool (*device_exists)(void *bus); + int (*dlneeded)(void *bus); + int (*dlstart)(void *bus, uint8 *fw, int len); + int (*dlrun)(void *bus); + bool (*recv_needed)(void *bus); + + void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args); + void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args); + + int (*tx_timer_init)(void 
*bus); + int (*tx_timer_start)(void *bus, uint timeout); + int (*tx_timer_stop)(void *bus); + + int (*sched_dpc)(void *bus); + int (*lock)(void *bus); + int (*unlock)(void *bus); + int (*sched_probe_cb)(void *bus); + + int (*shutdown)(void *bus); + + int (*recv_stop)(void *bus); + int (*recv_resume)(void *bus); + + int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx); + + int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value); + + /* Add from the bottom */ +} dbus_intf_t; + +typedef struct dbus_pub { + struct osl_info *osh; + dbus_stats_t stats; + dbus_attrib_t attrib; + enum dbus_state busstate; + DEVICE_SPEED device_speed; + int ntxq, nrxq, rxsize; + void *bus; + struct shared_info *sh; + void *dev_info; +} dbus_pub_t; + +#define BUS_INFO(bus, type) (((type *) bus)->pub->bus) + +#define ALIGNED_LOCAL_VARIABLE(var, align) \ + uint8 buffer[SDALIGN+64]; \ + uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align; + +/* + * Public Bus Function Interface + */ + +/* + * FIX: Is there better way to pass OS/Host handles to DBUS but still + * maintain common interface for all OS?? 
+ * Under NDIS, param1 needs to be MiniportHandle + * For NDIS60, param2 is WdfDevice + * Under Linux, param1 and param2 are NULL; + */ +extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg, + void *param1, void *param2); +extern int dbus_deregister(void); + +//extern int dbus_download_firmware(dbus_pub_t *pub); +//extern int dbus_up(struct dhd_bus *pub); +extern int dbus_down(dbus_pub_t *pub); +//extern int dbus_stop(struct dhd_bus *pub); +extern int dbus_shutdown(dbus_pub_t *pub); +extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on); + +extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf); +extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info); +extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info); +//extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len); +//extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len); +extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx); +extern int dbus_poll_intr(dbus_pub_t *pub); +extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats); +extern int dbus_get_device_speed(dbus_pub_t *pub); +extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config); +extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config); +extern void * dbus_get_devinfo(dbus_pub_t *pub); + +extern void *dbus_pktget(dbus_pub_t *pub, int len); +extern void dbus_pktfree(dbus_pub_t *pub, void* pkt); + +extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask); +extern int dbus_pnp_sleep(dbus_pub_t *pub); +extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload); +extern int dbus_pnp_disconnect(dbus_pub_t *pub); + +//extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, +// void *params, int plen, void *arg, int len, bool set); + +extern void *dhd_dbus_txq(const dbus_pub_t *pub); +extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub); + +/* + * Private Common Bus Interface + */ + +/** IO Request Block (IRB) */ +typedef 
struct dbus_irb { + struct dbus_irb *next; /**< it is cast from dbus_irb_tx or dbus_irb_rx struct */ +} dbus_irb_t; + +typedef struct dbus_irb_rx { + struct dbus_irb irb; /* Must be first */ + uint8 *buf; + int buf_len; + int actual_len; + void *pkt; + void *info; + void *arg; +} dbus_irb_rx_t; + +typedef struct dbus_irb_tx { + struct dbus_irb irb; /** Must be first */ + uint8 *buf; /** mutually exclusive with struct member 'pkt' */ + int len; /** length of field 'buf' */ + void *pkt; /** mutually exclusive with struct member 'buf' */ + int retry_count; + void *info; + void *arg; + void *send_buf; /**< linear buffer for LINUX when aggregation is enabled */ +} dbus_irb_tx_t; + +/** + * DBUS interface callbacks are different from user callbacks + * so, internally, different info can be passed to upper layer + */ +typedef struct dbus_intf_callbacks { + void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb); + void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status); + void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status); + void (*errhandler)(void *cbarg, int err); + void (*ctl_complete)(void *cbarg, int type, int status); + void (*state_change)(void *cbarg, int state); + bool (*isr)(void *cbarg, bool *wantdpc); + bool (*dpc)(void *cbarg, bool bounded); + void (*watchdog)(void *cbarg); + void *(*pktget)(void *cbarg, uint len, bool send); + void (*pktfree)(void *cbarg, void *p, bool send); + struct dbus_irb* (*getirb)(void *cbarg, bool send); + void (*rxerr_indicate)(void *cbarg, bool on); +} dbus_intf_callbacks_t; + +/* + * Porting: To support new bus, port these functions below + */ + +/* + * Bus specific Interface + * Implemented by dbus_usb.c/dbus_sdio.c + */ +extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg, + dbus_intf_t **intf, void *param1, void *param2); +extern int dbus_bus_deregister(void); +extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp); 
+ +/* + * Bus-specific and OS-specific Interface + * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c + */ +extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, + void *prarg, dbus_intf_t **intf, void *param1, void *param2); +extern int dbus_bus_osl_deregister(void); + +/* + * Bus-specific, OS-specific, HW-specific Interface + * Mainly for SDIO Host HW controller + */ +extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, + void *prarg, dbus_intf_t **intf); +extern int dbus_bus_osl_hw_deregister(void); + +extern uint usbdev_bulkin_eps(void); +#if defined(BCM_REQUEST_FW) +extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type, + uint16 boardtype, uint16 boardrev); +extern void dbus_release_fw_nvfile(void *firmware); +#endif /* #if defined(BCM_REQUEST_FW) */ + +#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + /* Backward compatibility */ + typedef unsigned int gfp_t; + + #define dma_pool pci_pool + #define dma_pool_create(name, dev, size, align, alloc) \ + pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC) + #define dma_pool_destroy(pool) pci_pool_destroy(pool) + #define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle) + #define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr) + + #define dma_map_single(dev, addr, size, dir) pci_map_single(dev, addr, size, dir) + #define dma_unmap_single(dev, hnd, size, dir) pci_unmap_single(dev, hnd, size, dir) + #define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE + #define DMA_TO_DEVICE PCI_DMA_TODEVICE +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +/* Availability of these functions varies (when present, they have two arguments) */ +#ifndef hc32_to_cpu + #define hc32_to_cpu(x) le32_to_cpu(x) + #define cpu_to_hc32(x) cpu_to_le32(x) + typedef unsigned int __hc32; +#else + #error 
Two-argument functions needed +#endif // endif + +/* Private USB opcode base */ +#define EHCI_FASTPATH 0x31 +#define EHCI_SET_EP_BYPASS EHCI_FASTPATH +#define EHCI_SET_BYPASS_CB (EHCI_FASTPATH + 1) +#define EHCI_SET_BYPASS_DEV (EHCI_FASTPATH + 2) +#define EHCI_DUMP_STATE (EHCI_FASTPATH + 3) +#define EHCI_SET_BYPASS_POOL (EHCI_FASTPATH + 4) +#define EHCI_CLR_EP_BYPASS (EHCI_FASTPATH + 5) + +/* + * EHCI QTD structure (hardware and extension) + * NOTE that is does not need to (and does not) match its kernel counterpart + */ +#define EHCI_QTD_NBUFFERS 5 +#define EHCI_QTD_ALIGN 32 +#define EHCI_BULK_PACKET_SIZE 512 +#define EHCI_QTD_XACTERR_MAX 32 + +struct ehci_qtd { + /* Hardware map */ + volatile uint32_t qtd_next; + volatile uint32_t qtd_altnext; + volatile uint32_t qtd_status; +#define EHCI_QTD_GET_BYTES(x) (((x)>>16) & 0x7fff) +#define EHCI_QTD_IOC 0x00008000 +#define EHCI_QTD_GET_CERR(x) (((x)>>10) & 0x3) +#define EHCI_QTD_SET_CERR(x) ((x) << 10) +#define EHCI_QTD_GET_PID(x) (((x)>>8) & 0x3) +#define EHCI_QTD_SET_PID(x) ((x) << 8) +#define EHCI_QTD_ACTIVE 0x80 +#define EHCI_QTD_HALTED 0x40 +#define EHCI_QTD_BUFERR 0x20 +#define EHCI_QTD_BABBLE 0x10 +#define EHCI_QTD_XACTERR 0x08 +#define EHCI_QTD_MISSEDMICRO 0x04 + volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS]; + volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS]; + + /* Implementation extension */ + dma_addr_t qtd_self; /**< own hardware address */ + struct ehci_qtd *obj_next; /**< software link to the next QTD */ + void *rpc; /**< pointer to the rpc buffer */ + size_t length; /**< length of the data in the buffer */ + void *buff; /**< pointer to the reassembly buffer */ + int xacterrs; /**< retry counter for qtd xact error */ +} __attribute__ ((aligned(EHCI_QTD_ALIGN))); + +#define EHCI_NULL __constant_cpu_to_le32(1) /* HW null pointer shall be odd */ + +#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1) + +/** + * Queue Head + * NOTE This structure is slightly 
different from the one in the kernel; but needs to stay + * compatible. + */ +struct ehci_qh { + /* Hardware map */ + volatile uint32_t qh_link; + volatile uint32_t qh_endp; + volatile uint32_t qh_endphub; + volatile uint32_t qh_curqtd; + + /* QTD overlay */ + volatile uint32_t ow_next; + volatile uint32_t ow_altnext; + volatile uint32_t ow_status; + volatile uint32_t ow_buffer [EHCI_QTD_NBUFFERS]; + volatile uint32_t ow_buffer_hi [EHCI_QTD_NBUFFERS]; + + /* Extension (should match the kernel layout) */ + dma_addr_t unused0; + void *unused1; + struct list_head unused2; + struct ehci_qtd *dummy; + struct ehci_qh *unused3; + + struct ehci_hcd *unused4; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct kref unused5; + unsigned unused6; + + uint8_t unused7; + + /* periodic schedule info */ + uint8_t unused8; + uint8_t unused9; + uint8_t unused10; + uint16_t unused11; + uint16_t unused12; + uint16_t unused13; + struct usb_device *unused14; +#else + unsigned unused5; + + u8 unused6; + + /* periodic schedule info */ + u8 unused7; + u8 unused8; + u8 unused9; + unsigned short unused10; + unsigned short unused11; +#define NO_FRAME ((unsigned short)~0) +#ifdef EHCI_QUIRK_FIX + struct usb_device *unused12; +#endif /* EHCI_QUIRK_FIX */ +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + struct ehci_qtd *first_qtd; + /* Link to the first QTD; this is an optimized equivalent of the qtd_list field */ + /* NOTE that ehci_qh in ehci.h shall reserve this word */ +} __attribute__ ((aligned(EHCI_QTD_ALIGN))); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/** The corresponding structure in the kernel is used to get the QH */ +struct hcd_dev { /* usb_device.hcpriv points to this */ + struct list_head unused0; + struct list_head unused1; + + /* array of QH pointers */ + void *ep[32]; +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + +int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc, + int 
token, int len); +int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data, + int token, int len); +int optimize_submit_async(struct ehci_qtd *qtd, int epn); +void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma); +struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags); +void optimize_ehci_qtd_free(struct ehci_qtd *qtd); +void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf); +#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */ + +void dbus_flowctrl_tx(void *dbi, bool on); +#endif /* __DBUS_H__ */ diff --git a/bcmdhd.100.10.315.x/include/dhd_daemon.h b/bcmdhd.100.10.315.x/include/dhd_daemon.h new file mode 100644 index 0000000..06aa182 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/dhd_daemon.h @@ -0,0 +1,62 @@ +/* + * Header file for DHD daemon to handle timeouts + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_daemon.h 671442 2016-11-22 05:16:18Z $ + */ + +#ifndef __BCM_DHDD_H__ +#define __BCM_DHDD_H__ + +/** + * To maintain compatibility when the dhd driver and dhd daemon are taken from different branches, + * make sure to keep this file the same across the dhd driver branch and dhd apps branch. + * TODO: Make this file shared between apps and dhd.ko + */ + +#define BCM_TO_MAGIC 0x600DB055 +#define NO_TRAP 0 +#define DO_TRAP 1 + +#define BCM_NL_USER 31 + +typedef enum notify_dhd_daemon_reason { + REASON_COMMAND_TO, + REASON_OQS_TO, + REASON_SCAN_TO, + REASON_JOIN_TO, + REASON_DAEMON_STARTED, + REASON_DEVICE_TX_STUCK_WARNING, + REASON_DEVICE_TX_STUCK, + REASON_UNKOWN +} notify_dhd_daemon_reason_t; + +typedef struct bcm_to_info { + int magic; + int reason; + int trap; +} bcm_to_info_t; + +#endif /* __BCM_DHDD_H__ */ diff --git a/bcmdhd.100.10.315.x/include/dhdioctl.h b/bcmdhd.100.10.315.x/include/dhdioctl.h new file mode 100644 index 0000000..458e14f --- /dev/null +++ b/bcmdhd.100.10.315.x/include/dhdioctl.h @@ -0,0 +1,242 @@ +/* + * Definitions for ioctls to access DHD iovars. + * Based on wlioctl.h (for Broadcom 802.11abg driver). + * (Moves towards generic ioctls for BCM drivers/iovars.) + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhdioctl.h 765806 2018-06-05 13:56:08Z $ + */ + +#ifndef _dhdioctl_h_ +#define _dhdioctl_h_ + +#include + +/* Linux network driver ioctl encoding */ +typedef struct dhd_ioctl { + uint32 cmd; /* common ioctl definition */ + void *buf; /* pointer to user buffer */ + uint32 len; /* length of user buffer */ + uint32 set; /* get or set request boolean (optional) */ + uint32 used; /* bytes read or written (optional) */ + uint32 needed; /* bytes needed (optional) */ + uint32 driver; /* to identify target driver */ +} dhd_ioctl_t; + +/* Underlying BUS definition */ +enum { + BUS_TYPE_USB = 0, /* for USB dongles */ + BUS_TYPE_SDIO, /* for SDIO dongles */ + BUS_TYPE_PCIE /* for PCIE dongles */ +}; + +typedef enum { + DMA_XFER_SUCCESS = 0, + DMA_XFER_IN_PROGRESS, + DMA_XFER_FAILED +} dma_xfer_status_t; + +typedef struct tput_test { + uint16 version; + uint16 length; + uint8 direction; + uint8 tput_test_running; + uint8 mac_sta[6]; + uint8 mac_ap[6]; + uint8 PAD[2]; + uint32 payload_size; + uint32 num_pkts; + uint32 timeout_ms; + uint32 flags; + + uint32 pkts_good; + uint32 pkts_bad; + uint32 pkts_cmpl; + uint64 time_ms; + uint64 tput_bps; +} tput_test_t; + +typedef enum { + TPUT_DIR_TX = 0, + TPUT_DIR_RX +} tput_dir_t; + +#define TPUT_TEST_T_VER 1 +#define TPUT_TEST_T_LEN 68 +#define TPUT_TEST_MIN_PAYLOAD_SIZE 16 +#define TPUT_TEST_USE_ETHERNET_HDR 0x1 +#define TPUT_TEST_USE_802_11_HDR 0x2 + +/* per-driver magic numbers */ +#define DHD_IOCTL_MAGIC 0x00444944 + +/* bump this number if you change the ioctl interface */ +#define DHD_IOCTL_VERSION 1 + +/* + * Increase the DHD_IOCTL_MAXLEN to 16K for supporting download of NVRAM files of size + * > 8K. In the existing implementation when NVRAM is to be downloaded via the "vars" + * DHD IOVAR, the NVRAM is copied to the DHD Driver memory. Later on when "dwnldstate" is + * invoked with FALSE option, the NVRAM gets copied from the DHD driver to the Dongle + * memory. 
The simple way to support this feature without modifying the DHD application, + * driver logic is to increase the DHD_IOCTL_MAXLEN size. This macro defines the "size" + * of the buffer in which data is exchanged between the DHD App and DHD driver. + */ +#define DHD_IOCTL_MAXLEN (16384) /* max length ioctl buffer required */ +#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ + +/* common ioctl definitions */ +#define DHD_GET_MAGIC 0 +#define DHD_GET_VERSION 1 +#define DHD_GET_VAR 2 +#define DHD_SET_VAR 3 + +/* message levels */ +#define DHD_ERROR_VAL 0x0001 +#define DHD_TRACE_VAL 0x0002 +#define DHD_INFO_VAL 0x0004 +#define DHD_DATA_VAL 0x0008 +#define DHD_CTL_VAL 0x0010 +#define DHD_TIMER_VAL 0x0020 +#define DHD_HDRS_VAL 0x0040 +#define DHD_BYTES_VAL 0x0080 +#define DHD_INTR_VAL 0x0100 +#define DHD_LOG_VAL 0x0200 +#define DHD_GLOM_VAL 0x0400 +#define DHD_EVENT_VAL 0x0800 +#define DHD_BTA_VAL 0x1000 +#define DHD_ISCAN_VAL 0x2000 +#define DHD_ARPOE_VAL 0x4000 +#define DHD_REORDER_VAL 0x8000 +#define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */ +#define DHD_PNO_VAL 0x80000 +#define DHD_RTT_VAL 0x100000 +#define DHD_MSGTRACE_VAL 0x200000 +#define DHD_FWLOG_VAL 0x400000 +#define DHD_DBGIF_VAL 0x800000 +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM_VAL 0x1000000 +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#define DHD_PKT_MON_VAL 0x2000000 +#define DHD_PKT_MON_DUMP_VAL 0x4000000 +#define DHD_ERROR_MEM_VAL 0x8000000 +#define DHD_DNGL_IOVAR_SET_VAL 0x10000000 /**< logs the setting of dongle iovars */ +#define DHD_LPBKDTDUMP_VAL 0x20000000 +#define DHD_PRSRV_MEM_VAL 0x40000000 +#define DHD_IOVAR_MEM_VAL 0x80000000 +#define DHD_ANDROID_VAL 0x10000 +#define DHD_IW_VAL 0x20000 +#define DHD_CFG_VAL 0x40000 +#define DHD_CONFIG_VAL 0x80000 + +#ifdef SDTEST +/* For pktgen iovar */ +typedef struct dhd_pktgen { + uint32 version; /* To allow structure change tracking */ + uint32 freq; /* Max ticks between tx/rx attempts */ + uint32 count; /* Test packets to send/rcv 
each attempt */ + uint32 print; /* Print counts every attempts */ + uint32 total; /* Total packets (or bursts) */ + uint32 minlen; /* Minimum length of packets to send */ + uint32 maxlen; /* Maximum length of packets to send */ + uint32 numsent; /* Count of test packets sent */ + uint32 numrcvd; /* Count of test packets received */ + uint32 numfail; /* Count of test send failures */ + uint32 mode; /* Test mode (type of test packets) */ + uint32 stop; /* Stop after this many tx failures */ +} dhd_pktgen_t; + +/* Version in case structure changes */ +#define DHD_PKTGEN_VERSION 2 + +/* Type of test packets to use */ +#define DHD_PKTGEN_ECHO 1 /* Send echo requests */ +#define DHD_PKTGEN_SEND 2 /* Send discard packets */ +#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */ +#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */ +#endif /* SDTEST */ + +/* Enter idle immediately (no timeout) */ +#define DHD_IDLE_IMMEDIATE (-1) + +/* Values for idleclock iovar: other values are the sd_divisor to use when idle */ +#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */ +#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */ + +enum dhd_maclist_xtlv_type { + DHD_MACLIST_XTLV_R = 0x1, + DHD_MACLIST_XTLV_X = 0x2, + DHD_SVMPLIST_XTLV = 0x3 +}; + +typedef struct _dhd_maclist_t { + uint16 version; /* Version */ + uint16 bytes_len; /* Total bytes length of lists, XTLV headers and paddings */ + uint8 plist[1]; /* Pointer to the first list */ +} dhd_maclist_t; + +typedef struct _dhd_pd11regs_param { + uint16 start_idx; + uint8 verbose; + uint8 pad; + uint8 plist[1]; +} dhd_pd11regs_param; + +typedef struct _dhd_pd11regs_buf { + uint16 idx; + uint8 pad[2]; + uint8 pbuf[1]; +} dhd_pd11regs_buf; + +/* BT logging and memory dump */ + +#define BT_LOG_BUF_MAX_SIZE (DHD_IOCTL_MAXLEN - (2 * sizeof(int))) +#define BT_LOG_BUF_NOT_AVAILABLE 0 +#define BT_LOG_NEXT_BUF_NOT_AVAIL 1 +#define BT_LOG_NEXT_BUF_AVAIL 2 
+#define BT_LOG_NOT_READY 3 + +typedef struct bt_log_buf_info { + int availability; + int size; + char buf[BT_LOG_BUF_MAX_SIZE]; +} bt_log_buf_info_t; + +/* request BT memory in chunks */ +typedef struct bt_mem_req { + int offset; /* offset from BT memory start */ + int buf_size; /* buffer size per chunk */ +} bt_mem_req_t; + +/* max dest supported */ +#define DEBUG_BUF_DEST_MAX 4 + +/* debug buf dest stat */ +typedef struct debug_buf_dest_stat { + uint32 stat[DEBUG_BUF_DEST_MAX]; +} debug_buf_dest_stat_t; +#endif /* _dhdioctl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/dnglevent.h b/bcmdhd.100.10.315.x/include/dnglevent.h new file mode 100644 index 0000000..4919af7 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/dnglevent.h @@ -0,0 +1,139 @@ +/* + * Broadcom Event protocol definitions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * Dependencies: bcmeth.h + * + * $Id: dnglevent.h $ + * + * <> + * + * ----------------------------------------------------------------------------- + * + */ + +/* + * Broadcom dngl Ethernet Events protocol defines + * + */ + +#ifndef _DNGLEVENT_H_ +#define _DNGLEVENT_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif +#include +#include + +/* This marks the start of a packed structure section. */ +#include +#define BCM_DNGL_EVENT_MSG_VERSION 1 +#define DNGL_E_RSRVD_1 0x0 +#define DNGL_E_RSRVD_2 0x1 +#define DNGL_E_SOCRAM_IND 0x2 +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; /* Current version is 1 */ + uint16 reserved; /* reserved for any future extension */ + uint16 event_type; /* DNGL_E_SOCRAM_IND */ + uint16 datalen; /* Length of the event payload */ +} BWL_POST_PACKED_STRUCT bcm_dngl_event_msg_t; + +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_event { + struct ether_header eth; + bcmeth_hdr_t bcm_hdr; + bcm_dngl_event_msg_t dngl_event; + /* data portion follows */ +} BWL_POST_PACKED_STRUCT bcm_dngl_event_t; + +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_socramind { + uint16 tag; /* data tag */ + uint16 length; /* data length */ + uint8 value[1]; /* data value with variable length specified by length */ +} BWL_POST_PACKED_STRUCT bcm_dngl_socramind_t; + +/* SOCRAM_IND type tags */ +typedef enum socram_ind_tag { + SOCRAM_IND_ASSERT_TAG = 1, + SOCRAM_IND_TAG_HEALTH_CHECK = 2 +} socram_ind_tag_t; + +/* Health check top level module tags */ +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_healthcheck { + uint16 top_module_tag; /* top level module tag */ + uint16 top_module_len; /* Type of PCIE issue indication */ + uint8 value[1]; /* data value with variable length specified by length */ +} BWL_POST_PACKED_STRUCT bcm_dngl_healthcheck_t; + +/* Health check top level module tags */ +#define HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE 1 +#define HEALTH_CHECK_PCIEDEV_VERSION_1 1 +#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT 0 +#define 
HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT 1 +#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT 2 +#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT 3 +#define HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT 4 +#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT 5 +#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3 1 << HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_AER 1 << HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN 1 << HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT 1 << HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_NODS 1 << HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE 1 << HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT +/* PCIE Module TAGs */ +#define HEALTH_CHECK_PCIEDEV_INDUCED_IND 0x1 +#define HEALTH_CHECK_PCIEDEV_H2D_DMA_IND 0x2 +#define HEALTH_CHECK_PCIEDEV_D2H_DMA_IND 0x3 +#define HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND 0x4 +#define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND 0x5 +#define HEALTH_CHECK_PCIEDEV_NODS_IND 0x6 +#define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND 0x7 + +#define HC_PCIEDEV_CONFIG_REGLIST_MAX 20 +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc { + uint16 version; /* HEALTH_CHECK_PCIEDEV_VERSION_1 */ + uint16 reserved; + uint16 pcie_err_ind_type; /* PCIE Module TAGs */ + uint16 pcie_flag; + uint32 pcie_control_reg; + uint32 pcie_config_regs[HC_PCIEDEV_CONFIG_REGLIST_MAX]; +} BWL_POST_PACKED_STRUCT bcm_dngl_pcie_hc_t; + +#ifdef HCHK_COMMON_SW_EVENT +/* Enumerating top level SW entities for use by health check */ +typedef enum { + HCHK_SW_ENTITY_UNDEFINED = 0, + HCHK_SW_ENTITY_PCIE = 1, + HCHK_SW_ENTITY_SDIO = 2, + HCHK_SW_ENTITY_USB = 3, + HCHK_SW_ENTITY_RTE = 4, + HCHK_SW_ENTITY_WL_PRIMARY = 5, /* WL instance 0 */ + HCHK_SW_ENTITY_WL_SECONDARY = 6, /* WL instance 1 */ + HCHK_SW_ENTITY_MAX +} hchk_sw_entity_t; +#endif /* HCHK_COMMON_SW_EVENT */ + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* _DNGLEVENT_H_ */ diff --git a/bcmdhd.100.10.315.x/include/eapol.h b/bcmdhd.100.10.315.x/include/eapol.h new file mode 100644 index 0000000..5b11964 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/eapol.h @@ -0,0 +1,266 @@ +/* + * 802.1x EAPOL definitions + * + * See + * IEEE Std 802.1X-2001 + * IEEE 802.1X RADIUS Usage Guidelines + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: eapol.h 767212 2018-06-13 00:17:23Z $ + */ + +#ifndef _eapol_h_ +#define _eapol_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. 
*/
+#include <packed_section_start.h>
+
+#if !defined(BCMCRYPTO_COMPONENT)
+#include <bcmcrypto/aeskeywrap.h>
+#endif /* !BCMCRYPTO_COMPONENT */
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+ unsigned char body[1]; /* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4u
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION 2u
+#define WPA_EAPOL_VERSION 1u
+#define LEAP_EAPOL_VERSION 1u
+#define SES_EAPOL_VERSION 1u
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1u
+#define EAPOL_LOGOFF 2u
+#define EAPOL_KEY 3u
+#define EAPOL_ASF 4u
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1u
+#define EAPOL_WPA2_KEY 2u /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254u /* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN 8u
+#define EAPOL_KEY_IV_LEN 16u
+#define EAPOL_KEY_SIG_LEN 16u
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short length; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */
+ unsigned char index; /* Key Flags & Index */
+ unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */
+ unsigned char key[1]; /* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 44u
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK 0x80u
+#define EAPOL_KEY_BROADCAST 0u
+#define EAPOL_KEY_UNICAST 0x80u
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK 0x7fu
+
+/* WPA/802.11i/WPA2 EAPOL-Key header
field sizes */ +#define EAPOL_AKW_BLOCK_LEN 8 +#define EAPOL_WPA_KEY_REPLAY_LEN 8u +#define EAPOL_WPA_KEY_NONCE_LEN 32u +#define EAPOL_WPA_KEY_IV_LEN 16u +#define EAPOL_WPA_KEY_RSC_LEN 8u +#define EAPOL_WPA_KEY_ID_LEN 8u +#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + EAPOL_AKW_BLOCK_LEN) +#define EAPOL_WPA_MAX_KEY_SIZE 32u +#define EAPOL_WPA_KEY_MAX_MIC_LEN 32u +#define EAPOL_WPA_ENCR_KEY_MAX_LEN 64u +#define EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN 32u + +#define EAPOL_WPA_PMK_MAX_LEN 64u +#define EAPOL_WPA_PMK_SHA384_LEN 48u +#define EAPOL_WPA_PMK_DEFAULT_LEN 32u +#define EAPOL_WPA_KCK_DEFAULT_LEN 16u +#define EAPOL_WPA_KCK_MIC_DEFAULT_LEN 16u +#define EAPOL_WPA_ENCR_KEY_DEFAULT_LEN 16u + +#ifndef EAPOL_KEY_HDR_VER_V2 +#define EAPOL_WPA_KEY_MIC_LEN 16u /* deprecated */ +#define EAPOL_WPA_KEY_LEN 95u /* deprecated */ +#endif // endif + +#ifndef EAPOL_KEY_HDR_VER_V2 +/* WPA EAPOL-Key : deprecated */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short key_info; /* Key Information (unaligned) */ + unsigned short key_len; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */ + unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */ + unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */ + unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ + unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */ + unsigned short data_len; /* Key Data Length */ + unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */ +} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t; +#else +/* WPA EAPOL-Key : new structure to consider dynamic MIC length */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short key_info; /* Key Information (unaligned) */ + unsigned short key_len; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* 
Replay Counter */ + unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */ + unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */ + unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */ + unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ +} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_v2_t; + +typedef eapol_wpa_key_header_v2_t eapol_wpa_key_header_t; +#endif /* EAPOL_KEY_HDR_VER_V2 */ + +#define EAPOL_WPA_KEY_DATA_LEN_SIZE 2u + +#ifdef EAPOL_KEY_HDR_VER_V2 +#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) (sizeof(eapol_wpa_key_header_v2_t) \ + + mic_len + EAPOL_WPA_KEY_DATA_LEN_SIZE) + +/* WPA EAPOL-Key header macros to reach out mic/data_len/data field */ +#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t)) +#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) \ + ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t) + mic_len) +#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) \ + ((uint8 *)pos + EAPOL_WPA_KEY_HDR_SIZE(mic_len)) +#else +#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) EAPOL_WPA_KEY_LEN +#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)&pos->mic) +#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) ((uint8 *)&pos->data_len) +#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) ((uint8 *)&pos->data) +#endif /* EAPOL_KEY_HDR_VER_V2 */ + +/* WPA/802.11i/WPA2 KEY KEY_INFO bits */ +#define WPA_KEY_DESC_OSEN 0x0 +#define WPA_KEY_DESC_V0 0x0 +#define WPA_KEY_DESC_V1 0x01 +#define WPA_KEY_DESC_V2 0x02 +#define WPA_KEY_DESC_V3 0x03 +#define WPA_KEY_PAIRWISE 0x08 +#define WPA_KEY_INSTALL 0x40 +#define WPA_KEY_ACK 0x80 +#define WPA_KEY_MIC 0x100 +#define WPA_KEY_SECURE 0x200 +#define WPA_KEY_ERROR 0x400 +#define WPA_KEY_REQ 0x800 +#define WPA_KEY_DESC_VER(_ki) ((_ki) & 0x03u) + +#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2 + +/* WPA-only KEY KEY_INFO bits */ +#define WPA_KEY_INDEX_0 0x00 +#define WPA_KEY_INDEX_1 0x10 +#define WPA_KEY_INDEX_2 0x20 +#define WPA_KEY_INDEX_3 0x30 +#define WPA_KEY_INDEX_MASK 0x30 
+#define WPA_KEY_INDEX_SHIFT 0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA 0x1000
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 type;
+ uint8 length;
+ uint8 oui[3];
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK 1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2
+#define WPA2_KEY_DATA_SUBTYPE_MAC 3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID 4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK 9
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 flags;
+ uint8 reserved;
+ uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2
+
+#define WPA2_GTK_INDEX_MASK 0x03
+#define WPA2_GTK_INDEX_SHIFT 0x00
+
+#define WPA2_GTK_TRANSMIT 0x04
+
+/* IGTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 key_id;
+ uint8 ipn[6];
+ uint8 key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 8
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 reserved[2];
+ uint8 mac[ETHER_ADDR_LEN];
+ uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD 0xdd
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/bcmdhd.100.10.315.x/include/epivers.h b/bcmdhd.100.10.315.x/include/epivers.h
new file mode 100644
index 0000000..c99ea90
--- /dev/null
+++ b/bcmdhd.100.10.315.x/include/epivers.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: epivers.h.in 596126 2015-10-29 19:53:48Z $ + * +*/ + +#ifndef _epivers_h_ +#define _epivers_h_ + +#define EPI_MAJOR_VERSION 100 + +#define EPI_MINOR_VERSION 10 + +#define EPI_RC_NUMBER 315 + +#define EPI_INCREMENTAL_NUMBER 0 + +#define EPI_BUILD_NUMBER 0 + +#define EPI_VERSION 100, 10, 315, 0 + +#define EPI_VERSION_NUM 0x640a13b0 + +#define EPI_VERSION_DEV 100.10.315 + +/* Driver Version String, ASCII, 32 chars max */ +#define EPI_VERSION_STR "100.10.315.2 (r771911)" + +#endif /* _epivers_h_ */ diff --git a/bcmdhd.100.10.315.x/include/etd.h b/bcmdhd.100.10.315.x/include/etd.h new file mode 100644 index 0000000..6b7b87e --- /dev/null +++ b/bcmdhd.100.10.315.x/include/etd.h @@ -0,0 +1,478 @@ +/* + * Extended Trap data component interface file. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: etd.h 751605 2018-03-13 03:32:30Z $ + */ + +#ifndef _ETD_H_ +#define _ETD_H_ + +#if defined(ETD) && !defined(WLETD) +#include +#endif // endif +#include +/* Tags for structures being used by etd info iovar. + * Related structures are defined in wlioctl.h. 
+ */ +#define ETD_TAG_JOIN_CLASSIFICATION_INFO 10 /* general information about join request */ +#define ETD_TAG_JOIN_TARGET_CLASSIFICATION_INFO 11 /* per target (AP) join information */ +#define ETD_TAG_ASSOC_STATE 12 /* current state of the Device association state machine */ +#define ETD_TAG_CHANNEL 13 /* current channel on which the association was performed */ +#define ETD_TAG_TOTAL_NUM_OF_JOIN_ATTEMPTS 14 /* number of join attempts (bss_retries) */ + +#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1 3 +#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2 6 + +#ifndef _LANGUAGE_ASSEMBLY + +#define HND_EXTENDED_TRAP_VERSION 1 +#define HND_EXTENDED_TRAP_BUFLEN 512 + +typedef struct hnd_ext_trap_hdr { + uint8 version; /* Extended trap version info */ + uint8 reserved; /* currently unused */ + uint16 len; /* Length of data excluding this header */ + uint8 data[]; /* TLV data */ +} hnd_ext_trap_hdr_t; + +typedef enum { + TAG_TRAP_NONE = 0, /* None trap type */ + TAG_TRAP_SIGNATURE = 1, /* Processor register dumps */ + TAG_TRAP_STACK = 2, /* Processor stack dump (possible code locations) */ + TAG_TRAP_MEMORY = 3, /* Memory subsystem dump */ + TAG_TRAP_DEEPSLEEP = 4, /* Deep sleep health check failures */ + TAG_TRAP_PSM_WD = 5, /* PSM watchdog information */ + TAG_TRAP_PHY = 6, /* Phy related issues */ + TAG_TRAP_BUS = 7, /* Bus level issues */ + TAG_TRAP_MAC_SUSP = 8, /* Mac level suspend issues */ + TAG_TRAP_BACKPLANE = 9, /* Backplane related errors */ + /* Values 10 through 14 are in use by etd_data info iovar */ + TAG_TRAP_PCIE_Q = 15, /* PCIE Queue state during memory trap */ + TAG_TRAP_WLC_STATE = 16, /* WLAN state during memory trap */ + TAG_TRAP_MAC_WAKE = 17, /* Mac level wake issues */ + TAG_TRAP_PHYTXERR_THRESH = 18, /* Phy Tx Err */ + TAG_TRAP_HC_DATA = 19, /* Data collected by HC module */ + TAG_TRAP_LOG_DATA = 20, + TAG_TRAP_CODE = 21, /* The trap type */ + TAG_TRAP_HMAP = 22, /* HMAP violation Address and Info */ + TAG_TRAP_LAST /* This must be the last entry */ 
+} hnd_ext_tag_trap_t; + +typedef struct hnd_ext_trap_bp_err +{ + uint32 error; + uint32 coreid; + uint32 baseaddr; + uint32 ioctrl; + uint32 iostatus; + uint32 resetctrl; + uint32 resetstatus; + uint32 resetreadid; + uint32 resetwriteid; + uint32 errlogctrl; + uint32 errlogdone; + uint32 errlogstatus; + uint32 errlogaddrlo; + uint32 errlogaddrhi; + uint32 errlogid; + uint32 errloguser; + uint32 errlogflags; + uint32 itipoobaout; + uint32 itipoobbout; + uint32 itipoobcout; + uint32 itipoobdout; +} hnd_ext_trap_bp_err_t; + +#define HND_EXT_TRAP_PSMWD_INFO_VER 1 +typedef struct hnd_ext_trap_psmwd_v1 { + uint16 xtag; + uint16 version; /* version of the information following this */ + uint32 i32_maccontrol; + uint32 i32_maccommand; + uint32 i32_macintstatus; + uint32 i32_phydebug; + uint32 i32_clk_ctl_st; + uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1]; + uint16 i16_0x1a8; /* gated clock en */ + uint16 i16_0x406; /* Rcv Fifo Ctrl */ + uint16 i16_0x408; /* Rx ctrl 1 */ + uint16 i16_0x41a; /* Rxe Status 1 */ + uint16 i16_0x41c; /* Rxe Status 2 */ + uint16 i16_0x424; /* rcv wrd count 0 */ + uint16 i16_0x426; /* rcv wrd count 1 */ + uint16 i16_0x456; /* RCV_LFIFO_STS */ + uint16 i16_0x480; /* PSM_SLP_TMR */ + uint16 i16_0x490; /* PSM BRC */ + uint16 i16_0x500; /* TXE CTRL */ + uint16 i16_0x50e; /* TXE Status */ + uint16 i16_0x55e; /* TXE_xmtdmabusy */ + uint16 i16_0x566; /* TXE_XMTfifosuspflush */ + uint16 i16_0x690; /* IFS Stat */ + uint16 i16_0x692; /* IFS_MEDBUSY_CTR */ + uint16 i16_0x694; /* IFS_TX_DUR */ + uint16 i16_0x6a0; /* SLow_CTL */ + uint16 i16_0x838; /* TXE_AQM fifo Ready */ + uint16 i16_0x8c0; /* Dagg ctrl */ + uint16 shm_prewds_cnt; + uint16 shm_txtplufl_cnt; + uint16 shm_txphyerr_cnt; + uint16 pad; +} hnd_ext_trap_psmwd_v1_t; + +typedef struct hnd_ext_trap_psmwd { + uint16 xtag; + uint16 version; /* version of the information following this */ + uint32 i32_maccontrol; + uint32 i32_maccommand; + uint32 i32_macintstatus; + uint32 i32_phydebug; + 
uint32 i32_clk_ctl_st; + uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2]; + uint16 i16_0x4b8; /* psm_brwk_0 */ + uint16 i16_0x4ba; /* psm_brwk_1 */ + uint16 i16_0x4bc; /* psm_brwk_2 */ + uint16 i16_0x4be; /* psm_brwk_2 */ + uint16 i16_0x1a8; /* gated clock en */ + uint16 i16_0x406; /* Rcv Fifo Ctrl */ + uint16 i16_0x408; /* Rx ctrl 1 */ + uint16 i16_0x41a; /* Rxe Status 1 */ + uint16 i16_0x41c; /* Rxe Status 2 */ + uint16 i16_0x424; /* rcv wrd count 0 */ + uint16 i16_0x426; /* rcv wrd count 1 */ + uint16 i16_0x456; /* RCV_LFIFO_STS */ + uint16 i16_0x480; /* PSM_SLP_TMR */ + uint16 i16_0x500; /* TXE CTRL */ + uint16 i16_0x50e; /* TXE Status */ + uint16 i16_0x55e; /* TXE_xmtdmabusy */ + uint16 i16_0x566; /* TXE_XMTfifosuspflush */ + uint16 i16_0x690; /* IFS Stat */ + uint16 i16_0x692; /* IFS_MEDBUSY_CTR */ + uint16 i16_0x694; /* IFS_TX_DUR */ + uint16 i16_0x6a0; /* SLow_CTL */ + uint16 i16_0x490; /* psm_brc */ + uint16 i16_0x4da; /* psm_brc_1 */ + uint16 i16_0x838; /* TXE_AQM fifo Ready */ + uint16 i16_0x8c0; /* Dagg ctrl */ + uint16 shm_prewds_cnt; + uint16 shm_txtplufl_cnt; + uint16 shm_txphyerr_cnt; +} hnd_ext_trap_psmwd_t; + +#define HEAP_HISTOGRAM_DUMP_LEN 6 +#define HEAP_MAX_SZ_BLKS_LEN 2 + +/* Ignore chunks for which there are fewer than this many instances, irrespective of size */ +#define HEAP_HISTOGRAM_INSTANCE_MIN 4 + +/* + * Use the last two length values for chunks larger than this, or when we run out of + * histogram entries (because we have too many different sized chunks) to store "other" + */ +#define HEAP_HISTOGRAM_SPECIAL 0xfffeu + +#define HEAP_HISTOGRAM_GRTR256K 0xffffu + +typedef struct hnd_ext_trap_heap_err { + uint32 arena_total; + uint32 heap_free; + uint32 heap_inuse; + uint32 mf_count; + uint32 stack_lwm; + uint16 heap_histogm[HEAP_HISTOGRAM_DUMP_LEN * 2]; /* size/number */ + uint16 max_sz_free_blk[HEAP_MAX_SZ_BLKS_LEN]; +} hnd_ext_trap_heap_err_t; + +#define MEM_TRAP_NUM_WLC_TX_QUEUES 6 +#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V2 2 
+ +typedef struct hnd_ext_trap_wlc_mem_err { + uint8 instance; + uint8 associated; + uint8 soft_ap_client_cnt; + uint8 peer_cnt; + uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES]; +} hnd_ext_trap_wlc_mem_err_t; + +typedef struct hnd_ext_trap_wlc_mem_err_v2 { + uint16 version; + uint16 pad; + uint8 instance; + uint8 stas_associated; + uint8 aps_associated; + uint8 soft_ap_client_cnt; + uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES]; +} hnd_ext_trap_wlc_mem_err_v2_t; + +typedef struct hnd_ext_trap_pcie_mem_err { + uint16 d2h_queue_len; + uint16 d2h_req_queue_len; +} hnd_ext_trap_pcie_mem_err_t; + +#define HND_EXT_TRAP_MACSUSP_INFO_VER 1 +typedef struct hnd_ext_trap_macsusp { + uint16 xtag; + uint8 version; /* version of the information following this */ + uint8 trap_reason; + uint32 i32_maccontrol; + uint32 i32_maccommand; + uint32 i32_macintstatus; + uint32 i32_phydebug[4]; + uint32 i32_psmdebug[8]; + uint16 i16_0x41a; /* Rxe Status 1 */ + uint16 i16_0x41c; /* Rxe Status 2 */ + uint16 i16_0x490; /* PSM BRC */ + uint16 i16_0x50e; /* TXE Status */ + uint16 i16_0x55e; /* TXE_xmtdmabusy */ + uint16 i16_0x566; /* TXE_XMTfifosuspflush */ + uint16 i16_0x690; /* IFS Stat */ + uint16 i16_0x692; /* IFS_MEDBUSY_CTR */ + uint16 i16_0x694; /* IFS_TX_DUR */ + uint16 i16_0x7c0; /* WEP CTL */ + uint16 i16_0x838; /* TXE_AQM fifo Ready */ + uint16 i16_0x880; /* MHP_status */ + uint16 shm_prewds_cnt; + uint16 shm_ucode_dbgst; +} hnd_ext_trap_macsusp_t; + +#define HND_EXT_TRAP_MACENAB_INFO_VER 1 +typedef struct hnd_ext_trap_macenab { + uint16 xtag; + uint8 version; /* version of the information following this */ + uint8 trap_reason; + uint32 i32_maccontrol; + uint32 i32_maccommand; + uint32 i32_macintstatus; + uint32 i32_psmdebug[8]; + uint32 i32_clk_ctl_st; + uint32 i32_powerctl; + uint16 i16_0x1a8; /* gated clock en */ + uint16 i16_0x480; /* PSM_SLP_TMR */ + uint16 i16_0x490; /* PSM BRC */ + uint16 i16_0x600; /* TSF CTL */ + uint16 i16_0x690; /* IFS Stat */ + uint16 i16_0x692; /* 
IFS_MEDBUSY_CTR */ + uint16 i16_0x6a0; /* SLow_CTL */ + uint16 i16_0x6a6; /* SLow_FRAC */ + uint16 i16_0x6a8; /* fast power up delay */ + uint16 i16_0x6aa; /* SLow_PER */ + uint16 shm_ucode_dbgst; + uint16 PAD; +} hnd_ext_trap_macenab_t; + +typedef struct hnd_ext_trap_phydbg { + uint16 err; + uint16 RxFeStatus; + uint16 TxFIFOStatus0; + uint16 TxFIFOStatus1; + uint16 RfseqMode; + uint16 RfseqStatus0; + uint16 RfseqStatus1; + uint16 RfseqStatus_Ocl; + uint16 RfseqStatus_Ocl1; + uint16 OCLControl1; + uint16 TxError; + uint16 bphyTxError; + uint16 TxCCKError; + uint16 TxCtrlWrd0; + uint16 TxCtrlWrd1; + uint16 TxCtrlWrd2; + uint16 TxLsig0; + uint16 TxLsig1; + uint16 TxVhtSigA10; + uint16 TxVhtSigA11; + uint16 TxVhtSigA20; + uint16 TxVhtSigA21; + uint16 txPktLength; + uint16 txPsdulengthCtr; + uint16 gpioClkControl; + uint16 gpioSel; + uint16 pktprocdebug; + uint16 PAD; + uint32 gpioOut[3]; +} hnd_ext_trap_phydbg_t; + +/* unique IDs for separate cores in SI */ +#define REGDUMP_MASK_MAC0 BCM_BIT(1) +#define REGDUMP_MASK_ARM BCM_BIT(2) +#define REGDUMP_MASK_PCIE BCM_BIT(3) +#define REGDUMP_MASK_MAC1 BCM_BIT(4) +#define REGDUMP_MASK_PMU BCM_BIT(5) + +typedef struct { + uint16 reg_offset; + uint16 core_mask; +} reg_dump_config_t; + +#define HND_EXT_TRAP_PHY_INFO_VER 2 +typedef struct hnd_ext_trap_phydbg_v2 { + uint8 version; + uint8 len; + uint16 err; + uint16 RxFeStatus; + uint16 TxFIFOStatus0; + uint16 TxFIFOStatus1; + uint16 RfseqMode; + uint16 RfseqStatus0; + uint16 RfseqStatus1; + uint16 RfseqStatus_Ocl; + uint16 RfseqStatus_Ocl1; + uint16 OCLControl1; + uint16 TxError; + uint16 bphyTxError; + uint16 TxCCKError; + uint16 TxCtrlWrd0; + uint16 TxCtrlWrd1; + uint16 TxCtrlWrd2; + uint16 TxLsig0; + uint16 TxLsig1; + uint16 TxVhtSigA10; + uint16 TxVhtSigA11; + uint16 TxVhtSigA20; + uint16 TxVhtSigA21; + uint16 txPktLength; + uint16 txPsdulengthCtr; + uint16 gpioClkControl; + uint16 gpioSel; + uint16 pktprocdebug; + uint32 gpioOut[3]; + uint32 additional_regs[1]; +} 
hnd_ext_trap_phydbg_v2_t; + +/* Phy TxErr Dump Structure */ +#define HND_EXT_TRAP_PHYTXERR_INFO_VER 1 +#define HND_EXT_TRAP_PHYTXERR_INFO_VER_V2 2 +typedef struct hnd_ext_trap_macphytxerr { + uint8 version; /* version of the information following this */ + uint8 trap_reason; + uint16 i16_0x63E; /* tsf_tmr_rx_ts */ + uint16 i16_0x640; /* tsf_tmr_tx_ts */ + uint16 i16_0x642; /* tsf_tmr_rx_end_ts */ + uint16 i16_0x846; /* TDC_FrmLen0 */ + uint16 i16_0x848; /* TDC_FrmLen1 */ + uint16 i16_0x84a; /* TDC_Txtime */ + uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */ + uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */ + uint16 i16_0x856; /* TDC_VhtPsduLen0 */ + uint16 i16_0x858; /* TDC_VhtPsduLen1 */ + uint16 i16_0x490; /* psm_brc */ + uint16 i16_0x4d8; /* psm_brc_1 */ + uint16 shm_txerr_reason; + uint16 shm_pctl0; + uint16 shm_pctl1; + uint16 shm_pctl2; + uint16 shm_lsig0; + uint16 shm_lsig1; + uint16 shm_plcp0; + uint16 shm_plcp1; + uint16 shm_plcp2; + uint16 shm_vht_sigb0; + uint16 shm_vht_sigb1; + uint16 shm_tx_tst; + uint16 shm_txerr_tm; + uint16 shm_curchannel; + uint16 shm_crx_rxtsf_pos; + uint16 shm_lasttx_tsf; + uint16 shm_s_rxtsftmrval; + uint16 i16_0x29; /* Phy indirect address */ + uint16 i16_0x2a; /* Phy indirect address */ +} hnd_ext_trap_macphytxerr_t; + +typedef struct hnd_ext_trap_macphytxerr_v2 { + uint8 version; /* version of the information following this */ + uint8 trap_reason; + uint16 i16_0x63E; /* tsf_tmr_rx_ts */ + uint16 i16_0x640; /* tsf_tmr_tx_ts */ + uint16 i16_0x642; /* tsf_tmr_rx_end_ts */ + uint16 i16_0x846; /* TDC_FrmLen0 */ + uint16 i16_0x848; /* TDC_FrmLen1 */ + uint16 i16_0x84a; /* TDC_Txtime */ + uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */ + uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */ + uint16 i16_0x856; /* TDC_VhtPsduLen0 */ + uint16 i16_0x858; /* TDC_VhtPsduLen1 */ + uint16 i16_0x490; /* psm_brc */ + uint16 i16_0x4d8; /* psm_brc_1 */ + uint16 shm_txerr_reason; + uint16 shm_pctl0; + uint16 shm_pctl1; + uint16 shm_pctl2; + uint16 shm_lsig0; + uint16 
shm_lsig1; + uint16 shm_plcp0; + uint16 shm_plcp1; + uint16 shm_plcp2; + uint16 shm_vht_sigb0; + uint16 shm_vht_sigb1; + uint16 shm_tx_tst; + uint16 shm_txerr_tm; + uint16 shm_curchannel; + uint16 shm_crx_rxtsf_pos; + uint16 shm_lasttx_tsf; + uint16 shm_s_rxtsftmrval; + uint16 i16_0x29; /* Phy indirect address */ + uint16 i16_0x2a; /* Phy indirect address */ + uint8 phyerr_bmac_cnt; /* number of times bmac raised phy tx err */ + uint8 phyerr_bmac_rsn; /* bmac reason for phy tx error */ + uint16 pad; + uint32 recv_fifo_status[3][2]; /* Rcv Status0 & Rcv Status1 for 3 Rx fifos */ +} hnd_ext_trap_macphytxerr_v2_t; + +#define MAX_EVENTLOG_BUFFERS 48 +typedef struct eventlog_trapdata_info { + uint32 num_elements; + uint32 seq_num; + uint32 log_arr_addr; +} eventlog_trapdata_info_t; + +typedef struct eventlog_trap_buf_info { + uint32 len; + uint32 buf_addr; +} eventlog_trap_buf_info_t; + +#if defined(ETD) && !defined(WLETD) +#define ETD_SW_FLAG_MEM 0x00000001 + +int etd_init(osl_t *osh); +int etd_register_trap_ext_callback(void *cb, void *arg); +int (etd_register_trap_ext_callback_late)(void *cb, void *arg); +uint32 *etd_get_trap_ext_data(void); +uint32 etd_get_trap_ext_swflags(void); +void etd_set_trap_ext_swflag(uint32 flag); +void etd_notify_trap_ext_callback(trap_t *tr); +reg_dump_config_t *etd_get_reg_dump_config_tbl(void); +uint etd_get_reg_dump_config_len(void); + +extern bool _etd_enab; + + #define ETD_ENAB(pub) (_etd_enab) + +#else +#define ETD_ENAB(pub) (0) +#endif /* WLETD */ + +#endif /* !LANGUAGE_ASSEMBLY */ + +#endif /* _ETD_H_ */ diff --git a/bcmdhd.100.10.315.x/include/ethernet.h b/bcmdhd.100.10.315.x/include/ethernet.h new file mode 100644 index 0000000..565fc70 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/ethernet.h @@ -0,0 +1,224 @@ +/* + * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. + * + * Copyright (C) 1999-2018, Broadcom. 
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: ethernet.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif // endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define ETHER_ADDR_LEN 6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define ETHER_TYPE_LEN 2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define ETHER_CRC_LEN 4
+
+/*
+ * The length of the combined header.
+ */
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define ETHER_MIN_LEN 64
+
+/*
+ * The minimum packet user data length.
+ */
+#define ETHER_MIN_DATA 46
+
+/*
+ * The maximum packet length.
+ */ +#define ETHER_MAX_LEN 1518 + +/* + * The maximum packet user data length. + */ +#define ETHER_MAX_DATA 1500 + +/* ether types */ +#define ETHER_TYPE_MIN 0x0600 /* Anything less than MIN is a length */ +#define ETHER_TYPE_IP 0x0800 /* IP */ +#define ETHER_TYPE_ARP 0x0806 /* ARP */ +#define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */ +#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */ +#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */ +#define ETHER_TYPE_802_1X 0x888e /* 802.1x */ +#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */ +#define ETHER_TYPE_WAI 0x88b4 /* WAI */ +#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */ +#define ETHER_TYPE_RRB ETHER_TYPE_89_0D /* RRB 802.11r 2008 */ + +#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */ + +#define ETHER_TYPE_IAPP_L2_UPDATE 0x6 /* IAPP L2 update frame */ + +/* Broadcom subtype follows ethertype; First 2 bytes are reserved; Next 2 are subtype; */ +#define ETHER_BRCM_SUBTYPE_LEN 4 /* Broadcom 4 byte subtype */ + +/* ether header */ +#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) /* dest address offset */ +#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) /* src address offset */ +#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) /* ether type offset */ + +/* + * A macro to validate a length with + */ +#define ETHER_IS_VALID_LEN(foo) \ + ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) + +#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \ + ((uint8 *)ea)[0] = 0x01; \ + ((uint8 *)ea)[1] = 0x00; \ + ((uint8 *)ea)[2] = 0x5e; \ + ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \ + ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \ + ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \ +} + +#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */ +/* + * Structure of a 10Mb/s Ethernet header. + */ +BWL_PRE_PACKED_STRUCT struct ether_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 ether_type; +} BWL_POST_PACKED_STRUCT; + +/* + * Structure of a 48-bit Ethernet address. 
+ */ +BWL_PRE_PACKED_STRUCT struct ether_addr { + uint8 octet[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; +#endif /* !__INCif_etherh Quick and ugly hack for VxWorks */ + +/* + * Takes a pointer, set, test, clear, toggle locally admininistered + * address bit in the 48-bit Ethernet address. + */ +#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2)) +#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2) +#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd)) +#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2)) + +/* Takes a pointer, marks unicast address bit in the MAC address */ +#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1)) + +/* + * Takes a pointer, returns true if a 48-bit multicast address + * (including broadcast, since it is all ones) + */ +#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1) + +/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */ +#define eacmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \ + (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \ + (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2])) + +#define ether_cmp(a, b) eacmp(a, b) + +/* copy an ethernet address - assumes the pointers can be referenced as shorts */ +#define eacopy(s, d) \ +do { \ + ((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \ + ((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \ + ((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \ +} while (0) + +#define ether_copy(s, d) eacopy(s, d) + +/* Copy an ethernet address in reverse order */ +#define ether_rcopy(s, d) \ +do { \ + ((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \ + ((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \ + ((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \ +} while (0) + +/* Copy 14B ethernet header: 32bit aligned source and destination. 
*/
+#define ehcopy32(s, d) \
+do { \
+ ((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \
+ ((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \
+ ((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \
+ ((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
+} while (0)
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
+
+#define ETHER_ISBCAST(ea) ((((const uint8 *)(ea))[0] & \
+ ((const uint8 *)(ea))[1] & \
+ ((const uint8 *)(ea))[2] & \
+ ((const uint8 *)(ea))[3] & \
+ ((const uint8 *)(ea))[4] & \
+ ((const uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea) ((((const uint8 *)(ea))[0] | \
+ ((const uint8 *)(ea))[1] | \
+ ((const uint8 *)(ea))[2] | \
+ ((const uint8 *)(ea))[3] | \
+ ((const uint8 *)(ea))[4] | \
+ ((const uint8 *)(ea))[5]) == 0)
+
+#define ETHER_ISNULLDEST(da) ((((const uint16 *)(da))[0] | \
+ ((const uint16 *)(da))[1] | \
+ ((const uint16 *)(da))[2]) == 0)
+#define ETHER_ISNULLSRC(sa) ETHER_ISNULLDEST(sa)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+ struct ether_header t; \
+ t = *(struct ether_header *)(s); \
+ *(struct ether_header *)(d) = t; \
+} while (0)
+
+#define ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NET_ETHERNET_H_ */
diff --git a/bcmdhd.100.10.315.x/include/event_log.h b/bcmdhd.100.10.315.x/include/event_log.h
new file mode 100644
index 0000000..f051602
--- /dev/null
+++ b/bcmdhd.100.10.315.x/include/event_log.h
@@ -0,0 +1,422 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: event_log.h 717896 2017-08-28 21:56:11Z $ + */ + +#ifndef _EVENT_LOG_H_ +#define _EVENT_LOG_H_ + +#include +#include +#include +#include +#include + +/* logstrs header */ +#define LOGSTRS_MAGIC 0x4C4F4753 +#define LOGSTRS_VERSION 0x1 + +/* We make sure that the block size will fit in a single packet + * (allowing for a bit of overhead on each packet + */ +#if defined(BCMPCIEDEV) +#define EVENT_LOG_MAX_BLOCK_SIZE 1648 +#else +#define EVENT_LOG_MAX_BLOCK_SIZE 1400 +#endif // endif +#define EVENT_LOG_WL_BLOCK_SIZE 0x200 +#define EVENT_LOG_PSM_BLOCK_SIZE 0x200 +#define EVENT_LOG_BUS_BLOCK_SIZE 0x200 +#define EVENT_LOG_ERROR_BLOCK_SIZE 0x200 +/* Maximum event log record payload size = 1016 bytes or 254 words. 
*/ +#define EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE 254 + +/* + * There are multiple levels of objects define here: + * event_log_set - a set of buffers + * event log groups - every event log call is part of just one. All + * event log calls in a group are handled the + * same way. Each event log group is associated + * with an event log set or is off. + */ + +#ifndef __ASSEMBLER__ + +/* On the external system where the dumper is we need to make sure + * that these types are the same size as they are on the ARM the + * produced them + */ +#ifdef EVENT_LOG_DUMPER +#define _EL_BLOCK_PTR uint32 +#define _EL_TYPE_PTR uint32 +#define _EL_SET_PTR uint32 +#define _EL_TOP_PTR uint32 +#else +#define _EL_BLOCK_PTR struct event_log_block * +#define _EL_TYPE_PTR uint32 * +#define _EL_SET_PTR struct event_log_set ** +#define _EL_TOP_PTR struct event_log_top * +#endif /* EVENT_LOG_DUMPER */ + +/* Event log sets (a logical circurlar buffer) consist of one or more + * event_log_blocks. The blocks themselves form a logical circular + * list. The log entries are placed in each event_log_block until it + * is full. Logging continues with the next event_log_block in the + * event_set until the last event_log_block is reached and then + * logging starts over with the first event_log_block in the + * event_set. + */ +typedef struct event_log_block { + _EL_BLOCK_PTR next_block; + _EL_BLOCK_PTR prev_block; + _EL_TYPE_PTR end_ptr; + + /* Start of packet sent for log tracing */ + uint16 pktlen; /* Size of rest of block */ + uint16 count; /* Logtrace counter */ + uint32 extra_hdr_info; /* LSB: 6 bits set id. 
MSB 24 bits reserved */ + uint32 event_logs; +} event_log_block_t; +#define EVENT_LOG_BLOCK_HDRLEN 8 /* pktlen 2 + count 2 + extra_hdr_info 4 */ + +#define EVENT_LOG_BLOCK_LEN 12 + +typedef enum { + SET_DESTINATION_INVALID = -1, + SET_DESTINATION_HOST = 0, + SET_DESTINATION_NONE = 1, + SET_DESTINATION_MAX +} event_log_set_destination_t; + +/* There can be multiple event_sets with each logging a set of + * associated events (i.e, "fast" and "slow" events). + */ +typedef struct event_log_set { + _EL_BLOCK_PTR first_block; /* Pointer to first event_log block */ + _EL_BLOCK_PTR last_block; /* Pointer to last event_log block */ + _EL_BLOCK_PTR logtrace_block; /* next block traced */ + _EL_BLOCK_PTR cur_block; /* Pointer to current event_log block */ + _EL_TYPE_PTR cur_ptr; /* Current event_log pointer */ + uint32 blockcount; /* Number of blocks */ + uint16 logtrace_count; /* Last count for logtrace */ + uint16 blockfill_count; /* Fill count for logtrace */ + uint32 timestamp; /* Last timestamp event */ + uint32 cyclecount; /* Cycles at last timestamp event */ + event_log_set_destination_t destination; + uint16 size; /* same size for all buffers in one set */ +} event_log_set_t; + +/* logstr_hdr_flags */ +#define LOGSTRS_ENCRYPTED 0x1 + +/* Top data structure for access to everything else */ +typedef struct event_log_top { + uint32 magic; +#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */ + uint32 version; +#define EVENT_LOG_VERSION 1 + uint32 num_sets; + uint32 logstrs_size; /* Size of lognums + logstrs area */ + uint32 timestamp; /* Last timestamp event */ + uint32 cyclecount; /* Cycles at last timestamp event */ + _EL_SET_PTR sets; /* Ptr to array of set ptrs */ +} event_log_top_t; + +/* structure of the trailing 3 words in logstrs.bin */ +typedef struct { + uint32 fw_id; /* FWID will be written by tool later */ + uint32 flags; /* 0th bit indicates whether encrypted or not */ + /* Keep version and magic last since "header" is appended to the end of logstrs file. 
*/ + uint32 version; /* Header version */ + uint32 log_magic; /* MAGIC number for verification 'LOGS' */ +} logstr_trailer_t; + +/* Data structure of Keeping the Header from logstrs.bin */ +typedef struct { + uint32 logstrs_size; /* Size of the file */ + uint32 rom_lognums_offset; /* Offset to the ROM lognum */ + uint32 ram_lognums_offset; /* Offset to the RAM lognum */ + uint32 rom_logstrs_offset; /* Offset to the ROM logstr */ + uint32 ram_logstrs_offset; /* Offset to the RAM logstr */ + logstr_trailer_t trailer; +} logstr_header_t; + +/* Ver 1 Header from logstrs.bin */ +typedef struct { + uint32 logstrs_size; /* Size of the file */ + uint32 rom_lognums_offset; /* Offset to the ROM lognum */ + uint32 ram_lognums_offset; /* Offset to the RAM lognum */ + uint32 rom_logstrs_offset; /* Offset to the ROM logstr */ + uint32 ram_logstrs_offset; /* Offset to the RAM logstr */ + /* Keep version and magic last since "header" is appended to the end of logstrs file. */ + uint32 version; /* Header version */ + uint32 log_magic; /* MAGIC number for verification 'LOGS' */ +} logstr_header_v1_t; + +/* + * Use the following macros for generating log events. + * + * The FAST versions check the enable of the tag before evaluating the arguments and calling the + * event_log function. This adds 5 instructions. The COMPACT versions evaluate the arguments + * and call the event_log function unconditionally. The event_log function will then skip logging + * if this tag is disabled. + * + * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are + * two variants of these macros to help. + * + * First there are the CAST versions. The event_log function normally logs uint32 values or else + * they have to be cast to uint32. The CAST versions blindly cast for you so you don't have to edit + * any existing code. + * + * Second there are the PAREN_ARGS versions. These expect the logging format string and arguments + * to be enclosed in parentheses. 
This allows us to make the following mapping of an existing + * msglevel macro: + * #define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args) + * + * The versions of the macros without FAST or COMPACT in their name are just synonyms for the + * COMPACT versions. + * + * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic + * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed. + * Use the FAST macro in performance sensitive paths. The key concept here is that you should be + * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely. + * + */ + +#if !defined(EVENT_LOG_DUMPER) + +#ifndef EVENT_LOG_COMPILE + +/* Null define if no tracing */ +#define EVENT_LOG(format, ...) +#define EVENT_LOG_FAST(tag, fmt, ...) +#define EVENT_LOG_COMPACT(tag, fmt, ...) + +#define EVENT_LOG_CAST(tag, fmt, ...) +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) +#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) + +#define EVENT_LOG_IS_ON(tag) 0 +#define EVENT_LOG_IS_LOG_ON(tag) 0 + +#define EVENT_LOG_BUFFER(tag, buf, size) + +#else /* EVENT_LOG_COMPILE */ + +/* The first few are special because they can be done more efficiently + * this way and they are the common case. 
Once there are too many + * parameters the code size starts to be an issue and a loop is better + */ +#define _EVENT_LOG0(tag, fmt_num) \ + event_log0(tag, fmt_num) +#define _EVENT_LOG1(tag, fmt_num, t1) \ + event_log1(tag, fmt_num, t1) +#define _EVENT_LOG2(tag, fmt_num, t1, t2) \ + event_log2(tag, fmt_num, t1, t2) +#define _EVENT_LOG3(tag, fmt_num, t1, t2, t3) \ + event_log3(tag, fmt_num, t1, t2, t3) +#define _EVENT_LOG4(tag, fmt_num, t1, t2, t3, t4) \ + event_log4(tag, fmt_num, t1, t2, t3, t4) + +/* The rest call the generic routine that takes a count */ +#define _EVENT_LOG5(tag, fmt_num, ...) event_logn(5, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG6(tag, fmt_num, ...) event_logn(6, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG7(tag, fmt_num, ...) event_logn(7, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG8(tag, fmt_num, ...) event_logn(8, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG9(tag, fmt_num, ...) event_logn(9, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGA(tag, fmt_num, ...) event_logn(10, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGB(tag, fmt_num, ...) event_logn(11, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGC(tag, fmt_num, ...) event_logn(12, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGD(tag, fmt_num, ...) event_logn(13, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGF(tag, fmt_num, ...) 
event_logn(15, tag, fmt_num, __VA_ARGS__) + +/* Casting low level macros */ +#define _EVENT_LOG_CAST0(tag, fmt_num) \ + event_log0(tag, fmt_num) +#define _EVENT_LOG_CAST1(tag, fmt_num, t1) \ + event_log1(tag, fmt_num, (uint32)(t1)) +#define _EVENT_LOG_CAST2(tag, fmt_num, t1, t2) \ + event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2)) +#define _EVENT_LOG_CAST3(tag, fmt_num, t1, t2, t3) \ + event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3)) +#define _EVENT_LOG_CAST4(tag, fmt_num, t1, t2, t3, t4) \ + event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4)) + +/* The rest call the generic routine that takes a count */ +#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTA(tag, fmt_num, ...) _EVENT_LOGA(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__) + +/* Hack to make the proper routine call when variadic macros get + * passed. Note the max of 15 arguments. More than that can't be + * handled by the event_log entries anyways so best to catch it at compile + * time + */ + +#define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9, \ + _A, _B, _C, _D, _E, _F, N, ...) 
F ## N + +/* cast = _EVENT_LOG for no casting + * cast = _EVENT_LOG_CAST for casting of fmt arguments to uint32. + * Only first 4 arguments are casted to uint32. event_logn() is called + * if more than 4 arguments are present. This function internally assumes + * all arguments are uint32 + */ +#define _EVENT_LOG(cast, tag, fmt, ...) \ + static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \ + static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \ + _EVENT_LOG_VA_NUM_ARGS(cast, ##__VA_ARGS__, \ + F, E, D, C, B, A, 9, 8, \ + 7, 6, 5, 4, 3, 2, 1, 0) \ + (tag, (int) &fmtnum , ## __VA_ARGS__) + +#define EVENT_LOG_FAST(tag, fmt, ...) \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define EVENT_LOG_COMPACT(tag, fmt, ...) \ + do { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } while (0) + +/* Event log macro with casting to uint32 of arguments */ +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) \ + do { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } while (0) + +#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__) + +#define EVENT_LOG_CAST(tag, fmt, ...) EVENT_LOG_COMPACT_CAST(tag, fmt , ## __VA_ARGS__) + +#define _EVENT_LOG_REMOVE_PAREN(...) 
__VA_ARGS__ +#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +/* Minimal event logging. Event log internally calls event_logx() + * log return address in caller. + * Note that the if(0){..} below is to avoid compiler warnings + * due to unused variables caused by this macro + */ +#define EVENT_LOG_RA(tag, args) \ + do { \ + if (0) { \ + EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, args); \ + } \ + event_log_caller_return_address(tag); \ + } while (0) + +#define EVENT_LOG_IS_ON(tag) (*(event_log_tag_sets + (tag)) & ~EVENT_LOG_TAG_FLAG_SET_MASK) +#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG) + +#define EVENT_LOG_BUFFER(tag, buf, size) event_log_buffer(tag, buf, size) +#define EVENT_DUMP event_log_buffer + +extern uint8 *event_log_tag_sets; + +extern int event_log_init(osl_t *osh); +extern int event_log_set_init(osl_t *osh, int set_num, int size); +extern int event_log_set_expand(osl_t *osh, int set_num, int size); +extern int event_log_set_shrink(osl_t *osh, int set_num, int size); + +extern int event_log_tag_start(int tag, int set_num, int flags); +extern int event_log_tag_set_retrieve(int tag); +extern int event_log_tag_stop(int tag); + +typedef void (*event_log_logtrace_trigger_fn_t)(void *ctx); +void event_log_set_logtrace_trigger_fn(event_log_logtrace_trigger_fn_t fn, void *ctx); + +event_log_top_t *event_log_get_top(void); + +extern int event_log_get(int set_num, int buflen, void *buf); + +extern uint8 *event_log_next_logtrace(int set_num); + +extern void event_log0(int tag, int fmtNum); +extern void event_log1(int tag, int fmtNum, uint32 t1); +extern void event_log2(int 
tag, int fmtNum, uint32 t1, uint32 t2); +extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3); +extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4); +extern void event_logn(int num_args, int tag, int fmtNum, ...); + +extern void event_log_time_sync(uint32 ms); +extern void event_log_buffer(int tag, uint8 *buf, int size); +extern void event_log_caller_return_address(int tag); +extern int event_log_set_destination_set(int set, event_log_set_destination_t dest); +extern event_log_set_destination_t event_log_set_destination_get(int set); +extern int event_log_flush_log_buffer(int set); +extern uint16 event_log_get_available_space(int set); +extern bool event_log_is_set_configured(int set_num); +extern bool event_log_is_tag_valid(int tag); +/* returns number of blocks available for writing */ +extern int event_log_free_blocks_get(int set); +extern bool event_log_is_ready(void); + +#endif /* EVENT_LOG_DUMPER */ + +#endif /* EVENT_LOG_COMPILE */ + +#endif /* __ASSEMBLER__ */ + +#endif /* _EVENT_LOG_H_ */ diff --git a/bcmdhd.100.10.315.x/include/event_log_payload.h b/bcmdhd.100.10.315.x/include/event_log_payload.h new file mode 100644 index 0000000..2644831 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/event_log_payload.h @@ -0,0 +1,799 @@ +/* + * EVENT_LOG System Definitions + * + * This file describes the payloads of event log entries that are data buffers + * rather than formatted string entries. The contents are generally XTLVs. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: event_log_payload.h 768232 2018-06-19 05:28:22Z $ + */ + +#ifndef _EVENT_LOG_PAYLOAD_H_ +#define _EVENT_LOG_PAYLOAD_H_ + +#include +#include +#include +#include + +#define EVENT_LOG_XTLV_ID_STR 0 /**< XTLV ID for a string */ +#define EVENT_LOG_XTLV_ID_TXQ_SUM 1 /**< XTLV ID for txq_summary_t */ +#define EVENT_LOG_XTLV_ID_SCBDATA_SUM 2 /**< XTLV ID for cb_subq_summary_t */ +#define EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM 3 /**< XTLV ID for scb_ampdu_tx_summary_t */ +#define EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM 4 /**< XTLV ID for bsscfg_q_summary_t */ +#define EVENT_LOG_XTLV_ID_UCTXSTATUS 5 /**< XTLV ID for ucode TxStatus array */ +#define EVENT_LOG_XTLV_ID_TXQ_SUM_V2 6 /**< XTLV ID for txq_summary_v2_t */ + +/** + * An XTLV holding a string + * String is not null terminated, length is the XTLV len. 
+ */ +typedef struct xtlv_string { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_STR */ + uint16 len; /* XTLV Len (String length) */ + char str[1]; /* var len array characters */ +} xtlv_string_t; + +#define XTLV_STRING_FULL_LEN(str_len) (BCM_XTLV_HDR_SIZE + (str_len) * sizeof(char)) + +/** + * Summary for a single TxQ context + * Two of these will be used per TxQ context---one for the high TxQ, and one for + * the low txq that contains DMA prepared pkts. The high TxQ is a full multi-precidence + * queue and also has a BSSCFG map to identify the BSSCFGS associated with the queue context. + * The low txq counterpart does not populate the BSSCFG map. + * The excursion queue will have no bsscfgs associated and is the first queue dumped. + */ +typedef struct txq_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM */ + uint16 len; /* XTLV Len */ + uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */ + uint32 stopped; /* flow control bitmap */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint8 pad; + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} txq_summary_t; + +#define TXQ_SUMMARY_LEN (OFFSETOF(txq_summary_t, plen)) +#define TXQ_SUMMARY_FULL_LEN(num_q) (TXQ_SUMMARY_LEN + (num_q) * sizeof(uint16)) + +typedef struct txq_summary_v2 { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM_V2 */ + uint16 len; /* XTLV Len */ + uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */ + uint32 stopped; /* flow control bitmap */ + uint32 hw_stopped; /* flow control bitmap */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint8 pad; + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} txq_summary_v2_t; + +#define TXQ_SUMMARY_V2_LEN (OFFSETOF(txq_summary_v2_t, plen)) +#define TXQ_SUMMARY_V2_FULL_LEN(num_q) (TXQ_SUMMARY_V2_LEN + (num_q) * sizeof(uint16)) + +/** + * Summary for tx datapath of an SCB 
cubby + * This is a generic summary structure (one size fits all) with + * a cubby ID and sub-ID to differentiate SCB cubby types and possible sub-queues. + */ +typedef struct scb_subq_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_SUM */ + uint16 len; /* XTLV Len */ + uint32 flags; /* cubby specficic flags */ + uint8 cubby_id; /* ID registered for cubby */ + uint8 sub_id; /* sub ID if a cubby has more than one queue */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint8 pad; + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} scb_subq_summary_t; + +#define SCB_SUBQ_SUMMARY_LEN (OFFSETOF(scb_subq_summary_t, plen)) +#define SCB_SUBQ_SUMMARY_FULL_LEN(num_q) (SCB_SUBQ_SUMMARY_LEN + (num_q) * sizeof(uint16)) + +/* scb_subq_summary_t.flags for APPS */ +#define SCBDATA_APPS_F_PS 0x00000001 +#define SCBDATA_APPS_F_PSPEND 0x00000002 +#define SCBDATA_APPS_F_INPVB 0x00000004 +#define SCBDATA_APPS_F_APSD_USP 0x00000008 +#define SCBDATA_APPS_F_TXBLOCK 0x00000010 +#define SCBDATA_APPS_F_APSD_HPKT_TMR 0x00000020 +#define SCBDATA_APPS_F_APSD_TX_PEND 0x00000040 +#define SCBDATA_APPS_F_INTRANS 0x00000080 +#define SCBDATA_APPS_F_OFF_PEND 0x00000100 +#define SCBDATA_APPS_F_OFF_BLOCKED 0x00000200 +#define SCBDATA_APPS_F_OFF_IN_PROG 0x00000400 + +/** + * Summary for tx datapath AMPDU SCB cubby + * This is a specific data structure to describe the AMPDU datapath state for an SCB + * used instead of scb_subq_summary_t. + * Info is for one TID, so one will be dumped per BA TID active for an SCB. 
+ */ +typedef struct scb_ampdu_tx_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM */ + uint16 len; /* XTLV Len */ + uint32 flags; /* misc flags */ + uint8 tid; /* initiator TID (priority) */ + uint8 ba_state; /* internal BA state */ + uint8 bar_cnt; /* number of bars sent with no progress */ + uint8 retry_bar; /* reason code if bar to be retried at watchdog */ + uint16 barpending_seq; /* seqnum for bar */ + uint16 bar_ackpending_seq; /* seqnum of bar for which ack is pending */ + uint16 start_seq; /* seqnum of the first unacknowledged packet */ + uint16 max_seq; /* max unacknowledged seqnum sent */ + uint32 released_bytes_inflight; /* Number of bytes pending in bytes */ + uint32 released_bytes_target; +} scb_ampdu_tx_summary_t; + +/* scb_ampdu_tx_summary.flags defs */ +#define SCBDATA_AMPDU_TX_F_BAR_ACKPEND 0x00000001 /* bar_ackpending */ + +/** XTLV stuct to summarize a BSSCFG's packet queue */ +typedef struct bsscfg_q_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM */ + uint16 len; /* XTLV Len */ + struct ether_addr BSSID; /* BSSID */ + uint8 bsscfg_idx; /* bsscfg index */ + uint8 type; /* bsscfg type enumeration: BSSCFG_TYPE_XXX */ + uint8 subtype; /* bsscfg subtype enumeration: BSSCFG_SUBTYPE_XXX */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} bsscfg_q_summary_t; + +#define BSSCFG_Q_SUMMARY_LEN (OFFSETOF(bsscfg_q_summary_t, plen)) +#define BSSCFG_Q_SUMMARY_FULL_LEN(num_q) (BSSCFG_Q_SUMMARY_LEN + (num_q) * sizeof(uint16)) + +/** + * An XTLV holding a TxStats array + * TxStatus entries are 8 or 16 bytes, size in words (2 or 4) givent in + * entry_size field. 
+ * Array is uint32 words + */ +typedef struct xtlv_uc_txs { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_UCTXSTATUS */ + uint16 len; /* XTLV Len */ + uint8 entry_size; /* num uint32 words per entry */ + uint8 pad[3]; /* reserved, zero */ + uint32 w[1]; /* var len array of words */ +} xtlv_uc_txs_t; + +#define XTLV_UCTXSTATUS_LEN (OFFSETOF(xtlv_uc_txs_t, w)) +#define XTLV_UCTXSTATUS_FULL_LEN(words) (XTLV_UCTXSTATUS_LEN + (words) * sizeof(uint32)) + +#define SCAN_SUMMARY_VERSION 1 +/* Scan flags */ +#define SCAN_SUM_CHAN_INFO 0x1 +/* Scan_sum flags */ +#define BAND5G_SIB_ENAB 0x2 +#define BAND2G_SIB_ENAB 0x4 +#define PARALLEL_SCAN 0x8 +#define SCAN_ABORT 0x10 + +/* scan_channel_info flags */ +#define ACTIVE_SCAN_SCN_SUM 0x2 +#define SCAN_SUM_WLC_CORE0 0x4 +#define SCAN_SUM_WLC_CORE1 0x8 +#define HOME_CHAN 0x10 + +typedef struct wl_scan_ssid_info +{ + uint8 ssid_len; /* the length of SSID */ + uint8 ssid[32]; /* SSID string */ +} wl_scan_ssid_info_t; + +typedef struct wl_scan_channel_info { + uint16 chanspec; /* chanspec scanned */ + uint16 reserv; + uint32 start_time; /* Scan start time in + * milliseconds for the chanspec + * or home_dwell time start + */ + uint32 end_time; /* Scan end time in + * milliseconds for the chanspec + * or home_dwell time end + */ + uint16 probe_count; /* No of probes sent out. For future use + */ + uint16 scn_res_count; /* Count of scan_results found per + * channel. For future use + */ +} wl_scan_channel_info_t; + +typedef struct wl_scan_summary_info { + uint32 total_chan_num; /* Total number of channels scanned */ + uint32 scan_start_time; /* Scan start time in milliseconds */ + uint32 scan_end_time; /* Scan end time in milliseconds */ + wl_scan_ssid_info_t ssid[1]; /* SSID being scanned in current + * channel. For future use + */ +} wl_scan_summary_info_t; + +struct wl_scan_summary { + uint8 version; /* Version */ + uint8 reserved; + uint16 len; /* Length of the data buffer including SSID + * list. 
+ */ + uint16 sync_id; /* Scan Sync ID */ + uint16 scan_flags; /* flags [0] or SCAN_SUM_CHAN_INFO = */ + /* channel_info, if not set */ + /* it is scan_summary_info */ + /* when channel_info is used, */ + /* the following flag bits are overridden: */ + /* flags[1] or ACTIVE_SCAN_SCN_SUM = active channel if set */ + /* passive if not set */ + /* flags[2] or WLC_CORE0 = if set, represents wlc_core0 */ + /* flags[3] or WLC_CORE1 = if set, represents wlc_core1 */ + /* flags[4] or HOME_CHAN = if set, represents home-channel */ + /* flags[5:15] = reserved */ + /* when scan_summary_info is used, */ + /* the following flag bits are used: */ + /* flags[1] or BAND5G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 5G band */ + /* flags[2] or BAND2G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 2G band */ + /* flags[3] or PARALLEL_SCAN = Parallel scan enabled or not */ + /* flags[4] or SCAN_ABORT = SCAN_ABORTED scenario */ + /* flags[5:15] = reserved */ + union { + wl_scan_channel_info_t scan_chan_info; /* scan related information + * for each channel scanned + */ + wl_scan_summary_info_t scan_sum_info; /* Cumulative scan related + * information. + */ + } u; +}; + +/* Channel switch log record structure + * Host may map the following structure on channel switch event log record + * received from dongle. Note that all payload entries in event log record are + * uint32/int32. 
+ */ +typedef struct wl_chansw_event_log_record { + uint32 time; /* Time in us */ + uint32 old_chanspec; /* Old channel spec */ + uint32 new_chanspec; /* New channel spec */ + uint32 chansw_reason; /* Reason for channel change */ + int32 dwell_time; +} wl_chansw_event_log_record_t; + +typedef struct wl_chansw_event_log_record_v2 { + uint32 time; /* Time in us */ + uint32 old_chanspec; /* Old channel spec */ + uint32 new_chanspec; /* New channel spec */ + uint32 chansw_reason; /* Reason for channel change */ + int32 dwell_time; + uint32 core; + int32 phychanswtime; /* channel switch time */ +} wl_chansw_event_log_record_v2_t; + +/* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */ +typedef enum { + WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_RXMCSx2 = 1, + WL_AMPDU_STATS_TYPE_RXMCSx3 = 2, + WL_AMPDU_STATS_TYPE_RXMCSx4 = 3, + WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_RXVHTx2 = 5, + WL_AMPDU_STATS_TYPE_RXVHTx3 = 6, + WL_AMPDU_STATS_TYPE_RXVHTx4 = 7, + WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_TXMCSx2 = 9, + WL_AMPDU_STATS_TYPE_TXMCSx3 = 10, + WL_AMPDU_STATS_TYPE_TXMCSx4 = 11, + WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */ + WL_AMPDU_STATS_TYPE_TXVHTx2 = 13, + WL_AMPDU_STATS_TYPE_TXVHTx3 = 14, + WL_AMPDU_STATS_TYPE_TXVHTx4 = 15, + WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER (for all MCS rates) */ + WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */ + WL_AMPDU_STATS_TYPE_RXDENS = 24, 
/* RX AMPDU density */ + WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */ + WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */ + WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */ + WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */ + WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */ + WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */ + WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */ + WL_AMPDU_STATS_MAX_CNTS = 64 +} wl_ampdu_stat_enum_t; +typedef struct { + uint16 type; /* AMPDU statistics sub-type */ + uint16 len; /* Number of 32-bit counters */ + uint32 counters[WL_AMPDU_STATS_MAX_CNTS]; +} wl_ampdu_stats_generic_t; + +typedef wl_ampdu_stats_generic_t wl_ampdu_stats_rx_t; +typedef wl_ampdu_stats_generic_t wl_ampdu_stats_tx_t; + +typedef struct { + uint16 type; /* AMPDU statistics sub-type */ + uint16 len; /* Number of 32-bit counters + 2 */ + uint32 total_ampdu; + uint32 total_mpdu; + uint32 aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1]; +} wl_ampdu_stats_aggrsz_t; + +/* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */ +#define WL_MSCH_PROFILER_START 0 /* start event check */ +#define WL_MSCH_PROFILER_EXIT 1 /* exit event check */ +#define WL_MSCH_PROFILER_REQ 2 /* request event */ +#define WL_MSCH_PROFILER_CALLBACK 3 /* call back event */ +#define WL_MSCH_PROFILER_MESSAGE 4 /* message event */ +#define WL_MSCH_PROFILER_PROFILE_START 5 +#define WL_MSCH_PROFILER_PROFILE_END 6 +#define WL_MSCH_PROFILER_REQ_HANDLE 7 +#define WL_MSCH_PROFILER_REQ_ENTITY 8 +#define WL_MSCH_PROFILER_CHAN_CTXT 9 +#define WL_MSCH_PROFILER_EVENT_LOG 10 +#define WL_MSCH_PROFILER_REQ_TIMING 11 +#define WL_MSCH_PROFILER_TYPE_MASK 0x00ff +#define WL_MSCH_PROFILER_WLINDEX_SHIFT 8 +#define WL_MSCH_PROFILER_WLINDEX_MASK 0x0f00 +#define WL_MSCH_PROFILER_VER_SHIFT 12 +#define WL_MSCH_PROFILER_VER_MASK 0xf000 + +/* MSCH Event data current verion */ +#define WL_MSCH_PROFILER_VER 2 + +/* msch version history */ +#define WL_MSCH_PROFILER_RSDB_VER 1 +#define 
WL_MSCH_PROFILER_REPORT_VER 2 + +/* msch collect header size */ +#define WL_MSCH_PROFILE_HEAD_SIZE OFFSETOF(msch_collect_tlv_t, value) + +/* msch event log header size */ +#define WL_MSCH_EVENT_LOG_HEAD_SIZE OFFSETOF(msch_event_log_profiler_event_data_t, data) + +/* MSCH data buffer size */ +#define WL_MSCH_PROFILER_BUFFER_SIZE 512 + +/* request type used in wlc_msch_req_param_t struct */ +#define WL_MSCH_RT_BOTH_FIXED 0 /* both start and end time is fixed */ +#define WL_MSCH_RT_START_FLEX 1 /* start time is flexible and duration is fixed */ +#define WL_MSCH_RT_DUR_FLEX 2 /* start time is fixed and end time is flexible */ +#define WL_MSCH_RT_BOTH_FLEX 3 /* Both start and duration is flexible */ + +/* Flags used in wlc_msch_req_param_t struct */ +#define WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS (1 << 0) /* Don't break up channels in chanspec_list */ +#define WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS (1 << 1) /* No slot end if slots are continous */ +#define WL_MSCH_REQ_FLAGS_PREMTABLE (1 << 2) /* Req can be pre-empted by PREMT_CURTS req */ +#define WL_MSCH_REQ_FLAGS_PREMT_CURTS (1 << 3) /* Pre-empt request at the end of curts */ +#define WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE (1 << 4) /* Pre-empt cur_ts immediately */ + +/* Requested slot Callback states + * req->pend_slot/cur_slot->flags + */ +#define WL_MSCH_RC_FLAGS_ONCHAN_FIRE (1 << 0) +#define WL_MSCH_RC_FLAGS_START_FIRE_DONE (1 << 1) +#define WL_MSCH_RC_FLAGS_END_FIRE_DONE (1 << 2) +#define WL_MSCH_RC_FLAGS_ONFIRE_DONE (1 << 3) +#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_START (1 << 4) +#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_END (1 << 5) +#define WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE (1 << 6) + +/* Request entity flags */ +#define WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE (1 << 0) + +/* Request Handle flags */ +#define WL_MSCH_REQ_HDL_FLAGS_NEW_REQ (1 << 0) /* req_start callback */ + +/* MSCH state flags (msch_info->flags) */ +#define WL_MSCH_STATE_IN_TIEMR_CTXT 0x1 +#define WL_MSCH_STATE_SCHD_PENDING 0x2 + +/* MSCH callback type */ +#define 
WL_MSCH_CT_REQ_START 0x1 +#define WL_MSCH_CT_ON_CHAN 0x2 +#define WL_MSCH_CT_SLOT_START 0x4 +#define WL_MSCH_CT_SLOT_END 0x8 +#define WL_MSCH_CT_SLOT_SKIP 0x10 +#define WL_MSCH_CT_OFF_CHAN 0x20 +#define WL_MSCH_CT_OFF_CHAN_DONE 0x40 +#define WL_MSCH_CT_REQ_END 0x80 +#define WL_MSCH_CT_PARTIAL 0x100 +#define WL_MSCH_CT_PRE_ONCHAN 0x200 +#define WL_MSCH_CT_PRE_REQ_START 0x400 + +/* MSCH command bits */ +#define WL_MSCH_CMD_ENABLE_BIT 0x01 +#define WL_MSCH_CMD_PROFILE_BIT 0x02 +#define WL_MSCH_CMD_CALLBACK_BIT 0x04 +#define WL_MSCH_CMD_REGISTER_BIT 0x08 +#define WL_MSCH_CMD_ERROR_BIT 0x10 +#define WL_MSCH_CMD_DEBUG_BIT 0x20 +#define WL_MSCH_CMD_INFOM_BIT 0x40 +#define WL_MSCH_CMD_TRACE_BIT 0x80 +#define WL_MSCH_CMD_ALL_BITS 0xfe +#define WL_MSCH_CMD_SIZE_MASK 0x00ff0000 +#define WL_MSCH_CMD_SIZE_SHIFT 16 +#define WL_MSCH_CMD_VER_MASK 0xff000000 +#define WL_MSCH_CMD_VER_SHIFT 24 + +/* maximum channels returned by the get valid channels iovar */ +#define WL_MSCH_NUMCHANNELS 64 + +typedef struct msch_collect_tlv { + uint16 type; + uint16 size; + char value[1]; +} msch_collect_tlv_t; + +typedef struct msch_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; +} msch_profiler_event_data_t; + +typedef struct msch_start_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 status; +} msch_start_profiler_event_data_t; + +typedef struct msch_message_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + char message[1]; /* message */ +} msch_message_profiler_event_data_t; + +typedef struct msch_event_log_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + event_log_hdr_t hdr; /* event log header */ + uint32 data[9]; /* event data */ +} msch_event_log_profiler_event_data_t; + +typedef struct msch_req_param_profiler_event_data { + uint16 flags; /* Describe various request properties */ + uint8 req_type; /* Describe start and end time flexiblilty */ + uint8 priority; /* 
Define the request priority */ + uint32 start_time_l; /* Requested start time offset in us unit */ + uint32 start_time_h; + uint32 duration; /* Requested duration in us unit */ + uint32 interval; /* Requested periodic interval in us unit, + * 0 means non-periodic + */ + union { + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + struct { + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */ + uint32 hi_prio_time_l; + uint32 hi_prio_time_h; + uint32 hi_prio_interval; /* repeated high priority interval */ + } bf; + } flex; +} msch_req_param_profiler_event_data_t; + +typedef struct msch_req_timing_profiler_event_data { + uint32 p_req_timing; + uint32 p_prev; + uint32 p_next; + uint16 flags; + uint16 timeslot_ptr; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 start_time_l; + uint32 start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 p_timeslot; +} msch_req_timing_profiler_event_data_t; + +typedef struct msch_chan_ctxt_profiler_event_data { + uint32 p_chan_ctxt; + uint32 p_prev; + uint32 p_next; + uint16 chanspec; + uint16 bf_sch_pending; + uint32 bf_link_prev; + uint32 bf_link_next; + uint32 onchan_time_l; + uint32 onchan_time_h; + uint32 actual_onchan_dur_l; + uint32 actual_onchan_dur_h; + uint32 pend_onchan_dur_l; + uint32 pend_onchan_dur_h; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 bf_entity_list_cnt; + uint16 bf_entity_list_ptr; + uint32 bf_skipped_count; +} msch_chan_ctxt_profiler_event_data_t; + +typedef struct msch_req_entity_profiler_event_data { + uint32 p_req_entity; + uint32 req_hdl_link_prev; + uint32 req_hdl_link_next; + uint32 chan_ctxt_link_prev; + uint32 chan_ctxt_link_next; + uint32 rt_specific_link_prev; + uint32 rt_specific_link_next; + uint32 start_fixed_link_prev; + uint32 start_fixed_link_next; + uint32 both_flex_list_prev; + uint32 
both_flex_list_next; + uint16 chanspec; + uint16 priority; + uint16 cur_slot_ptr; + uint16 pend_slot_ptr; + uint16 pad; + uint16 chan_ctxt_ptr; + uint32 p_chan_ctxt; + uint32 p_req_hdl; + uint32 bf_last_serv_time_l; + uint32 bf_last_serv_time_h; + uint16 onchan_chn_idx; + uint16 cur_chn_idx; + uint32 flags; + uint32 actual_start_time_l; + uint32 actual_start_time_h; + uint32 curts_fire_time_l; + uint32 curts_fire_time_h; +} msch_req_entity_profiler_event_data_t; + +typedef struct msch_req_handle_profiler_event_data { + uint32 p_req_handle; + uint32 p_prev; + uint32 p_next; + uint32 cb_func; + uint32 cb_ctxt; + uint16 req_param_ptr; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 chan_cnt; + uint32 flags; + uint16 chanspec_list; + uint16 chanspec_cnt; + uint16 chan_idx; + uint16 last_chan_idx; + uint32 req_time_l; + uint32 req_time_h; +} msch_req_handle_profiler_event_data_t; + +typedef struct msch_profiler_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 free_req_hdl_list; + uint32 free_req_entity_list; + uint32 free_chan_ctxt_list; + uint32 free_chanspec_list; + uint16 cur_msch_timeslot_ptr; + uint16 next_timeslot_ptr; + uint32 p_cur_msch_timeslot; + uint32 p_next_timeslot; + uint32 cur_armed_timeslot; + uint32 flags; + uint32 ts_id; + uint32 service_interval; + uint32 max_lo_prio_interval; + uint16 flex_list_cnt; + uint16 msch_chanspec_alloc_cnt; + uint16 msch_req_entity_alloc_cnt; + uint16 msch_req_hdl_alloc_cnt; + uint16 msch_chan_ctxt_alloc_cnt; + uint16 msch_timeslot_alloc_cnt; + uint16 msch_req_hdl_list_cnt; + uint16 msch_req_hdl_list_ptr; + uint16 msch_chan_ctxt_list_cnt; + uint16 msch_chan_ctxt_list_ptr; + uint16 msch_req_timing_list_cnt; + uint16 msch_req_timing_list_ptr; + uint16 msch_start_fixed_list_cnt; + uint16 msch_start_fixed_list_ptr; + uint16 msch_both_flex_req_entity_list_cnt; + uint16 msch_both_flex_req_entity_list_ptr; + uint16 msch_start_flex_list_cnt; + uint16 
msch_start_flex_list_ptr; + uint16 msch_both_flex_list_cnt; + uint16 msch_both_flex_list_ptr; + uint32 slotskip_flag; +} msch_profiler_profiler_event_data_t; + +typedef struct msch_req_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 chanspec_cnt; + uint16 chanspec_ptr; + uint16 req_param_ptr; + uint16 pad; +} msch_req_profiler_event_data_t; + +typedef struct msch_callback_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 type; /* callback type */ + uint16 chanspec; /* actual chanspec, may different with requested one */ + uint32 start_time_l; /* time slot start time low 32bit */ + uint32 start_time_h; /* time slot start time high 32bit */ + uint32 end_time_l; /* time slot end time low 32 bit */ + uint32 end_time_h; /* time slot end time high 32 bit */ + uint32 timeslot_id; /* unique time slot id */ + uint32 p_req_hdl; + uint32 onchan_idx; /* Current channel index */ + uint32 cur_chan_seq_start_time_l; /* start time of current sequence */ + uint32 cur_chan_seq_start_time_h; +} msch_callback_profiler_event_data_t; + +typedef struct msch_timeslot_profiler_event_data { + uint32 p_timeslot; + uint32 timeslot_id; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 sch_dur_l; + uint32 sch_dur_h; + uint32 p_chan_ctxt; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 state; +} msch_timeslot_profiler_event_data_t; + +typedef struct msch_register_params { + uint16 wlc_index; /* Optional wlc index */ + uint16 flags; /* Describe various request properties */ + uint32 req_type; /* Describe start and end time flexiblilty */ + uint16 id; /* register id */ + uint16 priority; /* Define the request priority */ + uint32 start_time; /* Requested start time offset in ms unit */ + uint32 duration; /* Requested duration in ms unit */ + uint32 interval; /* Requested periodic interval in ms unit, + * 0 means non-periodic + */ + uint32 dur_flex; /* 
MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */ + uint32 hi_prio_time; + uint32 hi_prio_interval; /* repeated high priority interval */ + uint32 chanspec_cnt; + uint16 chanspec_list[WL_MSCH_NUMCHANNELS]; +} msch_register_params_t; + +typedef struct { + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. 
 */
	uint32	rxbeaconmbss;	/**< beacons received from member of BSS */
	uint32	rxdtucastmbss;	/**< number of received DATA frames with good FCS and matching RA */
	uint32	rxdtocast;	/**< number of received DATA frames (good FCS and no matching RA) */
	uint32	rxtoolate;	/**< receive too late */
	uint32	goodfcs;	/**< Good fcs counters */
	uint32	rxf0ovfl;	/**< Rx FIFO0 overflow counters information */
	uint32	rxf1ovfl;	/**< Rx FIFO1 overflow counters information */
} phy_periodic_counters_v1_t;

/* Chip-common info logged with each PHY calibration event */
typedef struct phycal_log_cmn {
	uint16 chanspec;	/* Current phy chanspec */
	uint8  last_cal_reason;	/* Last Cal Reason */
	uint8  pad1;		/* Padding byte to align with word */
	uint   last_cal_time;	/* Last cal time in sec */
} phycal_log_cmn_t;

/* Per-core PHY calibration results (Tx/Rx IQ cal, TPC base index, ADC cap cal) */
typedef struct phycal_log_core {
	uint16 ofdm_txa;	/* OFDM Tx IQ Cal a coeff */
	uint16 ofdm_txb;	/* OFDM Tx IQ Cal b coeff */
	uint16 ofdm_txd;	/* contain di & dq */
	uint16 bphy_txa;	/* BPHY Tx IQ Cal a coeff */
	uint16 bphy_txb;	/* BPHY Tx IQ Cal b coeff */
	uint16 bphy_txd;	/* contain di & dq */

	uint16 rxa;		/* Rx IQ Cal A coefficient */
	uint16 rxb;		/* Rx IQ Cal B coefficient */
	int32 rxs;		/* FDIQ Slope coefficient */

	uint8 baseidx;		/* TPC Base index */
	uint8 adc_coeff_cap0_adcI;	/* ADC CAP Cal Cap0 I */
	uint8 adc_coeff_cap1_adcI;	/* ADC CAP Cal Cap1 I */
	uint8 adc_coeff_cap2_adcI;	/* ADC CAP Cal Cap2 I */
	uint8 adc_coeff_cap0_adcQ;	/* ADC CAP Cal Cap0 Q */
	uint8 adc_coeff_cap1_adcQ;	/* ADC CAP Cal Cap1 Q */
	uint8 adc_coeff_cap2_adcQ;	/* ADC CAP Cal Cap2 Q */
	uint8 pad;		/* Padding byte to align with word */
} phycal_log_core_t;

#define PHYCAL_LOG_VER1	(1u)

/* Versioned PHY-cal log record: common part followed by numcores per-core parts */
typedef struct phycal_log_v1 {
	uint8  version;		/* Logging structure version */
	uint8  numcores;	/* Number of cores for which core specific data present */
	uint16 length;		/* Length of the entire structure */
	phycal_log_cmn_t phycal_log_cmn;	/* Logging common structure */
	/* This will be a variable length based on the numcores field defined above */
	phycal_log_core_t phycal_log_core[1];
} phycal_log_v1_t;

/* Chip-common info logged with each periodic PHY state snapshot */
typedef struct phy_periodic_log_cmn {
	uint16 chanspec;	/* Current phy chanspec */
	uint16 vbatmeas;	/* Measured VBAT sense value */
	uint16 featureflag;	/* Currently active feature flags */
	int8 chiptemp;		/* Chip temperature */
	int8 femtemp;		/* Fem temperature */

	uint32 nrate;		/* Current Tx nrate */

	uint8 cal_phase_id;	/* Current Multi phase cal ID */
	uint8 rxchain;		/* Rx Chain */
	uint8 txchain;		/* Tx Chain */
	uint8 ofdm_desense;	/* OFDM desense */

	uint8 bphy_desense;	/* BPHY desense */
	uint8 pll_lockstatus;	/* PLL Lock status */
	uint8 pad1;		/* Padding byte to align with word */
	uint8 pad2;		/* Padding byte to align with word */

	uint32 duration;	/**< millisecs spent sampling this channel */
	uint32 congest_ibss;	/**< millisecs in our bss (presumably this traffic will */
				/**< move if cur bss moves channels) */
	uint32 congest_obss;	/**< traffic not in our bss */
	uint32 interference;	/**< millisecs detecting a non 802.11 interferer.
*/ + +} phy_periodic_log_cmn_t; + +typedef struct phy_periodic_log_core { + uint8 baseindxval; /* TPC Base index */ + int8 tgt_pwr; /* Programmed Target power */ + int8 estpwradj; /* Current Est Power Adjust value */ + int8 crsmin_pwr; /* CRS Min/Noise power */ + int8 rssi_per_ant; /* RSSI Per antenna */ + int8 snr_per_ant; /* SNR Per antenna */ + int8 pad1; /* Padding byte to align with word */ + int8 pad2; /* Padding byte to align with word */ +} phy_periodic_log_core_t; + +#define PHY_PERIODIC_LOG_VER1 (1u) + +typedef struct phy_periodic_log_v1 { + uint8 version; /* Logging structure version */ + uint8 numcores; /* Numbe of cores for which core specific data present */ + uint16 length; /* Length of the entire structure */ + phy_periodic_log_cmn_t phy_perilog_cmn; + phy_periodic_counters_v1_t counters_peri_log; + /* This will be a variable length based on the numcores field defined above */ + phy_periodic_log_core_t phy_perilog_core[1]; +} phy_periodic_log_v1_t; + +#endif /* _EVENT_LOG_PAYLOAD_H_ */ diff --git a/bcmdhd.100.10.315.x/include/event_log_set.h b/bcmdhd.100.10.315.x/include/event_log_set.h new file mode 100644 index 0000000..44051a9 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/event_log_set.h @@ -0,0 +1,111 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: event_log_set.h 771154 2018-07-09 05:46:33Z $ + */ + +#ifndef _EVENT_LOG_SET_H_ +#define _EVENT_LOG_SET_H_ + +#ifndef NUM_EVENT_LOG_SETS +/* Set a maximum number of sets here. It is not dynamic for + * efficiency of the EVENT_LOG calls. 
Old branches could define
 * this to an appropriate number in their makefiles to reduce
 * ROM invalidation
 */
#define NUM_EVENT_LOG_SETS (24)
#endif // endif

/* Set assignments */
#define EVENT_LOG_SET_BUS		(0u)
#define EVENT_LOG_SET_WL		(1u)
#define EVENT_LOG_SET_PSM		(2u)
#define EVENT_LOG_SET_ERROR		(3u)

/* MSCH logging */
#define EVENT_LOG_SET_MSCH_PROFILER	(4u)

#define EVENT_LOG_SET_5			(5u)
#define EVENT_LOG_SET_ECOUNTERS		(EVENT_LOG_SET_5)
#define EVENT_LOG_SET_6			(6u)
#define EVENT_LOG_SET_7			(7u)

#define EVENT_LOG_SET_8			(8u)
#define EVENT_LOG_SET_PRSRV		(EVENT_LOG_SET_8)

#define EVENT_LOG_SET_9			(9u)
/* General purpose preserve chatty.
 * EVENT_LOG_SET_PRSRV_CHATTY log set should not be used by FW as it is
 * used by customer host. FW should use EVENT_LOG_SET_GP_PRSRV_CHATTY
 * for general purpose preserve chatty logs.
 */
#define EVENT_LOG_SET_GP_PRSRV_CHATTY	(EVENT_LOG_SET_9)
#define EVENT_LOG_SET_PRSRV_CHATTY	(EVENT_LOG_SET_6)

/* BUS preserve */
#define EVENT_LOG_SET_PRSRV_BUS		(10u)

/* WL preserve */
#define EVENT_LOG_SET_PRSRV_WL		(11u)

/* Slotted BSS set */
#define EVENT_LOG_SET_WL_SLOTTED_BSS	(12u)

/* PHY entity logging */
#define EVENT_LOG_SET_PHY		(13u)

/* PHY preserve */
#define EVENT_LOG_SET_PRSRV_PHY		(14u)

/* RTE entity */
#define EVENT_LOG_SET_RTE		(15u)

/* Malloc and free logging */
#define EVENT_LOG_SET_MEM_API		(16u)

/* Console buffer */
#define EVENT_LOG_SET_RTE_CONS_BUF	(17u)

/* three log sets for general debug purposes */
#define EVENT_LOG_SET_GENERAL_DBG_1	(18u)
#define EVENT_LOG_SET_GENERAL_DBG_2	(19u)
#define EVENT_LOG_SET_GENERAL_DBG_3	(20u)

/* Log sets for capturing power related logs. Note that these sets
 * are to be used across entire system and not just WL.
+ */ +#define EVENT_LOG_SET_POWER_1 (21u) +#define EVENT_LOG_SET_POWER_2 (22u) + +/* Used for timestamp plotting, TS_LOG() */ +#define EVENT_LOG_SET_TS_LOG (23u) + +/* send delayed logs when >= 50% of buffer is full */ +#ifndef ECOUNTERS_DELAYED_FLUSH_PERCENTAGE +#define ECOUNTERS_DELAYED_FLUSH_PERCENTAGE (50) +#endif // endif + +#endif /* _EVENT_LOG_SET_H_ */ diff --git a/bcmdhd.100.10.315.x/include/event_log_tag.h b/bcmdhd.100.10.315.x/include/event_log_tag.h new file mode 100644 index 0000000..5dcead8 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/event_log_tag.h @@ -0,0 +1,415 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: event_log_tag.h 771154 2018-07-09 05:46:33Z $ + */ + +#ifndef _EVENT_LOG_TAG_H_ +#define _EVENT_LOG_TAG_H_ + +#include + +/* Define new event log tags here */ +#define EVENT_LOG_TAG_NULL 0 /* Special null tag */ +#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */ + +/* HSIC Legacy support */ +/* Possible candidates for reuse */ +#define EVENT_LOG_TAG_BUS_OOB 2 +#define EVENT_LOG_TAG_BUS_STATE 3 +#define EVENT_LOG_TAG_BUS_PROTO 4 +#define EVENT_LOG_TAG_BUS_CTL 5 +#define EVENT_LOG_TAG_BUS_EVENT 6 +#define EVENT_LOG_TAG_BUS_PKT 7 +#define EVENT_LOG_TAG_BUS_FRAME 8 +#define EVENT_LOG_TAG_BUS_DESC 9 +#define EVENT_LOG_TAG_BUS_SETUP 10 +#define EVENT_LOG_TAG_BUS_MISC 11 + +#define EVENT_LOG_TAG_SRSCAN 22 +#define EVENT_LOG_TAG_PWRSTATS_INFO 23 + +/* Timestamp logging for plotting. */ +#define EVENT_LOG_TAG_TSLOG 26 + +/* Possible candidates for reuse */ +#define EVENT_LOG_TAG_UCODE_FIFO 27 + +#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28 +#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29 +#define EVENT_LOG_TAG_SCAN_ERROR 30 +#define EVENT_LOG_TAG_SCAN_WARN 31 +#define EVENT_LOG_TAG_MPF_ERR 32 +#define EVENT_LOG_TAG_MPF_WARN 33 +#define EVENT_LOG_TAG_MPF_INFO 34 +#define EVENT_LOG_TAG_MPF_DEBUG 35 +#define EVENT_LOG_TAG_EVENT_INFO 36 +#define EVENT_LOG_TAG_EVENT_ERR 37 +#define EVENT_LOG_TAG_PWRSTATS_ERROR 38 +#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39 +#define EVENT_LOG_TAG_IOCTL_LOG 40 +#define EVENT_LOG_TAG_PFN_ERR 41 +#define EVENT_LOG_TAG_PFN_WARN 42 +#define EVENT_LOG_TAG_PFN_INFO 43 +#define EVENT_LOG_TAG_PFN_DEBUG 44 +#define EVENT_LOG_TAG_BEACON_LOG 45 +#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46 +#define EVENT_LOG_TAG_TRACE_CHANSW 47 +#define EVENT_LOG_TAG_PCI_ERROR 48 +#define EVENT_LOG_TAG_PCI_TRACE 49 +#define EVENT_LOG_TAG_PCI_WARN 50 +#define EVENT_LOG_TAG_PCI_INFO 51 +#define EVENT_LOG_TAG_PCI_DBG 52 +#define EVENT_LOG_TAG_PCI_DATA 53 +#define EVENT_LOG_TAG_PCI_RING 54 +/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after wlc_ranging 
merge from IGUANA + * keeping it here to avoid compilation error on trunk + */ +#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55 +#define EVENT_LOG_TAG_RANGING_TRACE 55 +#define EVENT_LOG_TAG_WL_ERROR 56 +#define EVENT_LOG_TAG_PHY_ERROR 57 +#define EVENT_LOG_TAG_OTP_ERROR 58 +#define EVENT_LOG_TAG_NOTIF_ERROR 59 +#define EVENT_LOG_TAG_MPOOL_ERROR 60 +#define EVENT_LOG_TAG_OBJR_ERROR 61 +#define EVENT_LOG_TAG_DMA_ERROR 62 +#define EVENT_LOG_TAG_PMU_ERROR 63 +#define EVENT_LOG_TAG_BSROM_ERROR 64 +#define EVENT_LOG_TAG_SI_ERROR 65 +#define EVENT_LOG_TAG_ROM_PRINTF 66 +#define EVENT_LOG_TAG_RATE_CNT 67 +#define EVENT_LOG_TAG_CTL_MGT_CNT 68 +#define EVENT_LOG_TAG_AMPDU_DUMP 69 +#define EVENT_LOG_TAG_MEM_ALLOC_SUCC 70 +#define EVENT_LOG_TAG_MEM_ALLOC_FAIL 71 +#define EVENT_LOG_TAG_MEM_FREE 72 +#define EVENT_LOG_TAG_WL_ASSOC_LOG 73 +#define EVENT_LOG_TAG_WL_PS_LOG 74 +#define EVENT_LOG_TAG_WL_ROAM_LOG 75 +#define EVENT_LOG_TAG_WL_MPC_LOG 76 +#define EVENT_LOG_TAG_WL_WSEC_LOG 77 +#define EVENT_LOG_TAG_WL_WSEC_DUMP 78 +#define EVENT_LOG_TAG_WL_MCNX_LOG 79 +#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR 80 +#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR 81 +#define EVENT_LOG_TAG_ECOUNTERS_ERROR 82 +#define EVENT_LOG_TAG_WL_COUNTERS 83 +#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS 84 +#define EVENT_LOG_TAG_WL_P2P_LOG 85 +#define EVENT_LOG_TAG_SDIO_ERROR 86 +#define EVENT_LOG_TAG_SDIO_TRACE 87 +#define EVENT_LOG_TAG_SDIO_DBG 88 +#define EVENT_LOG_TAG_SDIO_PRHDRS 89 +#define EVENT_LOG_TAG_SDIO_PRPKT 90 +#define EVENT_LOG_TAG_SDIO_INFORM 91 +#define EVENT_LOG_TAG_MIMO_PS_ERROR 92 +#define EVENT_LOG_TAG_MIMO_PS_TRACE 93 +#define EVENT_LOG_TAG_MIMO_PS_INFO 94 +#define EVENT_LOG_TAG_BTCX_STATS 95 +#define EVENT_LOG_TAG_LEAKY_AP_STATS 96 +#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97 +#define EVENT_LOG_TAG_MIMO_PS_STATS 98 +#define EVENT_LOG_TAG_PWRSTATS_PHY 99 +#define EVENT_LOG_TAG_PWRSTATS_SCAN 100 +#define EVENT_LOG_TAG_PWRSTATS_AWDL 101 +#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102 +#define 
EVENT_LOG_TAG_LQM 103 +#define EVENT_LOG_TAG_TRACE_WL_INFO 104 +#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO 105 +#define EVENT_LOG_TAG_ECOUNTERS_TIME_DATA 106 +#define EVENT_LOG_TAG_NAN_ERROR 107 +#define EVENT_LOG_TAG_NAN_INFO 108 +#define EVENT_LOG_TAG_NAN_DBG 109 +#define EVENT_LOG_TAG_STF_ARBITRATOR_ERROR 110 +#define EVENT_LOG_TAG_STF_ARBITRATOR_TRACE 111 +#define EVENT_LOG_TAG_STF_ARBITRATOR_WARN 112 +#define EVENT_LOG_TAG_SCAN_SUMMARY 113 +#define EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT 114 +#define EVENT_LOG_TAG_OCL_INFO 115 +#define EVENT_LOG_TAG_RSDB_PMGR_DEBUG 116 +#define EVENT_LOG_TAG_RSDB_PMGR_ERR 117 +#define EVENT_LOG_TAG_NAT_ERR 118 +#define EVENT_LOG_TAG_NAT_WARN 119 +#define EVENT_LOG_TAG_NAT_INFO 120 +#define EVENT_LOG_TAG_NAT_DEBUG 121 +#define EVENT_LOG_TAG_STA_INFO 122 +#define EVENT_LOG_TAG_PROXD_ERROR 123 +#define EVENT_LOG_TAG_PROXD_TRACE 124 +#define EVENT_LOG_TAG_PROXD_INFO 125 +#define EVENT_LOG_TAG_IE_ERROR 126 +#define EVENT_LOG_TAG_ASSOC_ERROR 127 +#define EVENT_LOG_TAG_SCAN_ERR 128 +#define EVENT_LOG_TAG_AMSDU_ERROR 129 +#define EVENT_LOG_TAG_AMPDU_ERROR 130 +#define EVENT_LOG_TAG_KM_ERROR 131 +#define EVENT_LOG_TAG_DFS 132 +#define EVENT_LOG_TAG_REGULATORY 133 +#define EVENT_LOG_TAG_CSA 134 +#define EVENT_LOG_TAG_WNM_BSSTRANS_ERR 135 +#define EVENT_LOG_TAG_SUP_INFO 136 +#define EVENT_LOG_TAG_SUP_ERROR 137 +#define EVENT_LOG_TAG_CHANCTXT_TRACE 138 +#define EVENT_LOG_TAG_CHANCTXT_INFO 139 +#define EVENT_LOG_TAG_CHANCTXT_ERROR 140 +#define EVENT_LOG_TAG_CHANCTXT_WARN 141 +#define EVENT_LOG_TAG_MSCHPROFILE 142 +#define EVENT_LOG_TAG_4WAYHANDSHAKE 143 +#define EVENT_LOG_TAG_MSCHPROFILE_TLV 144 +#define EVENT_LOG_TAG_ADPS 145 +#define EVENT_LOG_TAG_MBO_DBG 146 +#define EVENT_LOG_TAG_MBO_INFO 147 +#define EVENT_LOG_TAG_MBO_ERR 148 +#define EVENT_LOG_TAG_TXDELAY 149 +#define EVENT_LOG_TAG_BCNTRIM_INFO 150 +#define EVENT_LOG_TAG_BCNTRIM_TRACE 151 +#define EVENT_LOG_TAG_OPS_INFO 152 +#define EVENT_LOG_TAG_STATS 153 +#define EVENT_LOG_TAG_BAM 154 
+#define EVENT_LOG_TAG_TXFAIL 155 +#define EVENT_LOG_TAG_AWDL_CONFIG_DBG 156 +#define EVENT_LOG_TAG_AWDL_SYNC_DBG 157 +#define EVENT_LOG_TAG_AWDL_PEER_DBG 158 +#define EVENT_LOG_TAG_RANDMAC_INFO 159 +#define EVENT_LOG_TAG_RANDMAC_DBG 160 +#define EVENT_LOG_TAG_RANDMAC_ERR 161 +#define EVENT_LOG_TAG_AWDL_DFSP_DBG 162 +#define EVENT_LOG_TAG_MSCH_CAL 163 +#define EVENT_LOG_TAG_MSCH_OPP_CAL 164 +#define EVENT_LOG_TAG_MSCH 165 +#define EVENT_LOG_TAG_NAN_SYNC 166 +#define EVENT_LOG_TAG_NAN_DPE 167 +#define EVENT_LOG_TAG_NAN_SCHED 168 +#define EVENT_LOG_TAG_NAN_RNG 169 +#define EVENT_LOG_TAG_NAN_DAM 170 +#define EVENT_LOG_TAG_NAN_NA 171 +#define EVENT_LOG_TAG_NAN_NDL 172 +#define EVENT_LOG_TAG_NAN_NDP 173 +#define EVENT_LOG_TAG_NAN_SEC 174 +#define EVENT_LOG_TAG_NAN_MAC 175 +#define EVENT_LOG_TAG_NAN_FSM 176 + +#define EVENT_LOG_TAG_TPA_ERR 192 +#define EVENT_LOG_TAG_TPA_INFO 193 +#define EVENT_LOG_TAG_OCE_DBG 194 +#define EVENT_LOG_TAG_OCE_INFO 195 +#define EVENT_LOG_TAG_OCE_ERR 196 +#define EVENT_LOG_TAG_WL_WARN 197 +#define EVENT_LOG_TAG_SB_ERR 198 +#define EVENT_LOG_TAG_SB_INFO 199 +#define EVENT_LOG_TAG_SB_SCHED 200 +#define EVENT_LOG_TAG_ADPS_INFO 201 +#define EVENT_LOG_TAG_SB_CMN_SYNC_INFO 202 +#define EVENT_LOG_TAG_PHY_CAL_INFO 203 /* PHY CALs scheduler info */ +#define EVENT_LOG_TAG_EVT_NOTIF_INFO 204 +#define EVENT_LOG_TAG_PHY_HC_ERROR 205 +#define EVENT_LOG_TAG_PHY_TXPWR_WARN 206 +#define EVENT_LOG_TAG_PHY_TXPWR_INFO 207 +#define EVENT_LOG_TAG_PHY_ACI_INFO 208 +#define EVENT_LOG_TAG_WL_COUNTERS_AUX 209 +#define EVENT_LOG_TAG_AMPDU_DUMP_AUX 210 +#define EVENT_LOG_TAG_PWRSTATS_AWDL_AUX 211 +#define EVENT_LOG_TAG_PWRSTATS_PHY_AUX 212 +#define EVENT_LOG_TAG_PWRSTATS_SCAN_AUX 213 +#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2_AUX 214 +#define EVENT_LOG_TAG_SVT_TESTING 215 /* SVT testing/verification */ +#define EVENT_LOG_TAG_HND_SMD_ERROR 216 +#define EVENT_LOG_TAG_PSBW_INFO 217 +#define EVENT_LOG_TAG_PHY_CAL_DBG 218 +#define EVENT_LOG_TAG_FILS_DBG 219 +#define 
EVENT_LOG_TAG_FILS_INFO 220 +#define EVENT_LOG_TAG_FILS_ERROR 221 +#define EVENT_LOG_TAG_HWA_TXPOST 222 +#define EVENT_LOG_TAG_HWA_TXDMA 223 +/* Arbitrator callback log tags */ +#define EVENT_LOG_TAG_STF_ARB_CB_TRACE 224 +#define EVENT_LOG_TAG_STF_ARB_CB_ERROR 225 +#define EVENT_LOG_TAG_PHY_PERIODIC_SEC 226 + +/* Debug tags for making debug builds */ +#define EVENT_LOG_TAG_DBG1 251 +#define EVENT_LOG_TAG_DBG2 252 +#define EVENT_LOG_TAG_DBG3 253 +#define EVENT_LOG_TAG_DBG4 254 +#define EVENT_LOG_TAG_DBG5 255 + +/* Insert new tags here for Koala onwards */ + +/* NAN INFO/ERR evnt tags */ +#define EVENT_LOG_TAG_NAN_SYNC_INFO 256 +#define EVENT_LOG_TAG_NAN_DPE_INFO 257 +#define EVENT_LOG_TAG_NAN_SCHED_INFO 258 +#define EVENT_LOG_TAG_NAN_RNG_INFO 259 +#define EVENT_LOG_TAG_NAN_DAM_INFO 260 +#define EVENT_LOG_TAG_NAN_NA_INFO 261 +#define EVENT_LOG_TAG_NAN_NDL_INFO 262 +#define EVENT_LOG_TAG_NAN_NDP_INFO 263 +#define EVENT_LOG_TAG_NAN_SEC_INFO 264 +#define EVENT_LOG_TAG_NAN_MAC_INFO 265 +#define EVENT_LOG_TAG_NAN_FSM_INFO 266 +#define EVENT_LOG_TAG_NAN_PEER_INFO 267 +#define EVENT_LOG_TAG_NAN_AVAIL_INFO 268 +#define EVENT_LOG_TAG_NAN_CMN_INFO 269 +#define EVENT_LOG_TAG_NAN_SYNC_ERR 270 +#define EVENT_LOG_TAG_NAN_DPE_ERR 271 +#define EVENT_LOG_TAG_NAN_SCHED_ERR 272 +#define EVENT_LOG_TAG_NAN_RNG_ERR 273 +#define EVENT_LOG_TAG_NAN_DAM_ERR 274 +#define EVENT_LOG_TAG_NAN_NA_ERR 275 +#define EVENT_LOG_TAG_NAN_NDL_ERR 276 +#define EVENT_LOG_TAG_NAN_NDP_ERR 277 +#define EVENT_LOG_TAG_NAN_SEC_ERR 278 +#define EVENT_LOG_TAG_NAN_MAC_ERR 279 +#define EVENT_LOG_TAG_NAN_FSM_ERR 280 +#define EVENT_LOG_TAG_NAN_PEER_ERR 281 +#define EVENT_LOG_TAG_NAN_AVAIL_ERR 282 +#define EVENT_LOG_TAG_NAN_CMN_ERR 283 + +/* More NAN DBG evt Tags */ +#define EVENT_LOG_TAG_NAN_PEER 284 +#define EVENT_LOG_TAG_NAN_AVAIL 285 +#define EVENT_LOG_TAG_NAN_CMN 286 + +#define EVENT_LOG_TAG_SAE_ERROR 287 +#define EVENT_LOG_TAG_SAE_INFO 288 + +/* rxsig module logging */ +#define EVENT_LOG_TAG_RXSIG_ERROR 289 
#define EVENT_LOG_TAG_RXSIG_DEBUG	290
#define EVENT_LOG_TAG_RXSIG_INFO	291

/* HE TWT HEB EVENT_LOG_TAG */
#define EVENT_LOG_TAG_WL_HE_INFO	292
#define EVENT_LOG_TAG_WL_HE_TRACE	293
#define EVENT_LOG_TAG_WL_HE_WARN	294
#define EVENT_LOG_TAG_WL_HE_ERROR	295
#define EVENT_LOG_TAG_WL_TWT_INFO	296
#define EVENT_LOG_TAG_WL_TWT_TRACE	297
#define EVENT_LOG_TAG_WL_TWT_WARN	298
#define EVENT_LOG_TAG_WL_TWT_ERROR	299
#define EVENT_LOG_TAG_WL_HEB_ERROR	300
#define EVENT_LOG_TAG_WL_HEB_TRACE	301

/* RRM EVENT_LOG_TAG */
#define EVENT_LOG_TAG_RRM_DBG		302
#define EVENT_LOG_TAG_RRM_INFO		303
#define EVENT_LOG_TAG_RRM_ERR		304

/* scan core */
#define EVENT_LOG_TAG_SC		305

#define EVENT_LOG_TAG_ESP_DBG		306
#define EVENT_LOG_TAG_ESP_INFO		307
#define EVENT_LOG_TAG_ESP_ERR		308

/* SDC */
#define EVENT_LOG_TAG_SDC_DBG		309
#define EVENT_LOG_TAG_SDC_INFO		310
#define EVENT_LOG_TAG_SDC_ERR		311

/* RTE */
#define EVENT_LOG_TAG_RTE_ERR		312

/* EVENT_LOG_TAG_MAX = Set to the same value of last tag, not last tag + 1 */
#define EVENT_LOG_TAG_MAX		312

/* Flush behavior of a log set when its buffer fills */
typedef enum wl_el_set_type_def {
	EVENT_LOG_SET_TYPE_DEFAULT = 0,	/* flush the log buffer when it is full - Default option */
	EVENT_LOG_SET_TYPE_PRSRV = 1,	/* flush the log buffer based on fw or host trigger */
	EVENT_LOG_SET_TYPE_DFLUSH = 2	/* flush the log buffer once the watermark is reached */
} wl_el_set_type_def_t;

/* Per-tag flush action encoding: action bits in the high nibble, set number in the low 6 bits */
#define EVENT_LOG_TAG_FLUSH_NONE	0x00	/* No flush */
#define EVENT_LOG_TAG_FLUSH_ALL		0x40	/* Flush all preserved sets */
#define EVENT_LOG_TAG_FLUSH_SETNUM	0x80	/* Flush preserved set */
#define EVENT_LOG_TAG_FLUSH_MASK	0x3f	/* SetNum Mask */

typedef enum wl_el_flush_type {
	EL_TAG_PRSRV_FLUSH_NONE = 0,	/* No flush of preserve buf on this tag */
	EL_TAG_PRSRV_FLUSH_SETNUM,	/* Flush the buffer set specified on this tag */
	EL_TAG_PRSRV_FLUSH_ALL		/* Flush all preserved buffer set on this tag */
} wl_el_flush_type_t;

#define EVENT_LOG_FLUSH_CURRENT_VERSION 0
typedef struct
wl_el_set_flush_prsrv_s { + uint16 version; + uint16 len; + uint16 tag; /* Tag for which preserve flush should be done */ + uint8 flush_type; /* Check wl_el_flush_type_t */ + uint8 set_num; /* Log set num to flush. Max is NUM_EVENT_LOG_SETS. Valid only when + * action is EVENT_LOG_TAG_FLUSH_SETNUM + */ +} wl_el_set_flush_prsrv_t; + +#define SD_PRHDRS(i, s, h, p, n, l) +#define SD_PRPKT(m, b, n) +#define SD_INFORM(args) + +/* Flags for tag control */ +#define EVENT_LOG_TAG_FLAG_NONE 0 +#define EVENT_LOG_TAG_FLAG_LOG 0x80 +#define EVENT_LOG_TAG_FLAG_PRINT 0x40 +#define EVENT_LOG_TAG_FLAG_SET_MASK 0x3f + +/* Each event log entry has a type. The type is the LAST word of the + * event log. The printing code walks the event entries in reverse + * order to find the first entry. + */ +typedef union event_log_hdr { + struct { + uint8 tag; /* Event_log entry tag */ + uint8 count; /* Count of 4-byte entries */ + uint16 fmt_num; /* Format number */ + }; + uint32 t; /* Type cheat */ +} event_log_hdr_t; + +/* for internal use - legacy max. tag */ +#define EVENT_LOG_TAG_MAX_LEGACY_FORMAT 255 + +/* + * The position of the extended header in the event log stream will be as follows: + * + * Extended header could be due to count > 255 or tag > 255. + * + * Extended count: 6 bits long. 8 bits (existing) + 6 bits => + * 2^14 words = 65536 bytes payload max + * Extended count field is currently reserved + * Extended tag: 8 (existing) + 4 bits = 12 bits =>2^12 = 4096 tags + * bits[7..4] of extended tags are reserved. + * MSB 16 bits of the extended header are reserved for future use. + */ + +typedef union event_log_extended_hdr { + struct { + uint8 extended_tag; /* Extended tag, bits[7..4] are reserved */ + uint8 extended_count; /* Extended count. Reserved for now. 
*/ + uint16 rsvd; /* Reserved */ + }; + + uint32 t; /* Type cheat */ +} event_log_extended_hdr_t; +#endif /* _EVENT_LOG_TAG_H_ */ diff --git a/bcmdhd.100.10.315.x/include/event_trace.h b/bcmdhd.100.10.315.x/include/event_trace.h new file mode 100644 index 0000000..da34699 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/event_trace.h @@ -0,0 +1,123 @@ +/* + * Trace log blocks sent over HBUS + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: event_trace.h 693870 2017-04-05 09:03:17Z $ + */ + +/** + * @file + * @brief + * Define the trace event ID and tag ID + */ + +#ifndef _WL_DIAG_H +#define _WL_DIAG_H + +#define DIAG_MAJOR_VERSION 1 /* 4 bits */ +#define DIAG_MINOR_VERSION 0 /* 4 bits */ +#define DIAG_MICRO_VERSION 0 /* 4 bits */ + +#define DIAG_VERSION \ + ((DIAG_MICRO_VERSION&0xF) | (DIAG_MINOR_VERSION&0xF)<<4 | \ + (DIAG_MAJOR_VERSION&0xF)<<8) + /* bit[11:8] major ver */ + /* bit[7:4] minor ver */ + /* bit[3:0] micro ver */ + +/* event ID for trace purpose only, to avoid the conflict with future new +* WLC_E_ , starting from 0x8000 +*/ +#define TRACE_FW_AUTH_STARTED 0x8000 +#define TRACE_FW_ASSOC_STARTED 0x8001 +#define TRACE_FW_RE_ASSOC_STARTED 0x8002 +#define TRACE_G_SCAN_STARTED 0x8003 +#define TRACE_ROAM_SCAN_STARTED 0x8004 +#define TRACE_ROAM_SCAN_COMPLETE 0x8005 +#define TRACE_FW_EAPOL_FRAME_TRANSMIT_START 0x8006 +#define TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP 0x8007 +#define TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE 0x8008 /* protocol status */ +#define TRACE_BT_COEX_BT_SCO_START 0x8009 +#define TRACE_BT_COEX_BT_SCO_STOP 0x800a +#define TRACE_BT_COEX_BT_SCAN_START 0x800b +#define TRACE_BT_COEX_BT_SCAN_STOP 0x800c +#define TRACE_BT_COEX_BT_HID_START 0x800d +#define TRACE_BT_COEX_BT_HID_STOP 0x800e +#define TRACE_ROAM_AUTH_STARTED 0x800f +/* Event ID for NAN, start from 0x9000 */ +#define TRACE_NAN_CLUSTER_STARTED 0x9000 +#define TRACE_NAN_CLUSTER_JOINED 0x9001 +#define TRACE_NAN_CLUSTER_MERGED 0x9002 +#define TRACE_NAN_ROLE_CHANGED 0x9003 +#define TRACE_NAN_SCAN_COMPLETE 0x9004 +#define TRACE_NAN_STATUS_CHNG 0x9005 + +/* Parameters of wifi logger events are TLVs */ +/* Event parameters tags are defined as: */ +#define TRACE_TAG_VENDOR_SPECIFIC 0 /* take a byte stream as parameter */ +#define TRACE_TAG_BSSID 1 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR 2 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_SSID 3 /* takes a 32 bytes 
SSID address as parameter */ +#define TRACE_TAG_STATUS 4 /* takes an integer as parameter */ +#define TRACE_TAG_CHANNEL_SPEC 5 /* takes one or more wifi_channel_spec as */ + /* parameter */ +#define TRACE_TAG_WAKE_LOCK_EVENT 6 /* takes a wake_lock_event struct as parameter */ +#define TRACE_TAG_ADDR1 7 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR2 8 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR3 9 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR4 10 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_TSF 11 /* take a 64 bits TSF value as parameter */ +#define TRACE_TAG_IE 12 /* take one or more specific 802.11 IEs */ + /* parameter, IEs are in turn indicated in */ + /* TLV format as per 802.11 spec */ +#define TRACE_TAG_INTERFACE 13 /* take interface name as parameter */ +#define TRACE_TAG_REASON_CODE 14 /* take a reason code as per 802.11 */ + /* as parameter */ +#define TRACE_TAG_RATE_MBPS 15 /* take a wifi rate in 0.5 mbps */ +#define TRACE_TAG_REQUEST_ID 16 /* take an integer as parameter */ +#define TRACE_TAG_BUCKET_ID 17 /* take an integer as parameter */ +#define TRACE_TAG_GSCAN_PARAMS 18 /* takes a wifi_scan_cmd_params struct as parameter */ +#define TRACE_TAG_GSCAN_CAPABILITIES 19 /* takes a wifi_gscan_capabilities struct as parameter */ +#define TRACE_TAG_SCAN_ID 20 /* take an integer as parameter */ +#define TRACE_TAG_RSSI 21 /* take an integer as parameter */ +#define TRACE_TAG_CHANNEL 22 /* take an integer as parameter */ +#define TRACE_TAG_LINK_ID 23 /* take an integer as parameter */ +#define TRACE_TAG_LINK_ROLE 24 /* take an integer as parameter */ +#define TRACE_TAG_LINK_STATE 25 /* take an integer as parameter */ +#define TRACE_TAG_LINK_TYPE 26 /* take an integer as parameter */ +#define TRACE_TAG_TSCO 27 /* take an integer as parameter */ +#define TRACE_TAG_RSCO 28 /* take an integer as parameter */ +#define TRACE_TAG_EAPOL_MESSAGE_TYPE 29 /* take an integer as 
parameter */ + /* M1-1, M2-2, M3-3, M4-4 */ + +typedef union { + struct { + uint16 event: 16; + uint16 version: 16; + }; + uint32 t; +} wl_event_log_id_ver_t; + +#endif /* _WL_DIAG_H */ diff --git a/bcmdhd.100.10.315.x/include/fils.h b/bcmdhd.100.10.315.x/include/fils.h new file mode 100644 index 0000000..06b983d --- /dev/null +++ b/bcmdhd.100.10.315.x/include/fils.h @@ -0,0 +1,294 @@ +/* + * Fundamental types and constants relating to FILS AUTHENTICATION + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id$ + */ + +#ifndef _FILSAUTH_H_ +#define _FILSAUTH_H_ + +/* This marks the start of a packed structure section. 
*/ +#include + +/* 11ai D6.0 8.6.8.36 FILS Discovery frame format + category + action + fils_discovery_info_field_t + fils_rnr_element_t + fils_indication_element_t + fils_vendor_specific_element_t +*/ + +/* 11revmc D4.0 8.4.2.25 Vendor Specific element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_vendor_specific_element { + uint8 elementid; + uint8 length; + /* variable len info */ + uint8 orgid_vendorspecific_content[]; +} BWL_POST_PACKED_STRUCT fils_vendor_specific_element_t; + +#define FILS_VS_ELEM_HDR_LEN (sizeof(fils_vendor_specific_element_t)) + +/* 11ai D6.0 8.4.2.178 FILS Indication element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_indication_element { + uint8 elementid; + uint8 length; + uint16 fils_info; + /* variable len info */ + uint8 cache_domain_publickey_id[]; +} BWL_POST_PACKED_STRUCT fils_indication_element_t; + +#define FILS_INDICATION_ELEM_HDR_LEN (sizeof(fils_indication_element_t)) + +#define FILS_INDICATION_IE_TAG_FIXED_LEN 2 + +#define FI_INFO_CACHE_IND_SUBFIELD_SIZE 2 + +/* FILS Indication Information field */ +#define FI_INFO_PUB_KEY_IDENTS_MASK (0x0007) +#define FI_INFO_REALM_IDENTS_MASK (0x0038) +#define FI_INFO_IP_ADDR_CFG_MASK (0x0040) +#define FI_INFO_CACHE_IDENT_MASK (0x0080) +#define FI_INFO_HESSID_MASK (0x0100) +#define FI_INFO_SHRKEY_AUTH_WOPFS_MASK (0x0200) +#define FI_INFO_SHRKEY_AUTH_WPFS_MASK (0x0400) +#define FI_INFO_PUBKEY_AUTH_MASK (0x0800) + +#define FI_INFO_CACHE_IDENT(fc) ((fc & FI_INFO_CACHE_IDENT_MASK)) +#define FI_INFO_HESSID(fc) ((fc & FI_INFO_HESSID_MASK)) +#define FI_INFO_SHRKEY_AUTH_WOPFS(fc) ((fc & FI_INFO_SHRKEY_AUTH_WOPFS_MASK)) +#define FI_INFO_SHRKEY_AUTH_WPFS(fc) ((fc & FI_INFO_SHRKEY_AUTH_WPFS_MASK)) + +/* 11ai D11.0 9.4.2.171.1 TBTT Information field */ +typedef BWL_PRE_PACKED_STRUCT struct tbtt_info_field { + uint8 tbtt_offset; + uint8 bssid[ETHER_ADDR_LEN]; + uint32 short_ssid; +} BWL_POST_PACKED_STRUCT tbtt_info_field_t; + +#define TBTT_INFO_FIELD_HDR_LEN (sizeof(tbtt_info_field_t)) + +/* 11ai 
D11.0 9.4.2.171.1 Neighbor AP Information field */ +typedef BWL_PRE_PACKED_STRUCT struct neighbor_ap_info_field { + uint16 tbtt_info_header; + uint8 op_class; + uint8 channel; + /* variable len info */ + uint8 tbtt_info_field[]; +} BWL_POST_PACKED_STRUCT neighbor_ap_info_field_t; + +#define NEIGHBOR_AP_INFO_FIELD_HDR_LEN (sizeof(neighbor_ap_info_field_t)) + +/* 11ai D11.0 9.4.2.171 Reduced Neighbor Report element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_rnr_element { + uint8 elementid; + uint8 length; + /* variable len info */ + uint8 neighbor_ap_info[]; +} BWL_POST_PACKED_STRUCT fils_rnr_element_t; + +#define FILS_RNR_ELEM_HDR_LEN (sizeof(fils_rnr_element_t)) + +/* TBTT Info Header macros */ +#define TBTT_INFO_HDR_FIELD_TYPE_MASK (0x001f) +#define TBTT_INFO_HDR_FN_AP_MASK (0x0004) +#define TBTT_INFO_HDR_COUNT_MASK (0x00f0) +#define TBTT_INFO_HDR_LENGTH_MASK (0xff00) + +#define TBTT_INFO_HDR_FIELD_TYPE(hdr)\ + ((hdr) & TBTT_INFO_HDR_FIELD_TYPE_MASK) +#define TBTT_INFO_HDR_FN_AP(hdr)\ + (((hdr) & TBTT_INFO_HDR_FN_AP_MASK) >> 2) +#define TBTT_INFO_HDR_COUNT(hdr)\ + (((hdr) & TBTT_INFO_HDR_COUNT_MASK) >> 4) +#define TBTT_INFO_HDR_LENGTH(hdr)\ + (((hdr) & TBTT_INFO_HDR_LENGTH_MASK) >> 8) + +/* FILS Nonce element */ +#define FILS_NONCE_LENGTH 16u + +typedef BWL_PRE_PACKED_STRUCT struct fils_nonce_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 fils_nonce[FILS_NONCE_LENGTH]; +} BWL_POST_PACKED_STRUCT fils_nonce_element_t; + +/* 11ai 9.4.2.186 FILS Key Delivery element */ +#define FILS_KEY_RSC_LENGTH 8u + +typedef BWL_PRE_PACKED_STRUCT struct fils_key_delivery_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 key_rsc[FILS_KEY_RSC_LENGTH]; + uint8 kde_list[]; /* Key Data Elements */ +} BWL_POST_PACKED_STRUCT fils_key_delivery_element_t; + +/* 8.4.2.175 FILS Session element */ +#define FILS_SESSION_LENGTH 8u + +typedef BWL_PRE_PACKED_STRUCT struct fils_session_element { + uint8 elementid; + uint8 length; + uint8 
element_id_ext; + uint8 fils_session[FILS_SESSION_LENGTH]; +} BWL_POST_PACKED_STRUCT fils_session_element_t; + +/* 9.4.2.179 FILS key confirmation element */ +#define FILS_KEY_CONFIRMATION_HEADER_LEN 3u + +typedef BWL_PRE_PACKED_STRUCT struct fils_key_conf_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + /* variable len info */ + uint8 key_auth[]; +} BWL_POST_PACKED_STRUCT fils_key_conf_element_t; + +#define FILS_SESSION_ELEM_LEN (sizeof(fils_session_element_t)) + +/* 8.4.2.174 FILS Key Confirmation element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_key_confirm_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + /* variable len info */ + uint8 keyauth[]; +} BWL_POST_PACKED_STRUCT fils_key_confirm_element_t; + +#define FILS_CONFIRM_ELEM_HDR_LEN (sizeof(fils_key_confirm_element_t)) + +/* 11ai D6.0 8.6.8.36 FILS Discovery frame format */ +typedef BWL_PRE_PACKED_STRUCT struct fils_discovery_info_field { + uint16 framecontrol; + uint32 timestamp[2]; + uint16 bcninterval; + /* variable len info */ + uint8 disc_info[]; +} BWL_POST_PACKED_STRUCT fils_discovery_info_field_t; + +#define FD_INFO_FIELD_HDR_LEN (sizeof(fils_discovery_info_field_t)) + +#define FD_INFO_CAP_SUBFIELD_SIZE 2 +#define FD_INFO_LENGTH_FIELD_SIZE 2 + +/* FILS Discovery Information field */ +#define FD_INFO_SSID_LENGTH_MASK (0x001f) +#define FD_INFO_CAP_IND_MASK (0x0020) +#define FD_INFO_SHORT_SSID_IND_MASK (0x0040) +#define FD_INFO_APCSN_IND_MASK (0x0080) +#define FD_INFO_ANO_IND_MASK (0x0100) +#define FD_INFO_CH_CENTER_FR_IND_MASK (0x0200) +#define FD_INFO_PRIMARY_CH_IND_MASK (0x0400) +#define FD_INFO_RSN_IND_MASK (0x0800) +#define FD_INFO_LENGTH_IND_MASK (0x1000) +#define FD_INFO_MD_IND_MASK (0x2000) + +#define FD_INFO_SET_SSID_LENGTH(fc, len) (fc |= ((uint16)(len) & FD_INFO_SSID_LENGTH_MASK)) +#define FD_INFO_SET_CAP_PRESENT(fc) (fc |= FD_INFO_CAP_IND_MASK) +#define FD_INFO_SET_SHORT_SSID_PRESENT(fc) (fc |= FD_INFO_SHORT_SSID_IND_MASK) +#define 
FD_INFO_SET_APCSN_PRESENT(fc) (fc |= FD_INFO_APCSN_IND_MASK) +#define FD_INFO_SET_ANO_PRESENT(fc) (fc |= FD_INFO_ANO_IND_MASK) +#define FD_INFO_SET_CH_CENTER_FR_PRESENT(fc) (fc |= FD_INFO_CH_CENTER_FR_IND_MASK) +#define FD_INFO_SET_PRIMARY_CH_PRESENT(fc) (fc |= FD_INFO_PRIMARY_CH_IND_MASK) +#define FD_INFO_SET_RSN_PRESENT(fc) (fc |= FD_INFO_RSN_IND_MASK) +#define FD_INFO_SET_LENGTH_PRESENT(fc) (fc |= FD_INFO_LENGTH_IND_MASK) +#define FD_INFO_SET_MD_PRESENT(fc) (fc |= FD_INFO_MD_IND_MASK) + +#define FD_INFO_SSID_LENGTH(fc) ((fc & FD_INFO_SSID_LENGTH_MASK)) +#define FD_INFO_IS_CAP_PRESENT(fc) ((fc & FD_INFO_CAP_IND_MASK) >> 5) +#define FD_INFO_IS_SHORT_SSID_PRESENT(fc) ((fc & FD_INFO_SHORT_SSID_IND_MASK) >> 6) +#define FD_INFO_IS_APCSN_PRESENT(fc) ((fc & FD_INFO_APCSN_IND_MASK) >> 7) +#define FD_INFO_IS_ANO_PRESENT(fc) ((fc & FD_INFO_ANO_IND_MASK) >> 8) +#define FD_INFO_IS_CH_CENTER_FR_PRESENT(fc) ((fc & FD_INFO_CH_CENTER_FR_IND_MASK) >> 9) +#define FD_INFO_IS_PRIMARY_CH_PRESENT(fc) ((fc & FD_INFO_PRIMARY_CH_IND_MASK) >> 10) +#define FD_INFO_IS_RSN_PRESENT(fc) ((fc & FD_INFO_RSN_IND_MASK) >> 11) +#define FD_INFO_IS_LENGTH_PRESENT(fc) ((fc & FD_INFO_LENGTH_IND_MASK) >> 12) +#define FD_INFO_IS_MD_PRESENT(fc) ((fc & FD_INFO_MD_IND_MASK) >> 13) + +/* FILS Discovery Capability subfield */ +#define FD_CAP_ESS_MASK (0x0001) +#define FD_CAP_PRIVACY_MASK (0x0002) +#define FD_CAP_BSS_CH_WIDTH_MASK (0x001c) +#define FD_CAP_MAX_NSS_MASK (0x00e0) +#define FD_CAP_MULTI_BSS_MASK (0x0200) +#define FD_CAP_PHY_INDEX_MASK (0x1c00) +#define FD_CAP_FILS_MIN_RATE_MASK (0xe000) + +#define FD_CAP_ESS(cap) ((cap & FD_CAP_ESS_MASK)) +#define FD_CAP_PRIVACY(cap) ((cap & FD_CAP_PRIVACY_MASK) >> 1) +#define FD_CAP_BSS_CH_WIDTH(cap) ((cap & FD_CAP_BSS_CH_WIDTH_MASK) >> 2) +#define FD_CAP_MAX_NSS(cap) ((cap & FD_CAP_MAX_NSS_MASK) >> 5) +#define FD_CAP_MULTI_BSS(cap) ((cap & FD_CAP_MULTI_BSS_MASK) >> 9) +#define FD_CAP_PHY_INDEX(cap) ((cap & FD_CAP_PHY_INDEX_MASK) >> 10) +#define 
FD_CAP_FILS_MIN_RATE(cap) ((cap & FD_CAP_FILS_MIN_RATE_MASK) >> 13) + +#define FD_CAP_SET_ESS(cap) ((cap |= FD_CAP_ESS_MASK)) +#define FD_CAP_SET_PRIVACY(cap) ((cap & FD_CAP_PRIVACY_MASK) >> 1) +#define FD_CAP_SET_BSS_CH_WIDTH(cap) ((cap & FD_CAP_BSS_CH_WIDTH_MASK) >> 2) +#define FD_CAP_SET_MAX_NSS(cap) ((cap & FD_CAP_MAX_NSS_MASK) >> 5) +#define FD_CAP_SET_MULTI_BSS(cap) ((cap & FD_CAP_MULTI_BSS_MASK) >> 9) +#define FD_CAP_SET_PHY_INDEX(cap) ((cap & FD_CAP_PHY_INDEX_MASK) >> 10) +#define FD_CAP_SET_FILS_MIN_RATE(cap) ((cap & FD_CAP_FILS_MIN_RATE_MASK) >> 13) + +/* 11ai D6.0 8.4.2.173 FILS Request Parameters element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_request_parameters_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 params_bitmap; + /* variable len info */ + uint8 params_fields[]; +} BWL_POST_PACKED_STRUCT fils_request_parameters_element_t; + +#define FILS_PARAM_MAX_CHANNEL_TIME (1 << 2) + +/* 11ai 9.4.2.184 FILS HLP Container element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_hlp_container_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + uint8 dest_addr[ETHER_ADDR_LEN]; + uint8 src_addr[ETHER_ADDR_LEN]; + /* variable len hlp packet */ + uint8 hlp[]; +} BWL_POST_PACKED_STRUCT fils_hlp_container_element_t; + +/* 11ai 9.4.2.184 FILS Wrapped Data element */ +typedef BWL_PRE_PACKED_STRUCT struct fils_wrapped_data_element { + uint8 elementid; + uint8 length; + uint8 element_id_ext; + /* variable len wrapped data packet */ + uint8 wrapped_data[]; +} BWL_POST_PACKED_STRUCT fils_wrapped_data_element_t; + +#define FILS_HLP_CONTAINER_ELEM_LEN (sizeof(fils_hlp_container_element_t)) + +/* This marks the end of a packed structure section. 
+ */ +#include <packed_section_end.h> + +#endif /* _FILSAUTH_H_ */ diff --git a/bcmdhd.100.10.315.x/include/hnd_armtrap.h b/bcmdhd.100.10.315.x/include/hnd_armtrap.h new file mode 100644 index 0000000..622fa43 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hnd_armtrap.h @@ -0,0 +1,89 @@ +/* + * HND arm trap handling. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_armtrap.h 545867 2015-04-01 22:45:19Z $ + */ + +#ifndef _hnd_armtrap_h_ +#define _hnd_armtrap_h_ + +/* ARM trap handling */ + +/* Trap types defined by ARM (see arminc.h) */ + +/* Trap locations in lo memory */ +#define TRAP_STRIDE 4 +#define FIRST_TRAP TR_RST +#define LAST_TRAP (TR_FIQ * TRAP_STRIDE) + +#if defined(__ARM_ARCH_7M__) +#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS) +#endif /* __ARM_ARCH_7M__ */ + +/* The trap structure is defined here as offsets for assembly */ +#define TR_TYPE 0x00 +#define TR_EPC 0x04 +#define TR_CPSR 0x08 +#define TR_SPSR 0x0c +#define TR_REGS 0x10 +#define TR_REG(n) (TR_REGS + (n) * 4) +#define TR_SP TR_REG(13) +#define TR_LR TR_REG(14) +#define TR_PC TR_REG(15) + +#define TRAP_T_SIZE 80 +#define ASSERT_TRAP_SVC_NUMBER 255 + +#ifndef _LANGUAGE_ASSEMBLY + +#include + +typedef struct _trap_struct { + uint32 type; + uint32 epc; + uint32 cpsr; + uint32 spsr; + uint32 r0; /* a1 */ + uint32 r1; /* a2 */ + uint32 r2; /* a3 */ + uint32 r3; /* a4 */ + uint32 r4; /* v1 */ + uint32 r5; /* v2 */ + uint32 r6; /* v3 */ + uint32 r7; /* v4 */ + uint32 r8; /* v5 */ + uint32 r9; /* sb/v6 */ + uint32 r10; /* sl/v7 */ + uint32 r11; /* fp/v8 */ + uint32 r12; /* ip */ + uint32 r13; /* sp */ + uint32 r14; /* lr */ + uint32 pc; /* r15 */ +} trap_t; + +#endif /* !_LANGUAGE_ASSEMBLY */ + +#endif /* _hnd_armtrap_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hnd_cons.h b/bcmdhd.100.10.315.x/include/hnd_cons.h new file mode 100644 index 0000000..c764142 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hnd_cons.h @@ -0,0 +1,86 @@ +/* + * Console support for RTE - for host use only. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_cons.h 624181 2016-03-10 18:58:22Z $ + */ +#ifndef _hnd_cons_h_ +#define _hnd_cons_h_ + +#include +#include + +#define CBUF_LEN (128) + +#ifndef LOG_BUF_LEN +#if defined(BCM_BIG_LOG) +#define LOG_BUF_LEN (16 * 1024) +#else +#define LOG_BUF_LEN 1024 +#endif // endif +#endif /* LOG_BUF_LEN */ + +#ifdef BOOTLOADER_CONSOLE_OUTPUT +#undef RWL_MAX_DATA_LEN +#undef CBUF_LEN +#undef LOG_BUF_LEN +#define RWL_MAX_DATA_LEN (4 * 1024 + 8) +#define CBUF_LEN (RWL_MAX_DATA_LEN + 64) +#define LOG_BUF_LEN (16 * 1024) +#endif // endif + +typedef struct { + uint32 buf; /* Can't be pointer on (64-bit) hosts */ + uint buf_size; + uint idx; + uint out_idx; /* output index */ +} hnd_log_t; + +typedef struct { + /* Virtual UART + * When there is no UART (e.g. Quickturn), the host should write a complete + * input line directly into cbuf and then write the length into vcons_in. 
+ * This may also be used when there is a real UART (at risk of conflicting with + * the real UART). vcons_out is currently unused. + */ + volatile uint vcons_in; + volatile uint vcons_out; + + /* Output (logging) buffer + * Console output is written to a ring buffer log_buf at index log_idx. + * The host may read the output when it sees log_idx advance. + * Output will be lost if the output wraps around faster than the host polls. + */ + hnd_log_t log; + + /* Console input line buffer + * Characters are read one at a time into cbuf until is received, then + * the buffer is processed as a command line. Also used for virtual UART. + */ + uint cbuf_idx; + char cbuf[CBUF_LEN]; +} hnd_cons_t; + +#endif /* _hnd_cons_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hnd_debug.h b/bcmdhd.100.10.315.x/include/hnd_debug.h new file mode 100644 index 0000000..ad191fb --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hnd_debug.h @@ -0,0 +1,168 @@ +/* + * HND Run Time Environment debug info area + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_debug.h 726313 2017-10-12 06:07:22Z $ + */ + +#ifndef _HND_DEBUG_H +#define _HND_DEBUG_H + +/* Magic number at a magic location to find HND_DEBUG pointers */ +#define HND_DEBUG_PTR_PTR_MAGIC 0x50504244 /* DBPP */ + +/* Magic number at a magic location to find RAM size */ +#define HND_RAMSIZE_PTR_MAGIC 0x534d4152 /* RAMS */ + +#ifndef _LANGUAGE_ASSEMBLY + +/* Includes only when building dongle code */ + +/* We use explicit sizes here since this gets included from different + * systems. The sizes must be the size of the creating system + * (currently 32 bit ARM) since this is gleaned from dump. + */ + +#ifdef FWID +extern uint32 gFWID; +#endif // endif + +/* Define pointers for use on other systems */ +#define _HD_EVLOG_P uint32 +#define _HD_CONS_P uint32 +#define _HD_TRAP_P uint32 + +/* This struct is placed at a well-defined location, and contains a pointer to hnd_debug. */ +typedef struct hnd_debug_ptr { + uint32 magic; + + /* RAM address of 'hnd_debug'. For legacy versions of this struct, it is a 0-indexed + * offset instead. + */ + uint32 hnd_debug_addr; + + /* Base address of RAM. This field does not exist for legacy versions of this struct. */ + uint32 ram_base_addr; + +} hnd_debug_ptr_t; + +/* This struct is placed at a well-defined location. */ +typedef struct hnd_ramsize_ptr { + uint32 magic; /* 'RAMS' */ + + /* RAM size information. 
*/ + uint32 ram_size; +} hnd_ramsize_ptr_t; + +#define HND_DEBUG_EPIVERS_MAX_STR_LEN 32 +#define HND_DEBUG_BUILD_SIGNATURE_FWID_LEN 17 +#define HND_DEBUG_BUILD_SIGNATURE_VER_LEN 22 +typedef struct hnd_debug { + uint32 magic; +#define HND_DEBUG_MAGIC 0x47424544 /* 'DEBG' */ + + uint32 version; /* Debug struct version */ +#define HND_DEBUG_VERSION 1 + + uint32 fwid; /* 4 bytes of fw info */ + char epivers[HND_DEBUG_EPIVERS_MAX_STR_LEN]; + + _HD_TRAP_P trap_ptr; /* trap_t data struct */ + _HD_CONS_P console; /* Console */ + + uint32 ram_base; + uint32 ram_size; + + uint32 rom_base; + uint32 rom_size; + + _HD_EVLOG_P event_log_top; + + /* To populated fields below, + * INCLUDE_BUILD_SIGNATURE_IN_SOCRAM needs to be enabled + */ + char fwid_signature[HND_DEBUG_BUILD_SIGNATURE_FWID_LEN]; /* fwid= */ + char ver_signature[HND_DEBUG_BUILD_SIGNATURE_VER_LEN]; /* ver=abc.abc.abc.abc */ + +} hnd_debug_t; + +/* + * timeval_t and prstatus_t are copies of the Linux structures. + * Included here because we need the definitions for the target processor + * (32 bits) and not the definition on the host this is running on + * (which could be 64 bits). + */ + +typedef struct { /* Time value with microsecond resolution */ + uint32 tv_sec; /* Seconds */ + uint32 tv_usec; /* Microseconds */ +} timeval_t; + +/* Linux/ARM 32 prstatus for notes section */ +typedef struct prstatus { + int32 si_signo; /* Signal number */ + int32 si_code; /* Extra code */ + int32 si_errno; /* Errno */ + uint16 pr_cursig; /* Current signal. */ + uint16 unused; + uint32 pr_sigpend; /* Set of pending signals. */ + uint32 pr_sighold; /* Set of held signals. */ + uint32 pr_pid; + uint32 pr_ppid; + uint32 pr_pgrp; + uint32 pr_sid; + timeval_t pr_utime; /* User time. */ + timeval_t pr_stime; /* System time. */ + timeval_t pr_cutime; /* Cumulative user time. */ + timeval_t pr_cstime; /* Cumulative system time. */ + uint32 uregs[18]; + int32 pr_fpvalid; /* True if math copro being used. 
*/ +} prstatus_t; + +/* for mkcore and other utilities use */ +#define DUMP_INFO_PTR_PTR_0 0x74 +#define DUMP_INFO_PTR_PTR_1 0x78 +#define DUMP_INFO_PTR_PTR_2 0xf0 +#define DUMP_INFO_PTR_PTR_3 0xf8 +#define DUMP_INFO_PTR_PTR_4 0x874 +#define DUMP_INFO_PTR_PTR_5 0x878 +#define DUMP_INFO_PTR_PTR_END 0xffffffff +#define DUMP_INFO_PTR_PTR_LIST DUMP_INFO_PTR_PTR_0, \ + DUMP_INFO_PTR_PTR_1, \ + DUMP_INFO_PTR_PTR_2, \ + DUMP_INFO_PTR_PTR_3, \ + DUMP_INFO_PTR_PTR_4, \ + DUMP_INFO_PTR_PTR_5, \ + DUMP_INFO_PTR_PTR_END + +/* for DHD driver to get dongle ram size info. */ +#define RAMSIZE_PTR_PTR_0 0x6c +#define RAMSIZE_PTR_PTR_END 0xffffffff +#define RAMSIZE_PTR_PTR_LIST RAMSIZE_PTR_PTR_0, \ + RAMSIZE_PTR_PTR_END + +#endif /* !LANGUAGE_ASSEMBLY */ + +#endif /* _HND_DEBUG_H */ diff --git a/bcmdhd.100.10.315.x/include/hnd_pktpool.h b/bcmdhd.100.10.315.x/include/hnd_pktpool.h new file mode 100644 index 0000000..2d9eaf5 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hnd_pktpool.h @@ -0,0 +1,243 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_pktpool.h 633941 2016-04-26 07:04:26Z $ + */ + +#ifndef _hnd_pktpool_h_ +#define _hnd_pktpool_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif // endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTPOOL_MUTEX_DECL(mutex) +#endif // endif + +#ifdef BCMPKTPOOL +#define POOL_ENAB(pool) ((pool) && (pool)->inited) +#else /* BCMPKTPOOL */ +#define POOL_ENAB(bus) 0 +#endif /* BCMPKTPOOL */ + +#ifndef PKTPOOL_LEN_MAX +#define PKTPOOL_LEN_MAX 40 +#endif /* PKTPOOL_LEN_MAX */ +#define PKTPOOL_CB_MAX 3 +#define PKTPOOL_CB_MAX_AVL 4 + +/* REMOVE_RXCPLID is an arg for pktpool callback function for removing rxcplID + * and host addr associated with the rxfrag or shared pool buffer during pktpool_reclaim(). 
+ */ +#define REMOVE_RXCPLID 2 + +/* forward declaration */ +struct pktpool; + +typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg); +typedef struct { + pktpool_cb_t cb; + void *arg; + uint8 refcnt; +} pktpool_cbinfo_t; + +/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */ +typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, int arg2); +typedef struct { + pktpool_cb_extn_t cb; + void *arg; +} pktpool_cbextn_info_t; + +#ifdef BCMDBG_POOL +/* pkt pool debug states */ +#define POOL_IDLE 0 +#define POOL_RXFILL 1 +#define POOL_RXDH 2 +#define POOL_RXD11 3 +#define POOL_TXDH 4 +#define POOL_TXD11 5 +#define POOL_AMPDU 6 +#define POOL_TXENQ 7 + +typedef struct { + void *p; + uint32 cycles; + uint32 dur; +} pktpool_dbg_t; + +typedef struct { + uint8 txdh; /* tx to host */ + uint8 txd11; /* tx to d11 */ + uint8 enq; /* waiting in q */ + uint8 rxdh; /* rx from host */ + uint8 rxd11; /* rx from d11 */ + uint8 rxfill; /* dma_rxfill */ + uint8 idle; /* avail in pool */ +} pktpool_stats_t; +#endif /* BCMDBG_POOL */ + +typedef struct pktpool { + bool inited; /**< pktpool_init was successful */ + uint8 type; /**< type of lbuf: basic, frag, etc */ + uint8 id; /**< pktpool ID: index in registry */ + bool istx; /**< direction: transmit or receive data path */ + HND_PKTPOOL_MUTEX_DECL(mutex) /**< thread-safe mutex */ + + void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */ + uint16 avail; /**< number of packets in pool's free list */ + uint16 n_pkts; /**< number of packets managed by pool */ + uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */ + uint16 max_pkt_bytes; /**< size of pkt buffer in [bytes], excluding lbuf|lbuf_frag */ + + bool empty; + uint8 cbtoggle; + uint8 cbcnt; + uint8 ecbcnt; + uint8 emptycb_disable; /**< Value of type enum pktpool_empty_cb_state */ + pktpool_cbinfo_t *availcb_excl; + pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL]; + pktpool_cbinfo_t 
ecbs[PKTPOOL_CB_MAX]; + pktpool_cbextn_info_t cbext; /**< PCIe SPLITRX related */ + pktpool_cbextn_info_t rxcplidfn; +#ifdef BCMDBG_POOL + uint8 dbg_cbcnt; + pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX]; + uint16 dbg_qlen; + pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1]; +#endif // endif + pktpool_cbinfo_t dmarxfill; +} pktpool_t; + +pktpool_t *get_pktpools_registry(int id); + +/* Incarnate a pktpool registry. On success returns total_pools. */ +extern int pktpool_attach(osl_t *osh, uint32 total_pools); +extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */ + +extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx, + uint8 type); +extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp); +extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal); +extern int pktpool_empty(osl_t *osh, pktpool_t *pktp); +extern uint16 pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt); +extern void* pktpool_get(pktpool_t *pktp); +extern void pktpool_free(pktpool_t *pktp, void *p); +extern int pktpool_add(pktpool_t *pktp, void *p); +extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp); +extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb); +extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 max_pkts); +extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 max_pkts); +extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable); +extern bool pktpool_emptycb_disabled(pktpool_t *pktp); +extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1); +extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg); +extern void 
pktpool_invoke_dmarxfill(pktpool_t *pktp); +extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg); + +#define POOLPTR(pp) ((pktpool_t *)(pp)) +#define POOLID(pp) (POOLPTR(pp)->id) + +#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid)) + +#define pktpool_tot_pkts(pp) (POOLPTR(pp)->n_pkts) /**< n_pkts = avail + in_use <= max_pkts */ +#define pktpool_avail(pp) (POOLPTR(pp)->avail) +#define pktpool_max_pkt_bytes(pp) (POOLPTR(pp)->max_pkt_bytes) +#define pktpool_max_pkts(pp) (POOLPTR(pp)->maxlen) + +/* + * ---------------------------------------------------------------------------- + * A pool ID is assigned with a pkt pool during pool initialization. This is + * done by maintaining a registry of all initialized pools, and the registry + * index at which the pool is registered is used as the pool's unique ID. + * ID 0 is reserved and is used to signify an invalid pool ID. + * All packets henceforth allocated from a pool will be tagged with the pool's + * unique ID. Packets allocated from the heap will use the reserved ID = 0. + * Packets with non-zero pool id signify that they were allocated from a pool. + * A maximum of 15 pools are supported, allowing a 4bit pool ID to be used + * in place of a 32bit pool pointer in each packet. 
+ * ---------------------------------------------------------------------------- + */ +#define PKTPOOL_INVALID_ID (0) +#define PKTPOOL_MAXIMUM_ID (15) + +/* Registry of pktpool(s) */ +/* Pool ID to/from Pool Pointer converters */ +#define PKTPOOL_ID2PTR(id) (get_pktpools_registry(id)) +#define PKTPOOL_PTR2ID(pp) (POOLID(pp)) + +#ifdef BCMDBG_POOL +extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_start_trigger(pktpool_t *pktp, void *p); +extern int pktpool_dbg_dump(pktpool_t *pktp); +extern int pktpool_dbg_notify(pktpool_t *pktp); +extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats); +#endif /* BCMDBG_POOL */ + +#ifdef BCMPKTPOOL +#define SHARED_POOL (pktpool_shared) +extern pktpool_t *pktpool_shared; +#ifdef BCMFRAGPOOL +#define SHARED_FRAG_POOL (pktpool_shared_lfrag) +extern pktpool_t *pktpool_shared_lfrag; +#endif // endif + +#ifdef BCMRESVFRAGPOOL +#define RESV_FRAG_POOL (pktpool_resv_lfrag) +#define RESV_POOL_INFO (resv_pool_info) +#else +#define RESV_FRAG_POOL ((struct pktpool *)NULL) +#define RESV_POOL_INFO (NULL) +#endif /* BCMRESVFRAGPOOL */ + +/** PCIe SPLITRX related */ +#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag) +extern pktpool_t *pktpool_shared_rxlfrag; + +int hnd_pktpool_init(osl_t *osh); +int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal); +void hnd_pktpool_refill(bool minimal); +#ifdef BCMRESVFRAGPOOL +extern pktpool_t *pktpool_resv_lfrag; +extern struct resv_info *resv_pool_info; +#endif /* BCMRESVFRAGPOOL */ +#else /* BCMPKTPOOL */ +#define SHARED_POOL ((struct pktpool *)NULL) +#endif /* BCMPKTPOOL */ + +#ifdef __cplusplus + } +#endif // endif + +#endif /* _hnd_pktpool_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hnd_pktq.h b/bcmdhd.100.10.315.x/include/hnd_pktq.h new file mode 100644 index 0000000..24a3e42 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hnd_pktq.h @@ -0,0 +1,325 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 1999-2018, 
Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_pktq.h 698847 2017-05-11 00:10:48Z $ + */ + +#ifndef _hnd_pktq_h_ +#define _hnd_pktq_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif // endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTQ_MUTEX_DECL(mutex) +#endif // endif + +/* osl multi-precedence packet queue */ +#define PKTQ_LEN_MAX 0xFFFF /* Max uint16 65535 packets */ +#ifndef PKTQ_LEN_DEFAULT +#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */ +#endif // endif +#ifndef PKTQ_MAX_PREC +#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */ +#endif // endif + +/** Queue for a single precedence level */ +typedef struct pktq_prec { + void *head; /**< first packet to dequeue */ + void *tail; /**< last packet to dequeue */ + uint16 n_pkts; /**< number of queued packets */ + uint16 max_pkts; /**< maximum number of queued packets */ + uint16 stall_count; /**< # seconds since no packets are dequeued */ + uint16 dequeue_count; /**< # of packets dequeued in last 1 second */ +} pktq_prec_t; + +#ifdef PKTQ_LOG +typedef struct { + uint32 requested; /**< packets requested to be stored */ + uint32 stored; /**< packets stored */ + uint32 saved; /**< packets saved, + because a lowest priority queue has given away one packet + */ + uint32 selfsaved; /**< packets saved, + because an older packet from the same queue has been dropped + */ + uint32 full_dropped; /**< packets dropped, + because pktq is full with higher precedence packets + */ + uint32 dropped; /**< packets dropped because pktq per that precedence is full */ + uint32 sacrificed; /**< packets dropped, + in order to save one from a queue of a highest priority + */ + uint32 busy; /**< packets droped because of hardware/transmission error */ + uint32 retry; /**< packets re-sent because they were not received */ + uint32 ps_retry; /**< packets retried again prior to moving power save mode */ + uint32 suppress; /**< 
packets which were suppressed and not transmitted */ + uint32 retry_drop; /**< packets finally dropped after retry limit */ + uint32 max_avail; /**< the high-water mark of the queue capacity for packets - + goes to zero as queue fills + */ + uint32 max_used; /**< the high-water mark of the queue utilisation for packets - + increases with use ('inverse' of max_avail) + */ + uint32 queue_capacity; /**< the maximum capacity of the queue */ + uint32 rtsfail; /**< count of rts attempts that failed to receive cts */ + uint32 acked; /**< count of packets sent (acked) successfully */ + uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */ + uint32 txrate_main; /**< running totoal of primary phy rate of all packets */ + uint32 throughput; /**< actual data transferred successfully */ + uint32 airtime; /**< cumulative total medium access delay in useconds */ + uint32 _logtime; /**< timestamp of last counter clear */ +} pktq_counters_t; + +#define PKTQ_LOG_COMMON \ + uint32 pps_time; /**< time spent in ps pretend state */ \ + uint32 _prec_log; + +typedef struct { + PKTQ_LOG_COMMON + pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */ +} pktq_log_t; +#else +typedef struct pktq_log pktq_log_t; +#endif /* PKTQ_LOG */ + +#define PKTQ_COMMON \ + HND_PKTQ_MUTEX_DECL(mutex) \ + pktq_log_t *pktqlog; \ + uint16 num_prec; /**< number of precedences in use */ \ + uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ \ + uint16 max_pkts; /**< max packets */ \ + uint16 n_pkts_tot; /**< total (cummulative over all precedences) number of packets */ + +/** multi-priority packet queue */ +struct pktq { + PKTQ_COMMON + /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ + struct pktq_prec q[PKTQ_MAX_PREC]; +}; + +/** simple, non-priority packet queue */ +struct spktq { + HND_PKTQ_MUTEX_DECL(mutex) + struct pktq_prec q; +}; + +#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; 
prec--) + +/* fn(pkt, arg). return true if pkt belongs to bsscfg */ +typedef bool (*ifpkt_cb_t)(void*, int); + +/* + * pktq filter support + */ + +/** filter function return values */ +typedef enum { + PKT_FILTER_NOACTION = 0, /**< restore the pkt to its position in the queue */ + PKT_FILTER_DELETE = 1, /**< delete the pkt */ + PKT_FILTER_REMOVE = 2, /**< do not restore the pkt to the queue, + * filter fn has taken ownership of the pkt + */ +} pktq_filter_result_t; + +/** + * Caller supplied filter function to pktq_pfilter(), pktq_filter(). + * Function filter(ctx, pkt) is called with its ctx pointer on each pkt in the + * pktq. When the filter function is called, the supplied pkt will have been + * unlinked from the pktq. The filter function returns a pktq_filter_result_t + * result specifying the action pktq_filter()/pktq_pfilter() should take for + * the pkt. + * Here are the actions taken by pktq_filter/pfilter() based on the supplied + * filter function's return value: + * + * PKT_FILTER_NOACTION - The filter will re-link the pkt at its + * previous location. + * + * PKT_FILTER_DELETE - The filter will not relink the pkt and will + * call the user supplied defer_free_pkt fn on the packet. + * + * PKT_FILTER_REMOVE - The filter will not relink the pkt. The supplied + * filter fn took ownership (or deleted) the pkt. + * + * WARNING: pkts inserted by the user (in pkt_filter and/or flush callbacks + * and chains) in the prec queue will not be seen by the filter, and the prec + * queue will be temporarily be removed from the queue hence there're side + * effects including pktq_n_pkts_tot() on the queue won't reflect the correct number + * of packets in the queue. 
+ */ + +typedef pktq_filter_result_t (*pktq_filter_t)(void* ctx, void* pkt); + +/** + * The defer_free_pkt callback is invoked when the the pktq_filter callback + * returns PKT_FILTER_DELETE decision, which allows the user to deposite + * the packet appropriately based on the situation (free the packet or + * save it in a temporary queue etc.). + */ +typedef void (*defer_free_pkt_fn_t)(void *ctx, void *pkt); + +/** + * The flush_free_pkt callback is invoked when all packets in the pktq + * are processed. + */ +typedef void (*flush_free_pkt_fn_t)(void *ctx); + +#if defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS) +/* this callback will be invoked when in low_txq_scb flush() + * two back-to-back pkts has same epoch value. + */ +typedef void (*flip_epoch_t)(void *ctx, void *pkt, uint8 *flipEpoch, uint8 *lastEpoch); +#endif /* defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS) */ + +/** filter a pktq, using the caller supplied filter/deposition/flush functions */ +extern void pktq_filter(struct pktq *pq, pktq_filter_t fn, void* arg, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); +/** filter a particular precedence in pktq, using the caller supplied filter function */ +extern void pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fn, void* arg, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); +/** filter a simple non-precedence in spktq, using the caller supplied filter function */ +extern void spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); + +/* operations on a specific precedence in packet queue */ +#define pktqprec_max_pkts(pq, prec) ((pq)->q[prec].max_pkts) +#define pktqprec_n_pkts(pq, prec) ((pq)->q[prec].n_pkts) +#define pktqprec_empty(pq, prec) ((pq)->q[prec].n_pkts == 0) +#define pktqprec_peek(pq, prec) ((pq)->q[prec].head) +#define pktqprec_peek_tail(pq, prec) 
((pq)->q[prec].tail) +#define spktq_peek_tail(pq) ((pq)->q.tail) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktqprec_avail_pkts(struct pktq *pq, int prec); +extern bool pktqprec_full(struct pktq *pq, int prec); +#else +#define pktqprec_avail_pkts(pq, prec) ((pq)->q[prec].max_pkts - (pq)->q[prec].n_pkts) +#define pktqprec_full(pq, prec) ((pq)->q[prec].n_pkts >= (pq)->q[prec].max_pkts) +#endif /* HND_PKTQ_THREAD_SAFE */ + +extern void pktq_append(struct pktq *pq, int prec, struct spktq *list); +extern void spktq_append(struct spktq *spq, struct spktq *list); +extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list); +extern void spktq_prepend(struct spktq *spq, struct spktq *list); +extern void *pktq_penq(struct pktq *pq, int prec, void *p); +extern void *pktq_penq_head(struct pktq *pq, int prec, void *p); +extern void *pktq_pdeq(struct pktq *pq, int prec); +extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p); +extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg); +extern void *pktq_pdeq_tail(struct pktq *pq, int prec); +/** Remove a specified packet from its queue */ +extern bool pktq_pdel(struct pktq *pq, void *p, int prec); + +/* For single precedence queues */ +extern void *spktq_enq(struct spktq *spq, void *p); +extern void *spktq_enq_head(struct spktq *spq, void *p); +extern void *spktq_deq(struct spktq *spq); +extern void *spktq_deq_tail(struct spktq *spq); + +/* operations on a set of precedences in packet queue */ + +extern int pktq_mlen(struct pktq *pq, uint prec_bmp); +extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); +extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out); + +/* operations on packet queue as a whole */ + +#define pktq_n_pkts_tot(pq) ((int)(pq)->n_pkts_tot) +#define pktq_max(pq) ((int)(pq)->max_pkts) +#define pktq_empty(pq) ((pq)->n_pkts_tot == 0) +#define spktq_n_pkts(spq) ((int)(spq)->q.n_pkts) +#define spktq_empty(spq) ((spq)->q.n_pkts == 0) 
+ +#define spktq_max(spq) ((int)(spq)->q.max_pkts) +#define spktq_empty(spq) ((spq)->q.n_pkts == 0) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_avail(struct pktq *pq); +extern bool pktq_full(struct pktq *pq); +extern int spktq_avail(struct spktq *spq); +extern bool spktq_full(struct spktq *spq); +#else +#define pktq_avail(pq) ((int)((pq)->max_pkts - (pq)->n_pkts_tot)) +#define pktq_full(pq) ((pq)->n_pkts_tot >= (pq)->max_pkts) +#define spktq_avail(spq) ((int)((spq)->q.max_pkts - (spq)->q.n_pkts)) +#define spktq_full(spq) ((spq)->q.n_pkts >= (spq)->q.max_pkts) +#endif /* HND_PKTQ_THREAD_SAFE */ + +/* operations for single precedence queues */ +#define pktenq(pq, p) pktq_penq((pq), 0, (p)) +#define pktenq_head(pq, p) pktq_penq_head((pq), 0, (p)) +#define pktdeq(pq) pktq_pdeq((pq), 0) +#define pktdeq_tail(pq) pktq_pdeq_tail((pq), 0) +#define pktqflush(osh, pq, dir) pktq_pflush(osh, (pq), 0, (dir)) +#define pktqinit(pq, max_pkts) pktq_init((pq), 1, (max_pkts)) +#define pktqdeinit(pq) pktq_deinit((pq)) +#define pktqavail(pq) pktq_avail((pq)) +#define pktqfull(pq) pktq_full((pq)) +#define pktqfilter(pq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \ + pktq_pfilter((pq), 0, (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx)) + +/* operations for simple non-precedence queues */ +#define spktenq(spq, p) spktq_enq((spq), (p)) +#define spktenq_head(spq, p) spktq_enq_head((spq), (p)) +#define spktdeq(spq) spktq_deq((spq)) +#define spktdeq_tail(spq) spktq_deq_tail((spq)) +#define spktqflush(osh, spq, dir) spktq_flush((osh), (spq), (dir)) +#define spktqinit(spq, max_pkts) spktq_init((spq), (max_pkts)) +#define spktqdeinit(spq) spktq_deinit((spq)) +#define spktqavail(spq) spktq_avail((spq)) +#define spktqfull(spq) spktq_full((spq)) + +#define spktqfilter(spq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \ + spktq_filter((spq), (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx)) +extern bool pktq_init(struct pktq *pq, int num_prec, int 
max_pkts); +extern bool pktq_deinit(struct pktq *pq); +extern bool spktq_init(struct spktq *spq, int max_pkts); +extern bool spktq_deinit(struct spktq *spq); + +extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_pkts); + +/* prec_out may be NULL if caller is not interested in return value */ +extern void *pktq_deq(struct pktq *pq, int *prec_out); +extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); +extern void *pktq_peek(struct pktq *pq, int *prec_out); +extern void *spktq_peek(struct spktq *spq); +extern void *pktq_peek_tail(struct pktq *pq, int *prec_out); + +/** flush pktq */ +extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir); +extern void spktq_flush(osl_t *osh, struct spktq *spq, bool dir); +/** Empty the queue at particular precedence level */ +extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir); + +#ifdef __cplusplus +} +#endif // endif + +#endif /* _hnd_pktq_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hnd_trap.h b/bcmdhd.100.10.315.x/include/hnd_trap.h new file mode 100644 index 0000000..1e2a4a6 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hnd_trap.h @@ -0,0 +1,39 @@ +/* + * HND Trap handling. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_trap.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _hnd_trap_h_ +#define _hnd_trap_h_ + +#if defined(__arm__) || defined(__thumb__) || defined(__thumb2__) +#include +#else +#error "unsupported CPU architecture" +#endif // endif + +#endif /* _hnd_trap_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hndchipc.h b/bcmdhd.100.10.315.x/include/hndchipc.h new file mode 100644 index 0000000..71f26b0 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hndchipc.h @@ -0,0 +1,53 @@ +/* + * HND SiliconBackplane chipcommon support - OS independent. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndchipc.h 689775 2017-03-13 12:37:05Z $ + */ + +#ifndef _hndchipc_h_ +#define _hndchipc_h_ + +#include +#include + +#ifdef RTE_UART +typedef void (*si_serial_init_fn)(si_t *sih, void *regs, uint irq, uint baud_base, uint reg_shift); +#else +typedef void (*si_serial_init_fn)(void *regs, uint irq, uint baud_base, uint reg_shift); +#endif // endif +extern void si_serial_init(si_t *sih, si_serial_init_fn add); + +extern volatile void *hnd_jtagm_init(si_t *sih, uint clkd, bool exttap); +extern void hnd_jtagm_disable(si_t *sih, volatile void *h); +extern uint32 jtag_scan(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint32 ir1, + uint drsz, uint32 dr0, uint32 *dr1, bool rti); +extern uint32 jtag_read_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz, + uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3); +extern uint32 jtag_write_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz, + uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3); +extern int jtag_setbit_128(si_t *sih, uint32 jtagureg_addr, uint8 bit_pos, uint8 bit_val); + +#endif /* _hndchipc_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hndlhl.h b/bcmdhd.100.10.315.x/include/hndlhl.h new file mode 100644 index 0000000..3fd9563 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hndlhl.h @@ -0,0 +1,61 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hndpmu.h 546588 2015-04-13 09:24:52Z $ + */ + +#ifndef _hndlhl_h_ +#define _hndlhl_h_ + +enum { + LHL_MAC_TIMER = 0, + LHL_ARM_TIMER = 1 +}; + +typedef struct { + uint16 offset; + uint32 mask; + uint32 val; +} lhl_reg_set_t; + +#define LHL_REG_OFF(reg) OFFSETOF(gciregs_t, reg) + +extern void si_lhl_timer_config(si_t *sih, osl_t *osh, int timer_type); +extern void si_lhl_timer_enable(si_t *sih); + +extern void si_lhl_setup(si_t *sih, osl_t *osh); +extern void si_lhl_enable(si_t *sih, osl_t *osh, bool enable); +extern void si_lhl_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period); +extern void si_lhl_enable_sdio_wakeup(si_t *sih, osl_t *osh); +extern void si_lhl_disable_sdio_wakeup(si_t *sih); +extern int si_lhl_set_lpoclk(si_t *sih, osl_t *osh, uint32 lpo_force); +extern void si_set_lv_sleep_mode_lhl_config_4369(si_t *sih); + +#define HIB_EXT_WAKEUP_CAP(sih) (BCM4347_CHIP(sih->chip)) + +#define LHL_IS_PSMODE_0(sih) (si_lhl_ps_mode(sih) == LHL_PS_MODE_0) +#define LHL_IS_PSMODE_1(sih) (si_lhl_ps_mode(sih) == LHL_PS_MODE_1) +#endif /* _hndlhl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hndmem.h b/bcmdhd.100.10.315.x/include/hndmem.h new file mode 100644 index 0000000..8706bde --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hndmem.h @@ -0,0 +1,80 @@ +/* + * Utility routines for configuring different memories in Broadcom chips. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: $ + */ + +#ifndef _HNDMEM_H_ +#define _HNDMEM_H_ + +typedef enum { + MEM_SOCRAM = 0, + MEM_BM = 1, + MEM_UCM = 2, + MEM_SHM = 3, + MEM_MAX = 4 +} hndmem_type_t; + +/* PDA (Power Down Array) configuration */ +typedef enum { + PDA_CONFIG_CLEAR = 0, /* Clear PDA, i.e. Turns on the memory bank */ + PDA_CONFIG_SET_FULL = 1, /* Set PDA, i.e. Truns off the memory bank */ + PDA_CONFIG_SET_PARTIAL = 2, /* Set PDA, i.e. 
Truns off the memory bank */ + PDA_CONFIG_MAX = 3 +} hndmem_config_t; + +/* Returns the number of banks in a given memory */ +extern int hndmem_num_banks(si_t *sih, int mem); + +/* Returns the size of a give bank in a given memory */ +extern int hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num); + +/* Returns the start address of given memory */ +extern uint32 hndmem_mem_base(si_t *sih, hndmem_type_t mem); + +#ifdef BCMDEBUG +/* Dumps the complete memory information */ +extern void hndmem_dump_meminfo_all(si_t *sih); +#endif /* BCMDEBUG */ + +/* Configures the Sleep PDA for a particular bank for a given memory type */ +extern int hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem, + int bank_num, hndmem_config_t config, uint32 pda); +/* Configures the Active PDA for a particular bank for a given memory type */ +extern int hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem, + int bank_num, hndmem_config_t config, uint32 pda); + +/* Configures the Sleep PDA for all the banks for a given memory type */ +extern int hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem, + hndmem_config_t config); +/* Configures the Active PDA for all the banks for a given memory type */ +extern int hndmem_activepda_config(si_t *sih, hndmem_type_t mem, + hndmem_config_t config); + +/* Turn off/on all the possible banks in a given memory range */ +extern int hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem, + uint32 mem_start, uint32 size, hndmem_config_t config); +#endif /* _HNDMEM_H_ */ diff --git a/bcmdhd.100.10.315.x/include/hndpmu.h b/bcmdhd.100.10.315.x/include/hndpmu.h new file mode 100644 index 0000000..025f7a6 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hndpmu.h @@ -0,0 +1,79 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hndpmu.h 700376 2017-05-18 22:55:43Z $ + */ + +#ifndef _hndpmu_h_ +#define _hndpmu_h_ + +#include +#include +#include +#include + +extern uint32 si_pmu_rsrc_macphy_clk_deps(si_t *sih, osl_t *osh, int maccore_index); +extern uint32 si_pmu_rsrc_ht_avail_clk_deps(si_t *sih, osl_t *osh); + +extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask); +extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); + +extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh); +extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag); +extern uint32 si_pmu_dump_pmucap_binary(si_t *sih, uchar *p); +extern uint32 si_pmu_dump_buf_size_pmucap(si_t *sih); +extern int si_pmu_wait_for_steady_state(si_t *sih, osl_t *osh, pmuregs_t *pmu); +extern uint32 si_pmu_wake_bit_offset(si_t *sih); +#if defined(BCMULP) +int si_pmu_ulp_register(si_t *sih); +extern void si_pmu_ulp_chipconfig(si_t *sih, osl_t *osh); +extern void si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period); +extern void si_pmu_ds1_res_init(si_t *sih, osl_t *osh); +#endif /* BCMULP */ +extern uint32 si_pmu_get_pmutimer(si_t *sih); +extern void si_switch_pmu_dependency(si_t *sih, uint mode); +extern void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask); +extern void si_pmu_set_mac_rsrc_req(si_t *sih, int macunit); +extern bool si_pmu_fast_lpo_enable_pcie(si_t *sih); +extern bool si_pmu_fast_lpo_enable_pmu(si_t *sih); +extern void si_pmu_chipcontrol_xtal_settings_4369(si_t *sih); +extern uint32 si_cur_pmu_time(si_t *sih); +extern bool si_pmu_cap_fast_lpo(si_t *sih); +extern int si_pmu_fast_lpo_disable(si_t *sih); +#ifdef BCMPMU_STATS +extern void si_pmustatstimer_init(si_t *sih); +extern void si_pmustatstimer_dump(si_t *sih); +extern void si_pmustatstimer_start(si_t *sih, uint8 timerid); +extern void si_pmustatstimer_stop(si_t *sih, uint8 timerid); +extern void si_pmustatstimer_clear(si_t *sih, uint8 
timerid); +extern void si_pmustatstimer_clear_overflow(si_t *sih); +extern uint32 si_pmustatstimer_read(si_t *sih, uint8 timerid); +extern void si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid); +extern void si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid); +extern void si_pmustatstimer_int_enable(si_t *sih); +extern void si_pmustatstimer_int_disable(si_t *sih); +#endif /* BCMPMU_STATS */ +#endif /* _hndpmu_h_ */ diff --git a/bcmdhd.100.10.315.x/include/hndsoc.h b/bcmdhd.100.10.315.x/include/hndsoc.h new file mode 100644 index 0000000..46f5acc --- /dev/null +++ b/bcmdhd.100.10.315.x/include/hndsoc.h @@ -0,0 +1,349 @@ +/* + * Broadcom HND chip & on-chip-interconnect-related definitions. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hndsoc.h 672520 2016-11-28 23:30:55Z $ + */ + +#ifndef _HNDSOC_H +#define _HNDSOC_H + +/* Include the soci specific files */ +#include +#include + +/* + * SOC Interconnect Address Map. + * All regions may not exist on all chips. + */ +#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */ +#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI_MEM_SZ (64 * 1024 * 1024) +#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ +#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */ + +#ifdef STB_SOC_WIFI +#define SI_REG_BASE_SIZE 0xB000 /* size from 0xf1800000 to 0xf180AFFF (44KB) */ +#define SI_ENUM_BASE_DEFAULT 0xF1800000 /* Enumeration space base */ +#define SI_WRAP_BASE_DEFAULT 0xF1900000 /* Wrapper space base */ +#endif /* STB_SOC_WIFI */ + +#ifndef SI_ENUM_BASE_DEFAULT +#define SI_ENUM_BASE_DEFAULT 0x18000000 /* Enumeration space base */ +#endif // endif + +#ifndef SI_WRAP_BASE_DEFAULT +#define SI_WRAP_BASE_DEFAULT 0x18100000 /* Wrapper space base */ +#endif // endif + +/** new(er) chips started locating their chipc core at a different BP address than 0x1800_0000 */ +// NIC and DHD driver binaries should support both old(er) and new(er) chips at the same time +#define SI_ENUM_BASE(sih) ((sih)->enum_base) +#define SI_WRAP_BASE(sih) (SI_ENUM_BASE(sih) + 0x00100000) + +#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ + +#define SI_NIC400_GPV_BASE 0x18200000 /* NIC-400 Global Programmers View (GPV) */ +#define SI_GPV_WR_CAP_ADDR 0x4008 /* WR-CAP offset */ +#define SI_GPV_RD_CAP_EN 0x1 /* issue read */ +#define SI_GPV_WR_CAP_EN 0x2 /* issue write */ + +#ifndef SI_MAXCORES +#define SI_MAXCORES 32 /* NorthStar has more cores */ +#endif /* SI_MAXCORES */ + +#define SI_MAXBR 4 /* Max bridges (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + 
*/ + +#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ +#define SI_FASTRAM_SWAPPED 0x19800000 + +#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ +#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */ +#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define SI_FLASH_WINDOW 0x01000000 /* Flash XIP Window */ + +#define SI_NS_NANDFLASH 0x1c000000 /* NorthStar NAND flash base */ +#define SI_NS_NORFLASH 0x1e000000 /* NorthStar NOR flash base */ +#define SI_NS_ROM 0xfffd0000 /* NorthStar ROM */ +#define SI_NS_FLASH_WINDOW 0x02000000 /* Flash XIP Window */ + +#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */ +#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */ +#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ +#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ +#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */ +#ifndef SI_ARMCA7_RAM +#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */ +#endif // endif +#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ +#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */ + +#define SI_SFLASH 0x14000000 +#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */ +#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define SI_BCM53573_NANDFLASH 0x30000000 /* 53573 NAND flash base */ +#define SI_BCM53573_NORFLASH 0x1c000000 /* 53573 NOR flash base */ +#define SI_BCM53573_FLASH2_SZ 0x04000000 /* 53573 NOR flash2 size */ + +#define 
SI_BCM53573_NORFLASH_WINDOW 0x01000000 /* only support 16M direct access for + * 3-byte address modes in spi flash + */ +#define SI_BCM53573_BOOTDEV_MASK 0x3 +#define SI_BCM53573_BOOTDEV_NOR 0x0 + +#define SI_BCM53573_NAND_PRE_MASK 0x100 /* 53573 NAND present mask */ + +#define SI_BCM53573_DDRTYPE_MASK 0x10 +#define SI_BCM53573_DDRTYPE_DDR3 0x10 + +#define SI_BCM47189_RGMII_VDD_MASK 0x3 +#define SI_BCM47189_RGMII_VDD_SHIFT 21 +#define SI_BCM47189_RGMII_VDD_3_3V 0 +#define SI_BCM47189_RGMII_VDD_2_5V 1 +#define SI_BCM47189_RGMII_VDD_1_5V 1 + +#define SI_BCM53573_LOCKED_CPUPLL 0x1 + +/* APB bridge code */ +#define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 1, etc. */ + +/* core codes */ +#define NODEV_CORE_ID 0x700 /* Invalid coreid */ +#define CC_CORE_ID 0x800 /* chipcommon core */ +#define ILINE20_CORE_ID 0x801 /* iline20 core */ +#define SRAM_CORE_ID 0x802 /* sram core */ +#define SDRAM_CORE_ID 0x803 /* sdram core */ +#define PCI_CORE_ID 0x804 /* pci core */ +#define MIPS_CORE_ID 0x805 /* mips core */ +#define ENET_CORE_ID 0x806 /* enet mac core */ +#define CODEC_CORE_ID 0x807 /* v90 codec core */ +#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ +#define ADSL_CORE_ID 0x809 /* ADSL core */ +#define ILINE100_CORE_ID 0x80a /* iline100 core */ +#define IPSEC_CORE_ID 0x80b /* ipsec core */ +#define UTOPIA_CORE_ID 0x80c /* utopia core */ +#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ +#define SOCRAM_CORE_ID 0x80e /* internal memory core */ +#define MEMC_CORE_ID 0x80f /* memc sdram core */ +#define OFDM_CORE_ID 0x810 /* OFDM phy core */ +#define EXTIF_CORE_ID 0x811 /* external interface core */ +#define D11_CORE_ID 0x812 /* 802.11 MAC core */ +#define APHY_CORE_ID 0x813 /* 802.11a phy core */ +#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ +#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ +#define MIPS33_CORE_ID 0x816 /* mips3302 core */ +#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ +#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ +#define 
USB20H_CORE_ID 0x819 /* usb 2.0 host core */ +#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ +#define SDIOH_CORE_ID 0x81b /* sdio host core */ +#define ROBO_CORE_ID 0x81c /* roboswitch core */ +#define ATA100_CORE_ID 0x81d /* parallel ATA core */ +#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ +#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ +#define PCIE_CORE_ID 0x820 /* pci express core */ +#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ +#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ +#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ +#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ +#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ +#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ +#define PMU_CORE_ID 0x827 /* PMU core */ +#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ +#define SDIOD_CORE_ID 0x829 /* SDIO device core */ +#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ +#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ +#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ +#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ +#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ +#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ +#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ +#define SC_CORE_ID 0x831 /* shared common core */ +#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ +#define SPIH_CORE_ID 0x833 /* SPI host core */ +#define I2S_CORE_ID 0x834 /* I2S core */ +#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ +#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ + +#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */ +#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */ +#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */ +#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */ +#define GCI_CORE_ID 0x840 /* GCI Core */ +#define SR_CORE_ID 0x841 /* SR_CORE ID */ +#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */ +#define CMEM_CORE_ID 
0x846 /* CNDS DDR2/3 memory controller */ +#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */ +#define SYSMEM_CORE_ID 0x849 /* System memory core */ +#define HUB_CORE_ID 0x84b /* Hub core ID */ +#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */ +#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */ +#define EROM_CORE_ID 0x366 /* EROM core ID */ +#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all + * unused address ranges + */ + +#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ +#define NS_DMA_CORE_ID 0x502 /* DMA core */ +#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ +#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ +#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ +#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ +#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ +#define NS_ROM_CORE_ID 0x508 /* ROM core */ +#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ +#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ +#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ +#define NS_SOCRAM_CORE_ID 0x50e /* internal memory core */ +#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ +#define NS_IHOST_CORE_ID ARMCA9_CORE_ID /* ARM Cortex A9 core (ihost) */ +#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */ +#define ALTA_CORE_ID 0x534 /* I2S core */ +#define DDR23_PHY_CORE_ID 0x5dd + +#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ +#define NS_PCIEG2_CORE_REV_B0 0x7 /* NS-B0 PCIE Gen 2 core rev */ + +/* There are TWO constants on all HND chips: SI_ENUM_BASE_DEFAULT above, + * and chipcommon being the first core: + */ +#define SI_CC_IDX 0 +/* SOC Interconnect types (aka chip types) */ +#define SOCI_SB 0 
+#define SOCI_AI 1 +#define SOCI_UBUS 2 +#define SOCI_NAI 3 +#define SOCI_DVTBUS 4 /* BCM7XXX Digital Video Tech bus */ + +/* Common core control flags */ +#define SICF_BIST_EN 0x8000 +#define SICF_PME_EN 0x4000 +#define SICF_CORE_BITS 0x3ffc +#define SICF_FGC 0x0002 +#define SICF_CLOCK_EN 0x0001 + +/* Common core status flags */ +#define SISF_BIST_DONE 0x8000 +#define SISF_BIST_ERROR 0x4000 +#define SISF_GATED_CLK 0x2000 +#define SISF_DMA64 0x1000 +#define SISF_CORE_BITS 0x0fff + +/* Norstar core status flags */ +#define SISF_NS_BOOTDEV_MASK 0x0003 /* ROM core */ +#define SISF_NS_BOOTDEV_NOR 0x0000 /* ROM core */ +#define SISF_NS_BOOTDEV_NAND 0x0001 /* ROM core */ +#define SISF_NS_BOOTDEV_ROM 0x0002 /* ROM core */ +#define SISF_NS_BOOTDEV_OFFLOAD 0x0003 /* ROM core */ +#define SISF_NS_SKUVEC_MASK 0x000c /* ROM core */ + +/* dot11 core-specific status flags */ +#define SISF_MINORREV_D11_SHIFT 16 +#define SISF_MINORREV_D11_MASK 0xF /**< minor corerev (corerev == 61) */ + +/* A register that is common to all cores to + * communicate w/PMU regarding clock control. 
+ */ +#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */ +#define SI_PWR_CTL_ST 0x1e8 /* For memory clock gating */ + +/* clk_ctl_st register */ +#define CCS_FORCEALP 0x00000001 /* force ALP request */ +#define CCS_FORCEHT 0x00000002 /* force HT request */ +#define CCS_FORCEILP 0x00000004 /* force ILP request */ +#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */ +#define CCS_HTAREQ 0x00000010 /* HT Avail Request */ +#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */ +#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */ +#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */ +#define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */ +#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4/CA7 fast clock request */ +#define CCS_SFLASH_CLKREQ 0x00000200 /* Sflash clk request */ +#define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */ +#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */ +#define CCS_ERSRC_REQ_SHIFT 8 +#define CCS_ALPAVAIL 0x00010000 /* ALP is available */ +#define CCS_HTAVAIL 0x00020000 /* HT is available */ +#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */ +#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */ +#define CCS_ARMFASTCLOCKSTATUS 0x01000000 /* Fast CPU clock is running */ +#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */ +#define CCS_ERSRC_STS_SHIFT 24 +#define CCS_SECI_AVAIL 0x01000000 /* RO: SECI is available */ + +/* Not really related to SOC Interconnect, but a couple of software + * conventions for the use the flash space: + */ + +/* Minumum amount of flash we support */ +#define FLASH_MIN 0x00020000 /* Minimum flash size */ + +/* A boot/binary may have an embedded block that describes its size */ +#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */ +#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */ +#define BISZ_MAGIC_IDX 0 /* Word 0: magic */ +#define BISZ_TXTST_IDX 1 /* 1: text start */ 
+#define BISZ_TXTEND_IDX 2 /* 2: text end */ +#define BISZ_DATAST_IDX 3 /* 3: data start */ +#define BISZ_DATAEND_IDX 4 /* 4: data end */ +#define BISZ_BSSST_IDX 5 /* 5: bss start */ +#define BISZ_BSSEND_IDX 6 /* 6: bss end */ +#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */ + +/* Boot/Kernel related defintion and functions */ +#define SOC_BOOTDEV_ROM 0x00000001 +#define SOC_BOOTDEV_PFLASH 0x00000002 +#define SOC_BOOTDEV_SFLASH 0x00000004 +#define SOC_BOOTDEV_NANDFLASH 0x00000008 + +#define SOC_KNLDEV_NORFLASH 0x00000002 +#define SOC_KNLDEV_NANDFLASH 0x00000004 + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) +int soc_boot_dev(void *sih); +int soc_knl_dev(void *sih); +#endif /* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */ + +#define PMU_BASE_OFFSET 0x00012000 /* PMU offset is changed for ccrev >= 56 */ +#endif /* _HNDSOC_H */ diff --git a/bcmdhd.100.10.315.x/include/linux_osl.h b/bcmdhd.100.10.315.x/include/linux_osl.h new file mode 100644 index 0000000..54d979e --- /dev/null +++ b/bcmdhd.100.10.315.x/include/linux_osl.h @@ -0,0 +1,588 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: linux_osl.h 749612 2018-03-01 08:51:26Z $ + */ + +#ifndef _linux_osl_h_ +#define _linux_osl_h_ + +#include +#define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x))) + +/* Linux Kernel: File Operations: start */ +extern void * osl_os_open_image(char * filename); +extern int osl_os_get_image_block(char * buf, int len, void * image); +extern void osl_os_close_image(void * image); +extern int osl_os_image_size(void *image); +/* Linux Kernel: File Operations: end */ + +#ifdef BCMDRIVER + +/* OSL initialization */ +extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag); + +extern void osl_detach(osl_t *osh); +extern int osl_static_mem_init(osl_t *osh, void *adapter); +extern int osl_static_mem_deinit(osl_t *osh, void *adapter); +extern void osl_set_bus_handle(osl_t *osh, void *bus_handle); +extern void* osl_get_bus_handle(osl_t *osh); +#ifdef DHD_MAP_LOGGING +extern void osl_dma_map_dump(void); +#endif /* DHD_MAP_LOGGING */ + +/* Global ASSERT type */ +extern uint32 g_assert_type; + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define PRI_FMT_x "llx" +#define PRI_FMT_X "llX" +#define PRI_FMT_o "llo" +#define PRI_FMT_d "lld" +#else +#define PRI_FMT_x "x" +#define PRI_FMT_X "X" +#define PRI_FMT_o "o" +#define PRI_FMT_d "d" +#endif /* CONFIG_PHYS_ADDR_T_64BIT */ +/* ASSERT */ +#ifndef ASSERT +#if defined(BCMASSERT_LOG) + #define ASSERT(exp) \ + do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0) +extern void osl_assert(const char *exp, const char *file, int line); +#else + #ifdef __GNUC__ + #define GCC_VERSION \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + #if GCC_VERSION > 30100 + #define ASSERT(exp) do 
{} while (0) + #else + /* ASSERT could cause segmentation fault on GCC3.1, use empty instead */ + #define ASSERT(exp) + #endif /* GCC_VERSION > 30100 */ + #endif /* __GNUC__ */ +#endif // endif +#endif /* ASSERT */ + +/* bcm_prefetch_32B */ +static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B) +{ +#if (defined(STB) && defined(__arm__)) && (__LINUX_ARM_ARCH__ >= 5) + switch (cachelines_32B) { + case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc"); + case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc"); + case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc"); + case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 0) : "cc"); + } +#endif // endif +} + +/* microsecond delay */ +#define OSL_DELAY(usec) osl_delay(usec) +extern void osl_delay(uint usec); + +#define OSL_SLEEP(ms) osl_sleep(ms) +extern void osl_sleep(uint ms); + +#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \ + osl_pcmcia_read_attr((osh), (offset), (buf), (size)) +#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \ + osl_pcmcia_write_attr((osh), (offset), (buf), (size)) +extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size); +extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size); + +/* PCI configuration space access macros */ +#define OSL_PCI_READ_CONFIG(osh, offset, size) \ + osl_pci_read_config((osh), (offset), (size)) +#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \ + osl_pci_write_config((osh), (offset), (size), (val)) +extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size); +extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val); + +/* PCI device bus # and slot # */ +#define OSL_PCI_BUS(osh) osl_pci_bus(osh) +#define OSL_PCI_SLOT(osh) osl_pci_slot(osh) +#define OSL_PCIE_DOMAIN(osh) osl_pcie_domain(osh) +#define OSL_PCIE_BUS(osh) osl_pcie_bus(osh) +extern uint osl_pci_bus(osl_t *osh); +extern uint osl_pci_slot(osl_t *osh); +extern 
uint osl_pcie_domain(osl_t *osh); +extern uint osl_pcie_bus(osl_t *osh); +extern struct pci_dev *osl_pci_device(osl_t *osh); + +#define OSL_ACP_COHERENCE (1<<1L) +#define OSL_FWDERBUF (1<<2L) + +/* Pkttag flag should be part of public information */ +typedef struct { + bool pkttag; + bool mmbus; /**< Bus supports memory-mapped register accesses */ + pktfree_cb_fn_t tx_fn; /**< Callback function for PKTFREE */ + void *tx_ctx; /**< Context to the callback function */ + void *unused[3]; + void (*rx_fn)(void *rx_ctx, void *p); + void *rx_ctx; +} osl_pubinfo_t; + +extern void osl_flag_set(osl_t *osh, uint32 mask); +extern void osl_flag_clr(osl_t *osh, uint32 mask); +extern bool osl_is_flag_set(osl_t *osh, uint32 mask); + +#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \ + do { \ + ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \ + ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \ + } while (0) + +#define PKTFREESETRXCB(osh, _rx_fn, _rx_ctx) \ + do { \ + ((osl_pubinfo_t*)osh)->rx_fn = _rx_fn; \ + ((osl_pubinfo_t*)osh)->rx_ctx = _rx_ctx; \ + } while (0) + +/* host/bus architecture-specific byte swap */ +#define BUS_SWAP32(v) (v) + #define MALLOC(osh, size) osl_malloc((osh), (size)) + #define MALLOCZ(osh, size) osl_mallocz((osh), (size)) + #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size)) + #define VMALLOC(osh, size) osl_vmalloc((osh), (size)) + #define VMALLOCZ(osh, size) osl_vmallocz((osh), (size)) + #define VMFREE(osh, addr, size) osl_vmfree((osh), (addr), (size)) + #define MALLOCED(osh) osl_malloced((osh)) + #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh) + extern void *osl_malloc(osl_t *osh, uint size); + extern void *osl_mallocz(osl_t *osh, uint size); + extern void osl_mfree(osl_t *osh, void *addr, uint size); + extern void *osl_vmalloc(osl_t *osh, uint size); + extern void *osl_vmallocz(osl_t *osh, uint size); + extern void osl_vmfree(osl_t *osh, void *addr, uint size); + extern uint osl_malloced(osl_t *osh); + extern uint osl_check_memleak(osl_t *osh); + +#define 
MALLOC_FAILED(osh) osl_malloc_failed((osh)) +extern uint osl_malloc_failed(osl_t *osh); + +/* allocate/free shared (dma-able) consistent memory */ +#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align() +#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +#define DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +extern uint osl_dma_consistent_align(void); +extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, + uint *tot, dmaaddr_t *pap); +extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); + +/* map/unmap direction */ +#define DMA_NO 0 /* Used to skip cache op */ +#define DMA_TX 1 /* TX direction for DMA */ +#define DMA_RX 2 /* RX direction for DMA */ + +/* map/unmap shared (dma-able) memory */ +#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \ + osl_dma_unmap((osh), (pa), (size), (direction)) +extern void osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *txp_dmah); +extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *txp_dmah); +extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction); + +#ifndef PHYS_TO_VIRT +#define PHYS_TO_VIRT(pa) osl_phys_to_virt(pa) +#endif // endif +#ifndef VIRT_TO_PHYS +#define VIRT_TO_PHYS(va) osl_virt_to_phys(va) +#endif // endif +extern void * osl_phys_to_virt(void * pa); +extern void * osl_virt_to_phys(void * va); + +/* API for DMA addressing capability */ +#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);}) + +#define 
OSL_SMP_WMB() smp_wmb() + +/* API for CPU relax */ +extern void osl_cpu_relax(void); +#define OSL_CPU_RELAX() osl_cpu_relax() + +extern void osl_preempt_disable(osl_t *osh); +extern void osl_preempt_enable(osl_t *osh); +#define OSL_DISABLE_PREEMPTION(osh) osl_preempt_disable(osh) +#define OSL_ENABLE_PREEMPTION(osh) osl_preempt_enable(osh) + +#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \ + defined(STB_SOC_WIFI) + extern void osl_cache_flush(void *va, uint size); + extern void osl_cache_inv(void *va, uint size); + extern void osl_prefetch(const void *ptr); + #define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *)(va), len) + #define OSL_CACHE_INV(va, len) osl_cache_inv((void *)(va), len) + #define OSL_PREFETCH(ptr) osl_prefetch(ptr) +#if defined(__ARM_ARCH_7A__) || defined(STB_SOC_WIFI) + extern int osl_arch_is_coherent(void); + #define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent() + extern int osl_acp_war_enab(void); + #define OSL_ACP_WAR_ENAB() osl_acp_war_enab() +#else /* !__ARM_ARCH_7A__ */ + #define OSL_ARCH_IS_COHERENT() NULL + #define OSL_ACP_WAR_ENAB() NULL +#endif /* !__ARM_ARCH_7A__ */ +#else /* !__mips__ && !__ARM_ARCH_7A__ */ + #define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va) + #define OSL_CACHE_INV(va, len) BCM_REFERENCE(va) + #define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr) + + #define OSL_ARCH_IS_COHERENT() NULL + #define OSL_ACP_WAR_ENAB() NULL +#endif // endif + +#ifdef BCM_BACKPLANE_TIMEOUT +extern void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx); +extern void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size); +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if (defined(STB) && defined(__arm__)) +extern void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size); +#endif // endif + +/* register access macros */ +#if defined(BCMSDIO) + #include + #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \ + (uintptr)(r), sizeof(*(r)), (v))) + #define OSL_READ_REG(osh, 
r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \ + (uintptr)(r), sizeof(*(r)))) +#elif defined(BCM_BACKPLANE_TIMEOUT) +#define OSL_READ_REG(osh, r) \ + ({\ + __typeof(*(r)) __osl_v; \ + osl_bpt_rreg(osh, (uintptr)(r), &__osl_v, sizeof(*(r))); \ + __osl_v; \ + }) +#elif (defined(STB) && defined(__arm__)) +#define OSL_READ_REG(osh, r) \ + ({\ + __typeof(*(r)) __osl_v; \ + osl_pcie_rreg(osh, (uintptr)(r), &__osl_v, sizeof(*(r))); \ + __osl_v; \ + }) +#endif // endif + +#if defined(BCM_BACKPLANE_TIMEOUT) || (defined(STB) && defined(__arm__)) + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) + #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;}) +#else /* !BCM47XX_CA9 && !BCM_BACKPLANE_TIMEOUT && !(STB && __arm__) */ +#if defined(BCMSDIO) + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \ + mmap_op else bus_op + #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \ + mmap_op : bus_op +#else + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) + #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) +#endif // endif +#endif // endif + +#define OSL_ERROR(bcmerror) osl_error(bcmerror) +extern int osl_error(int bcmerror); + +/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */ +#define PKTBUFSZ 2048 /* largest reasonable packet buffer, driver uses for ethernet MTU */ + +#define OSH_NULL NULL + +/* + * BINOSL selects the slightly slower function-call-based binary compatible osl. + * Macros expand to calls to functions defined in linux_osl.c . 
+ */ +#include /* use current 2.4.x calling conventions */ +#include /* for vsn/printf's */ +#include /* for mem*, str* */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) +extern uint64 osl_sysuptime_us(void); +#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies)) +#define OSL_SYSUPTIME_US() osl_sysuptime_us() +#else +#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ)) +#error "OSL_SYSUPTIME_US() may need to be defined" +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */ +#define printf(fmt, args...) printk(fmt , ## args) +#include /* for vsn/printf's */ +#include /* for mem*, str* */ +/* bcopy's: Linux kernel doesn't provide these (anymore) */ +#define bcopy_hw(src, dst, len) memcpy((dst), (src), (len)) +#define bcopy_hw_async(src, dst, len) memcpy((dst), (src), (len)) +#define bcopy_hw_poll_for_completion() +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + +/* register access macros */ + +#ifdef CONFIG_64BIT +/* readq is defined only for 64 bit platform */ +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v = 0; \ + BCM_REFERENCE(osh); \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + case sizeof(uint64): __osl_v = \ + readq((volatile uint64*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#else /* !CONFIG_64BIT */ +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v = 0; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + } \ + __osl_v; \ + 
}), \ + OSL_READ_REG(osh, r)) \ +) +#endif /* CONFIG_64BIT */ + +#ifdef CONFIG_64BIT +/* writeq is defined only for 64 bit platform */ +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) + +#else /* !CONFIG_64BIT */ +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#endif /* CONFIG_64BIT */ + +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) + +/* bcopy, bcmp, and bzero functions */ +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + +/* uncached/cached virtual address */ +#define OSL_UNCACHED(va) ((void *)va) +#define OSL_CACHED(va) ((void *)va) + +#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va) +#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va) + +/* get processor cycle count */ +#if defined(__i386__) +#define OSL_GETCYCLES(x) rdtscl((x)) +#else +#define OSL_GETCYCLES(x) ((x) = 0) +#endif // endif + +/* dereference an address that may cause a bus exception */ +#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; }) + +/* map/unmap physical to virtual I/O */ +#if !defined(CONFIG_MMC_MSM7X00A) +#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned 
long)(size)) +#else +#define REG_MAP(pa, size) (void *)(0) +#endif /* !defined(CONFIG_MMC_MSM7X00A) */ +#define REG_UNMAP(va) iounmap((va)) + +/* shared (dma-able) memory access macros */ +#define R_SM(r) *(r) +#define W_SM(r, v) (*(r) = (v)) +#define BZERO_SM(r, len) memset((r), '\0', (len)) + +/* Because the non BINOSL implementation of the PKT OSL routines are macros (for + * performance reasons), we need the Linux headers. + */ +#include <linuxver.h> /* use current 2.4.x calling conventions */ + +#define OSL_RAND() osl_rand() +extern uint32 osl_rand(void); + +#define DMA_FLUSH(osh, va, size, direction, p, dmah) \ + osl_dma_flush((osh), (va), (size), (direction), (p), (dmah)) +#if !defined(BCM_SECURE_DMA) +#define DMA_MAP(osh, va, size, direction, p, dmah) \ + osl_dma_map((osh), (va), (size), (direction), (p), (dmah)) +#endif /* !(defined(BCM_SECURE_DMA)) */ + +#else /* ! BCMDRIVER */ + +/* ASSERT */ + #define ASSERT(exp) do {} while (0) + +/* MALLOC and MFREE */ +#define MALLOC(o, l) malloc(l) +#define MFREE(o, p, l) free(p) +#include <stdlib.h> + +/* str* and mem* functions */ +#include <string.h> + +/* *printf functions */ +#include <stdio.h> + +/* bcopy, bcmp, and bzero */ +extern void bcopy(const void *src, void *dst, size_t len); +extern int bcmp(const void *b1, const void *b2, size_t len); +extern void bzero(void *b, size_t len); +#endif /* ! BCMDRIVER */ + +/* Current STB 7445D1 doesn't use ACP and it is non-coherent. + * Adding these dummy values for build apps only + * When we revisit need to change these. 
+ */ + +#ifdef BCM_SECURE_DMA + +#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \ + osl_sec_dma_map((osh), (va), (size), (direction), (p), (dmah), (pcma), (offset)) +#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) \ + osl_sec_dma_dd_map((osh), (va), (size), (direction), (p), (dmah)) +#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \ + osl_sec_dma_map_txmeta((osh), (va), (size), (direction), (p), (dmah), (pcma)) +#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \ + osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset)) +#define SECURE_DMA_UNMAP_ALL(osh, pcma) \ + osl_sec_dma_unmap_all((osh), (pcma)) + +#define DMA_MAP(osh, va, size, direction, p, dmah) + +typedef struct sec_cma_info { + struct sec_mem_elem *sec_alloc_list; + struct sec_mem_elem *sec_alloc_list_tail; +} sec_cma_info_t; + +#if defined(__ARM_ARCH_7A__) +#define CMA_BUFSIZE_4K 4096 +#define CMA_BUFSIZE_2K 2048 +#define CMA_BUFSIZE_512 512 + +#define CMA_BUFNUM 2048 +#define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */ +#define SEC_CMA_COHERENT_MAX 278 +#define CMA_DMA_DESC_MEMBLOCK (SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX) +#define CMA_DMA_DATA_MEMBLOCK (CMA_BUFSIZE_4K*CMA_BUFNUM) +#define CMA_MEMBLOCK (CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK) +#define CONT_REGION 0x02 /* Region CMA */ +#else +#define CONT_REGION 0x00 /* To access the MIPs mem, Not yet... 
*/ +#endif /* !defined __ARM_ARCH_7A__ */ + +#define SEC_DMA_ALIGN (1<<16) +typedef struct sec_mem_elem { + size_t size; + int direction; + phys_addr_t pa_cma; /**< physical address */ + void *va; /**< virtual address of driver pkt */ + dma_addr_t dma_handle; /**< bus address assign by linux */ + void *vac; /**< virtual address of cma buffer */ + struct page *pa_cma_page; /* phys to page address */ + struct sec_mem_elem *next; +} sec_mem_elem_t; + +extern dma_addr_t osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset); +extern dma_addr_t osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah); +extern dma_addr_t osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, + int direction, void *p, hnddma_seg_map_t *dmah, void *ptr_cma_info); +extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction, + void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset); +extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info); + +#endif /* BCM_SECURE_DMA */ + +typedef struct sk_buff_head PKT_LIST; +#define PKTLIST_INIT(x) skb_queue_head_init((x)) +#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y)) +#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x)) +#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x)) +#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x)) + +#ifdef REPORT_FATAL_TIMEOUTS +typedef struct osl_timer { + struct timer_list *timer; + bool set; +} osl_timer_t; + +typedef void (*linux_timer_fn)(ulong arg); + +extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg); +extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic); +extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic); +extern bool 
osl_timer_del(osl_t *osh, osl_timer_t *t); +#endif + +typedef atomic_t osl_atomic_t; +#define OSL_ATOMIC_SET(osh, v, x) atomic_set(v, x) +#define OSL_ATOMIC_INIT(osh, v) atomic_set(v, 0) +#define OSL_ATOMIC_INC(osh, v) atomic_inc(v) +#define OSL_ATOMIC_INC_RETURN(osh, v) atomic_inc_return(v) +#define OSL_ATOMIC_DEC(osh, v) atomic_dec(v) +#define OSL_ATOMIC_DEC_RETURN(osh, v) atomic_dec_return(v) +#define OSL_ATOMIC_READ(osh, v) atomic_read(v) +#define OSL_ATOMIC_ADD(osh, v, x) atomic_add(v, x) + +#endif /* _linux_osl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/linux_pkt.h b/bcmdhd.100.10.315.x/include/linux_pkt.h new file mode 100644 index 0000000..7a84b8c --- /dev/null +++ b/bcmdhd.100.10.315.x/include/linux_pkt.h @@ -0,0 +1,233 @@ +/* + * Linux Packet (skb) interface + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: linux_pkt.h 701430 2017-05-25 00:03:02Z $ + */ + +#ifndef _linux_pkt_h_ +#define _linux_pkt_h_ + +#include <typedefs.h> + +#ifdef __ARM_ARCH_7A__ +#define PKT_HEADROOM_DEFAULT NET_SKB_PAD /**< NET_SKB_PAD is defined in a linux kernel header */ +#else +#define PKT_HEADROOM_DEFAULT 16 +#endif /* __ARM_ARCH_7A__ */ + +#ifdef BCMDRIVER +/* + * BINOSL selects the slightly slower function-call-based binary compatible osl. + * Macros expand to calls to functions defined in linux_osl.c . + */ +/* Because the non BINOSL implementation of the PKT OSL routines are macros (for + * performance reasons), we need the Linux headers. + */ +#include <linuxver.h> + +/* packet primitives */ +#ifdef BCM_OBJECT_TRACE +#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FUNCTION__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__) +#else +#define PKTGET(osh, len, send) linux_pktget((osh), (len)) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) +#endif /* BCM_OBJECT_TRACE */ +#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh) +#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#if defined(BCM_OBJECT_TRACE) +#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__) +#else +#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send)) +#endif /* BCM_OBJECT_TRACE */ +#ifdef CONFIG_DHD_USE_STATIC_BUF +#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len)) +#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send)) +#else +#define PKTGET_STATIC PKTGET +#define PKTFREE_STATIC PKTFREE +#endif /* CONFIG_DHD_USE_STATIC_BUF */ +#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);}) +#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);}) +#define PKTHEAD(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->head);}) +#define PKTSETHEAD(osh, skb, h) ({BCM_REFERENCE(osh); \ + (((struct sk_buff *)(skb))->head = (h));}) 
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head)) +#define PKTEXPHEADROOM(osh, skb, b) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_realloc_headroom((struct sk_buff*)(skb), (b)); \ + }) +#define PKTTAILROOM(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_tailroom((struct sk_buff*)(skb)); \ + }) +#define PKTPADTAILROOM(osh, skb, padlen) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_pad((struct sk_buff*)(skb), (padlen)); \ + }) +#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);}) +#define PKTSETNEXT(osh, skb, x) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \ + }) +#define PKTSETLEN(osh, skb, len) \ + ({ \ + BCM_REFERENCE(osh); \ + __skb_trim((struct sk_buff*)(skb), (len)); \ + }) +#define PKTPUSH(osh, skb, bytes) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_push((struct sk_buff*)(skb), (bytes)); \ + }) +#define PKTPULL(osh, skb, bytes) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_pull((struct sk_buff*)(skb), (bytes)); \ + }) +#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb)) +#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh) +#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) +#define PKTFREELIST(skb) PKTLINK(skb) +#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x)) +#define PKTPTR(skb) (skb) +#define PKTID(skb) ({BCM_REFERENCE(skb); 0;}) +#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);}) +#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;}) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER) +#define PKTORPHAN(skb, tsq) osl_pkt_orphan_partial(skb, tsq) +extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq); +#else +#define PKTORPHAN(skb, tsq) ({BCM_REFERENCE(skb); 0;}) +#endif /* LINUX VERSION >= 3.6 */ + +#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISFAST(osh, skb) 
({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) + +#define PKTSETCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) + +#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define CTF_MARK(m) ({BCM_REFERENCE(m); 0;}) + +#define PKTFRAGLEN(osh, lb, ix) (0) +#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh) + +#define PKTSETFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); }) +#define PKTCLRFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); }) +#define PKTISFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) + +#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;}) + +#ifdef BCMFA +#ifdef BCMFA_HW_HASH +#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx) +#else +#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);}) +#endif /* BCMFA_SW_HASH */ +#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx) +#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp) +#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev) + +#define AUX_TCP_FIN_RST (1 << 0) +#define AUX_FREED (1 << 1) +#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST) +#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST)) +#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST) +#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED) +#define PKTCLRFAFREED(skb) (((struct 
sk_buff*)(skb))->napt_flags &= (~AUX_FREED)) +#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED) +#define PKTISFABRIDGED(skb) PKTISFAAUX(skb) +#else +#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;}) + +#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb) +#define PKTSETFAFREED(skb) BCM_REFERENCE(skb) +#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb) +#endif /* BCMFA */ + +#if defined(BCM_OBJECT_TRACE) +extern void linux_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller); +#else +extern void linux_pktfree(osl_t *osh, void *skb, bool send); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pktget_static(osl_t *osh, uint len); +extern void osl_pktfree_static(osl_t *osh, void *skb, bool send); +extern void osl_pktclone(osl_t *osh, void **pkt); + +#ifdef BCM_OBJECT_TRACE +extern void *linux_pktget(osl_t *osh, uint len, int line, const char *caller); +extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller); +#else +extern void *linux_pktget(osl_t *osh, uint len); +extern void *osl_pktdup(osl_t *osh, void *skb); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pkt_frmnative(osl_t *osh, void *skb); +extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb)) +#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt)) + +#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev) +#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x)) +#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority) +#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x)) +#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW) +#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \ + ((x) ? 
CHECKSUM_UNNECESSARY : CHECKSUM_NONE)) +/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */ +#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned) + +#ifdef CONFIG_NF_CONNTRACK_MARK +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define PKTMARK(p) (((struct sk_buff *)(p))->mark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m) +#else /* !2.6.0 */ +#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m) +#endif /* 2.6.0 */ +#else /* CONFIG_NF_CONNTRACK_MARK */ +#define PKTMARK(p) 0 +#define PKTSETMARK(p, m) +#endif /* CONFIG_NF_CONNTRACK_MARK */ + +#define PKTALLOCED(osh) osl_pktalloced(osh) +extern uint osl_pktalloced(osl_t *osh); + +#endif /* BCMDRIVER */ + +#endif /* _linux_pkt_h_ */ diff --git a/bcmdhd.100.10.315.x/include/linuxver.h b/bcmdhd.100.10.315.x/include/linuxver.h new file mode 100644 index 0000000..2caff4d --- /dev/null +++ b/bcmdhd.100.10.315.x/include/linuxver.h @@ -0,0 +1,834 @@ +/* + * Linux-specific abstractions to gain some independence from linux kernel versions. + * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: linuxver.h 767291 2018-06-13 06:35:04Z $ + */ + +#ifndef _linuxver_h_ +#define _linuxver_h_ + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-parameter" +#endif // endif + +#include <typedefs.h> +#include <linux/version.h> +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#include <linux/config.h> +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)) +#include <generated/autoconf.h> +#else +#include <linux/autoconf.h> +#endif // endif +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) +#include <linux/kconfig.h> +#endif // endif +#include <linux/module.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)) +/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */ +#ifdef __UNDEF_NO_VERSION__ +#undef __NO_VERSION__ +#else +#define __NO_VERSION__ +#endif // endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i") +#define module_param_string(_name_, _string_, _size_, _perm_) \ + MODULE_PARM(_string_, "c" __MODULE_STRING(_size_)) +#endif // endif + +/* linux/malloc.h is deprecated, use linux/slab.h instead. 
 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9)) +#include <linux/malloc.h> +#else +#include <linux/slab.h> +#endif // endif + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#include <linux/semaphore.h> +#else +#include <asm/semaphore.h> +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) +#undef IP_TOS +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */ +#include <asm/io.h> + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)) +#include <linux/workqueue.h> +#else +#include <linux/tqueue.h> +#ifndef work_struct +#define work_struct tq_struct +#endif // endif +#ifndef INIT_WORK +#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data)) +#endif // endif +#ifndef schedule_work +#define schedule_work(_work) schedule_task((_work)) +#endif // endif +#ifndef flush_scheduled_work +#define flush_scheduled_work() flush_scheduled_tasks() +#endif // endif +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define DAEMONIZE(a) do { \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); \ + } while (0) +#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func) +#else +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work) +#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \ + (RHEL_MAJOR == 5)) +/* Exclude RHEL 5 */ +typedef void (*work_func_t)(void 
*work); +#endif // endif +#endif /* >= 2.6.20 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/* Some distributions have their own 2.6.x compatibility layers */ +#ifndef IRQ_NONE +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif // endif +#else +typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#define IRQF_SHARED SA_SHIRQ +#endif /* < 2.6.18 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) +#ifdef CONFIG_NET_RADIO +#define CONFIG_WIRELESS_EXT +#endif // endif +#endif /* < 2.6.17 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) +#define MOD_INC_USE_COUNT +#define MOD_DEC_USE_COUNT +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#endif // endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) +#include +#endif // endif +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */ + +#ifndef __exit +#define __exit +#endif // endif +#ifndef __devexit +#define __devexit +#endif // endif +#ifndef __devinit +# if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) +# define __devinit __init +# else +/* All devices are hotpluggable since linux 3.8.0 */ +# define __devinit +# endif +#endif /* !__devinit */ +#ifndef __devinitdata +#define __devinitdata +#endif // endif +#ifndef __devexit_p +#define __devexit_p(x) x +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)) + +#define pci_get_drvdata(dev) (dev)->sysdata +#define pci_set_drvdata(dev, value) 
(dev)->sysdata = (value) + +/* + * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration + */ + +struct pci_device_id { + unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */ + unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ + unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */ + unsigned long driver_data; /* Data private to the driver */ +}; + +struct pci_driver { + struct list_head node; + char *name; + const struct pci_device_id *id_table; /* NULL if wants all devices */ + int (*probe)(struct pci_dev *dev, + const struct pci_device_id *id); /* New device inserted */ + void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug + * capable driver) + */ + void (*suspend)(struct pci_dev *dev); /* Device suspended */ + void (*resume)(struct pci_dev *dev); /* Device woken up */ +}; + +#define MODULE_DEVICE_TABLE(type, name) +#define PCI_ANY_ID (~0) + +/* compatpci.c */ +#define pci_module_init pci_register_driver +extern int pci_register_driver(struct pci_driver *drv); +extern void pci_unregister_driver(struct pci_driver *drv); + +#endif /* PCI registration */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)) +#define pci_module_init pci_register_driver +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) +#ifdef MODULE +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#else +#define module_init(x) __initcall(x); +#define module_exit(x) __exitcall(x); +#endif // endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) +#define WL_USE_NETDEV_OPS +#else +#undef WL_USE_NETDEV_OPS +#endif // endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL) +#define WL_CONFIG_RFKILL +#else +#undef WL_CONFIG_RFKILL +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48)) +#define 
list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13)) +#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)]) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44)) +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23)) +#define pci_enable_device(dev) do { } while (0) +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)) +#define net_device device +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42)) + +/* + * DMA mapping + * + * See linux/Documentation/DMA-mapping.txt + */ + +#ifndef PCI_DMA_TODEVICE +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#endif // endif + +typedef u32 dma_addr_t; + +/* Pure 2^n version of get_order */ +static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC | GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} +#define pci_map_single(cookie, address, size, dir) virt_to_bus(address) +#define pci_unmap_single(cookie, address, size, dir) + +#endif /* DMA mapping */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) + +#define dev_kfree_skb_any(a) dev_kfree_skb(a) +#define netif_down(dev) do { (dev)->start = 0; } while (0) + +/* pcmcia-cs provides its own netdevice compatibility layer */ +#ifndef _COMPAT_NETDEVICE_H + +/* + * SoftNet + * + 
* For pre-softnet kernels we need to tell the upper layer not to + * re-enter start_xmit() while we are in there. However softnet + * guarantees not to enter while we are in there so there is no need + * to do the netif_stop_queue() dance unless the transmit queue really + * gets stuck. This should also improve performance according to tests + * done by Aman Singla. + */ + +#define dev_kfree_skb_irq(a) dev_kfree_skb(a) +#define netif_wake_queue(dev) \ + do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0) +#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy) + +static inline void netif_start_queue(struct net_device *dev) +{ + dev->tbusy = 0; + dev->interrupt = 0; + dev->start = 1; +} + +#define netif_queue_stopped(dev) (dev)->tbusy +#define netif_running(dev) (dev)->start + +#endif /* _COMPAT_NETDEVICE_H */ + +#define netif_device_attach(dev) netif_start_queue(dev) +#define netif_device_detach(dev) netif_stop_queue(dev) + +/* 2.4.x renamed bottom halves to tasklets */ +#define tasklet_struct tq_struct +static inline void tasklet_schedule(struct tasklet_struct *tasklet) +{ + queue_task(tasklet, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} + +static inline void tasklet_init(struct tasklet_struct *tasklet, + void (*func)(unsigned long), + unsigned long data) +{ + tasklet->next = NULL; + tasklet->sync = 0; + tasklet->routine = (void (*)(void *))func; + tasklet->data = (void *)data; +} +#define tasklet_kill(tasklet) { do {} while (0); } + +/* 2.4.x introduced del_timer_sync() */ +#define del_timer_sync(timer) del_timer(timer) + +#else + +#define netif_down(dev) + +#endif /* SoftNet */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/* + * Emit code to initialise a tq_struct's routine and data pointers + */ +#define PREPARE_TQUEUE(_tq, _routine, _data) \ + do { \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) + +/* + * Emit code to initialise all of a tq_struct + */ +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + 
INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + PREPARE_TQUEUE((_tq), (_routine), (_data)); \ + } while (0) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */ + +/* Power management related macro & routines */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9) +#define PCI_SAVE_STATE(a, b) pci_save_state(a) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a) +#else +#define PCI_SAVE_STATE(a, b) pci_save_state(a, b) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b) +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) +static inline int +pci_save_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + if (buffer) { + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &buffer[i]); + } + return 0; +} + +static inline int +pci_restore_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + + if (buffer) { + for (i = 0; i < 16; i++) + pci_write_config_dword(dev, i * 4, buffer[i]); + } + /* + * otherwise, write the context information we know from bootup. + * This works around a problem where warm-booting from Windows + * combined with a D3(hot)->D0 transition causes PCI config + * header data to be forgotten. 
+ */ + else { + for (i = 0; i < 6; i ++) + pci_write_config_dword(dev, + PCI_BASE_ADDRESS_0 + (i * 4), + pci_resource_start(dev, i)); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + } + return 0; +} +#endif /* PCI power management */ + +/* Old cp0 access macros deprecated in 2.4.19 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19)) +#define read_c0_count() read_32bit_cp0_register(CP0_COUNT) +#endif // endif + +/* Module refcount handled internally in 2.6.x */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#else +#define OLD_MOD_INC_USE_COUNT do {} while (0) +#define OLD_MOD_DEC_USE_COUNT do {} while (0) +#endif // endif +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#endif // endif +#ifndef MOD_INC_USE_COUNT +#define MOD_INC_USE_COUNT do {} while (0) +#endif // endif +#ifndef MOD_DEC_USE_COUNT +#define MOD_DEC_USE_COUNT do {} while (0) +#endif // endif +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) do {} while (0) +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)) +#ifndef HAVE_FREE_NETDEV +#define free_netdev(dev) kfree(dev) +#endif // endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/* struct packet_type redefined in 2.6.x */ +#define af_packet_priv data +#endif // endif + +/* suspend args */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) +#define DRV_SUSPEND_STATE_TYPE pm_message_t +#else +#define DRV_SUSPEND_STATE_TYPE uint32 +#endif // endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define CHECKSUM_HW 
CHECKSUM_PARTIAL +#endif // endif + +typedef struct { + void *parent; /* some external entity that the thread supposed to work for */ + char *proc_name; + struct task_struct *p_task; + long thr_pid; + int prio; /* priority */ + struct semaphore sema; + int terminated; + struct completion completed; + int flush_ind; + struct completion flushed; + spinlock_t spinlock; + int up_cnt; +} tsk_ctl_t; + +/* requires tsk_ctl_t tsk argument, the caller's priv data is passed in owner ptr */ +/* note this macro assumes there may be only one context waiting on thread's completion */ +#ifdef DHD_DEBUG +#define DBG_THR(x) printk x +#else +#define DBG_THR(x) +#endif // endif + +static inline bool binary_sema_down(tsk_ctl_t *tsk) +{ + if (down_interruptible(&tsk->sema) == 0) { + unsigned long flags = 0; + spin_lock_irqsave(&tsk->spinlock, flags); + if (tsk->up_cnt == 1) + tsk->up_cnt--; + else { + DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt)); + } + spin_unlock_irqrestore(&tsk->spinlock, flags); + return false; + } else + return true; +} + +static inline bool binary_sema_up(tsk_ctl_t *tsk) +{ + bool sem_up = false; + unsigned long flags = 0; + + spin_lock_irqsave(&tsk->spinlock, flags); + if (tsk->up_cnt == 0) { + tsk->up_cnt++; + sem_up = true; + } else if (tsk->up_cnt == 1) { + /* dhd_sched_dpc: dpc is alread up! 
*/ + } else + DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt)); + + spin_unlock_irqrestore(&tsk->spinlock, flags); + + if (sem_up) + up(&tsk->sema); + + return sem_up; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x) +#else +#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x) +#endif // endif + +#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \ +{ \ + sema_init(&((tsk_ctl)->sema), 0); \ + init_completion(&((tsk_ctl)->completed)); \ + init_completion(&((tsk_ctl)->flushed)); \ + (tsk_ctl)->parent = owner; \ + (tsk_ctl)->proc_name = name; \ + (tsk_ctl)->terminated = FALSE; \ + (tsk_ctl)->flush_ind = FALSE; \ + (tsk_ctl)->up_cnt = 0; \ + (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \ + if (IS_ERR((tsk_ctl)->p_task)) { \ + (tsk_ctl)->thr_pid = -1; \ + DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \ + (tsk_ctl)->proc_name)); \ + } else { \ + (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \ + spin_lock_init(&((tsk_ctl)->spinlock)); \ + DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + }; \ +} + +#define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */ + +#define PROC_STOP(tsk_ctl) \ +{ \ + uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + (tsk_ctl)->terminated = TRUE; \ + smp_wmb(); \ + up(&((tsk_ctl)->sema)); \ + DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + timeout = wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ + if (timeout == 0) \ + DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + else \ + DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + (tsk_ctl)->parent = NULL; \ + (tsk_ctl)->proc_name = NULL; \ + (tsk_ctl)->thr_pid = -1; \ + (tsk_ctl)->up_cnt = 0; \ +} + 
+#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \ +{ \ + uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + (tsk_ctl)->terminated = TRUE; \ + smp_wmb(); \ + binary_sema_up(tsk_ctl); \ + DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + timeout = wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \ + if (timeout == 0) \ + DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + else \ + DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + (tsk_ctl)->parent = NULL; \ + (tsk_ctl)->proc_name = NULL; \ + (tsk_ctl)->thr_pid = -1; \ +} + +/* +* Flush is non-reentrant, so callers must make sure +* there is no race condition. +* For safer exit, added wait_for_completion_timeout +* with 5 sec timeout. +*/ +#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \ +{ \ + uint timeout = msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \ + (tsk_ctl)->flush_ind = TRUE; \ + smp_wmb(); \ + binary_sema_up(tsk_ctl); \ + DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + timeout = wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \ + if (timeout == 0) \ + DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + else \ + DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ +} + +/* ----------------------- */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +#define KILL_PROC(nr, sig) \ +{ \ +struct task_struct *tsk; \ +struct pid *pid; \ +pid = find_get_pid((pid_t)nr); \ +tsk = pid_task(pid, PIDTYPE_PID); \ +if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 30)) +#define KILL_PROC(pid, sig) \ +{ \ + struct
task_struct *tsk; \ + tsk = find_task_by_vpid(pid); \ + if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#define KILL_PROC(pid, sig) \ +{ \ + kill_proc(pid, sig, 1); \ +} +#endif // endif +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#include +#else +#include + +#define __wait_event_interruptible_timeout(wq, condition, ret) \ +do { \ + wait_queue_t __wait; \ + init_waitqueue_entry(&__wait, current); \ + \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) + +#define wait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + +/* +For < 2.6.24, wl creates its own netdev but doesn't +align the priv area like the genuine alloc_netdev(). 
+Since netdev_priv() always gives us the aligned address, it will +not match our unaligned address for < 2.6.24 +*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#define DEV_PRIV(dev) (dev->priv) +#else +#define DEV_PRIV(dev) netdev_priv(dev) +#endif // endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) +#define WL_ISR(i, d, p) wl_isr((i), (d)) +#else +#define WL_ISR(i, d, p) wl_isr((i), (d), (p)) +#endif /* < 2.6.20 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_priv(dev) dev->priv +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) +#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled())) +#else +#define CAN_SLEEP() (FALSE) +#endif // endif + +#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define RANDOM32 prandom_u32 +#define RANDOM_BYTES prandom_bytes +#else +#define RANDOM32 random32 +#define RANDOM_BYTES get_random_bytes +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define SRANDOM32(entropy) prandom_seed(entropy) +#else +#define SRANDOM32(entropy) srandom32(entropy) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */ + +/* + * Override latest kfifo functions with + * older version to work on older kernels + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS) +#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c) +#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c) +#define kfifo_esize(a) 1 +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS) +#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d) +#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d) +#define kfifo_esize(a) 1 +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
*/ + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif // endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static inline struct inode *file_inode(const struct file *f) +{ + return f->f_dentry->d_inode; +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ + +#endif /* _linuxver_h_ */ diff --git a/bcmdhd.100.10.315.x/include/lpflags.h b/bcmdhd.100.10.315.x/include/lpflags.h new file mode 100644 index 0000000..dad31c5 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/lpflags.h @@ -0,0 +1,45 @@ +/* + * Chip related low power flags + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: lpflags.h 592839 2015-10-14 14:19:09Z $ + */ +#ifndef _lpflags_h_ +#define _lpflags_h_ + +/* Chip related low power flags (lpflags) */ +#define LPFLAGS_SI_GLOBAL_DISABLE (1 << 0) +#define LPFLAGS_SI_MEM_STDBY_DISABLE (1 << 1) +#define LPFLAGS_SI_SFLASH_DISABLE (1 << 2) +#define LPFLAGS_SI_BTLDO3P3_DISABLE (1 << 3) +#define LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE (1 << 4) +#define LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON (1 << 5) +#define LPFLAGS_SI_DS0_SLEEP_PDA_DISABLE (1 << 6) +#define LPFLAGS_SI_DS1_SLEEP_PDA_DISABLE (1 << 7) +#define LPFLAGS_PHY_GLOBAL_DISABLE (1 << 16) +#define LPFLAGS_PHY_LP_DISABLE (1 << 17) +#define LPFLAGS_PSM_PHY_CTL (1 << 18) + +#endif /* _lpflags_h_ */ diff --git a/bcmdhd.100.10.315.x/include/mbo.h b/bcmdhd.100.10.315.x/include/mbo.h new file mode 100644 index 0000000..dd638bf --- /dev/null +++ b/bcmdhd.100.10.315.x/include/mbo.h @@ -0,0 +1,285 @@ +/* + * Fundamental types and constants relating to WFA MBO + * (Multiband Operation) + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id$ + */ + +#ifndef _MBO_H_ +#define _MBO_H_ + +/* This marks the start of a packed structure section. */ +#include + +/* WiFi MBO OUI values */ +#define MBO_OUI WFA_OUI /* WiFi OUI 50:6F:9A */ +/* oui_type field identifying the type and version of the MBO IE. */ +#define MBO_OUI_TYPE WFA_OUI_TYPE_MBO /* OUI Type/Version */ +/* IEEE 802.11 vendor specific information element. */ +#define MBO_IE_ID 0xdd + +/* MBO ATTR related macros */ +#define MBO_ATTR_ID_OFF 0 +#define MBO_ATTR_LEN_OFF 1 +#define MBO_ATTR_DATA_OFF 2 + +#define MBO_ATTR_ID_LEN 1 /* Attr ID field length */ +#define MBO_ATTR_LEN_LEN 1 /* Attr Length field length */ +#define MBO_ATTR_HDR_LEN 2 /* ID + 1-byte length field */ + +/* MBO subelemts related */ +#define MBO_SUBELEM_ID 0xdd +#define MBO_SUBELEM_OUI WFA_OUI + +#define MBO_SUBELEM_ID_LEN 1 /* SubElement ID field length */ +#define MBO_SUBELEM_LEN_LEN 1 /* SubElement length field length */ +#define MBO_SUBELEM_HDR_LEN 6 /* ID + length + OUI + OUY TYPE */ + +#define MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L) (7 + (L)) /* value of length field */ +#define MBO_NON_PREF_CHAN_SUBELEM_TOT_LEN(L) \ + (MBO_SUBELEM_ID_LEN + MBO_SUBELEM_LEN_LEN + MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L)) +/* MBO attributes as defined in the mbo spec */ +enum { + MBO_ATTR_MBO_AP_CAPABILITY = 1, + MBO_ATTR_NON_PREF_CHAN_REPORT = 2, + MBO_ATTR_CELL_DATA_CAP = 3, + MBO_ATTR_ASSOC_DISALLOWED = 4, + MBO_ATTR_CELL_DATA_CONN_PREF = 5, + MBO_ATTR_TRANS_REASON_CODE = 6, + MBO_ATTR_TRANS_REJ_REASON_CODE = 7, + MBO_ATTR_ASSOC_RETRY_DELAY = 8 +}; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ie_s { + uint8 id; /* IE ID: MBO_IE_ID 0xDD */ + uint8 len; /* IE length */ + uint8 oui[WFA_OUI_LEN]; /* MBO_OUI 50:6F:9A */ + uint8 oui_type; /* 
MBO_OUI_TYPE 0x16 */ + uint8 attr[1]; /* var len attributes */ +} BWL_POST_PACKED_STRUCT wifi_mbo_ie_t; + +#define MBO_IE_HDR_SIZE (OFFSETOF(wifi_mbo_ie_t, attr)) +/* oui:3 bytes + oui type:1 byte */ +#define MBO_IE_NO_ATTR_LEN 4 + +/* MBO AP Capability Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ap_cap_ind_attr_s { + /* Attribute ID - 0x01. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* AP capability bitmap */ + uint8 cap_ind; +} BWL_POST_PACKED_STRUCT wifi_mbo_ap_cap_ind_attr_t; + +/* MBO AP Capability Indication Field Values */ +#define MBO_AP_CAP_IND_CELLULAR_AWARE 0x40 + +/* Non-preferred Channel Report Attribute */ +#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_OFF 2 +#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF 3 +#define MBO_NON_PREF_CHAN_ATTR_PREF_OFF(L) \ + (MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF + (L)) + +#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_LEN 1 +#define MBO_NON_PREF_CHAN_ATTR_PREF_LEN 1 +#define MBO_NON_PREF_CHAN_ATTR_REASON_LEN 1 + +#define MBO_NON_PREF_CHAN_ATTR_LEN(L) ((L) + 3) +#define MBO_NON_PREF_CHAN_ATTR_TOT_LEN(L) (MBO_ATTR_HDR_LEN + (L) + 3) + +/* attribute len - (opclass + Pref + Reason) */ +#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_LEN(L) ((L) - 3) + +/* MBO Non-preferred Channel Report: "Preference" field value */ +enum { + MBO_STA_NON_OPERABLE_BAND_CHAN = 0, + MBO_STA_NON_PREFERRED_BAND_CHAN = 1, + MBO_STA_PREFERRED_BAND_CHAN = 255 +}; + +/* MBO Non-preferred Channel Report: "Reason Code" field value */ +enum { + MBO_NON_PREF_CHAN_RC_UNSPECIFIED = 0, + MBO_NON_PREF_CHAN_RC_BCN_STRENGTH = 1, + MBO_NON_PREF_CHAN_RC_CO_LOC_INTERFERENCE = 2, + MBO_NON_PREF_CHAN_RC_IN_DEV_INTERFERENCE = 3 +}; + +/* Cellular Data Capability Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_cap_attr_s { + /* Attribute ID - 0x03. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* MBO STA's cellular capability */ + uint8 cell_conn; +} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_cap_attr_t; + +/* MBO Cellular Data Capability: "Cellular Connectivity" field value */ +enum { + MBO_CELL_DATA_CONN_AVAILABLE = 1, + MBO_CELL_DATA_CONN_NOT_AVAILABLE = 2, + MBO_CELL_DATA_CONN_NOT_CAPABLE = 3 +}; + +/* Association Disallowed attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_disallowed_attr_s { + /* Attribute ID - 0x04. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Reason of not accepting new association */ + uint8 reason_code; +} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_disallowed_attr_t; + +/* Association Disallowed attr Reason code field values */ +enum { + MBO_ASSOC_DISALLOWED_RC_UNSPECIFIED = 1, + MBO_ASSOC_DISALLOWED_RC_MAX_STA_REACHED = 2, + MBO_ASSOC_DISALLOWED_RC_AIR_IFACE_OVERLOADED = 3, + MBO_ASSOC_DISALLOWED_RC_AUTH_SRVR_OVERLOADED = 4, + MBO_ASSOC_DISALLOWED_RC_INSUFFIC_RSSI = 5 +}; + +/* Cellular Data Conn Pref attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_conn_pref_attr_s { + /* Attribute ID - 0x05. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Preference value of cellular connection */ + uint8 cell_pref; +} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_conn_pref_attr_t; + +/* Cellular Data Conn Pref attr: Cellular Pref field values */ +enum { + MBO_CELLULAR_DATA_CONN_EXCLUDED = 1, + MBO_CELLULAR_DATA_CONN_NOT_PREFERRED = 2, + MBO_CELLULAR_DATA_CONN_PREFERRED = 255 +}; + +/* Transition Reason Code Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_reason_code_attr_s { + /* Attribute ID - 0x06. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Reason of transition recommendation */ + uint8 trans_reason_code; +} BWL_POST_PACKED_STRUCT wifi_mbo_trans_reason_code_attr_t; + +/* Transition Reason Code Attr: trans reason code field values */ +enum { + MBO_TRANS_REASON_UNSPECIFIED = 0, + MBO_TRANS_REASON_EXCESSV_FRM_LOSS_RATE = 1, + MBO_TRANS_REASON_EXCESSV_TRAFFIC_DELAY = 2, + MBO_TRANS_REASON_INSUFF_BW = 3, + MBO_TRANS_REASON_LOAD_BALANCING = 4, + MBO_TRANS_REASON_LOW_RSSI = 5, + MBO_TRANS_REASON_EXCESSV_RETRANS_RCVD = 6, + MBO_TRANS_REASON_HIGH_INTERFERENCE = 7, + MBO_TRANS_REASON_GRAY_ZONE = 8, + MBO_TRANS_REASON_PREMIUM_AP_TRANS = 9 +}; + +/* Transition Rejection Reason Code Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_rej_reason_code_attr_s { + /* Attribute ID - 0x07. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* Reason of transition rejection */ + uint8 trans_rej_reason_code; +} BWL_POST_PACKED_STRUCT wifi_mbo_trans_rej_reason_code_attr_t; + +/* Transition Rej Reason Code Attr: trans rej reason code field values */ +enum { + MBO_TRANS_REJ_REASON_UNSPECIFIED = 0, + MBO_TRANS_REJ_REASON_EXSSIV_FRM_LOSS_RATE = 1, + MBO_TRANS_REJ_REASON_EXSSIV_TRAFFIC_DELAY = 2, + MBO_TRANS_REJ_REASON_INSUFF_QOS_CAPACITY = 3, + MBO_TRANS_REJ_REASON_LOW_RSSI = 4, + MBO_TRANS_REJ_REASON_HIGH_INTERFERENCE = 5, + MBO_TRANS_REJ_REASON_SERVICE_UNAVAIL = 6 +}; + +/* Assoc Retry Delay Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_retry_delay_attr_s { + /* Attribute ID - 0x08. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint8 len; + /* No of Seconds before next assoc attempt */ + uint16 reassoc_delay; +} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_retry_delay_attr_t; + +#define MBO_ANQP_OUI_TYPE 0x12 /* OUI Type/Version */ + +/* MBO ANQP Element */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_anqp_elem_s { + /* ID - 56797 */ + uint16 info_id; + /* Length of the OUI + Vendor Specific content */ + uint16 len; + /* WFA_OUI 50:6F:9A */ + uint8 oui[WFA_OUI_LEN]; + /* MBO_ANQP_OUI_TYPE 0x12 */ + uint8 oui_type; + /* MBO ANQP element type */ + uint8 sub_type; + /* variable len payload */ + uint8 payload[1]; +} BWL_POST_PACKED_STRUCT wifi_mbo_anqp_elem_t; + +#define MBO_ANQP_ELEM_HDR_SIZE (OFFSETOF(wifi_mbo_anqp_elem_t, payload)) + +/* oui:3 bytes + oui type:1 byte + sub type:1 byte */ +#define MBO_ANQP_ELEM_NO_PAYLOAD_LEN 5 + +/* MBO ANQP Subtype Values */ +enum { + MBO_ANQP_ELEM_MBO_QUERY_LIST = 1, + MBO_ANQP_ELEM_CELL_DATA_CONN_PREF = 2 +}; + +/* MBO sub-elements */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_cap_subelem_s { + /* 0xDD */ + uint8 sub_elem_id; + /* Length of the following fields in sub-element */ + uint8 len; + /* WFA_OUI 50:6F:9A */ + uint8 oui[WFA_OUI_LEN]; + /* OUI_TYPE 0x03 */ + uint8 oui_type; + /* STA cellular capability */ + uint8 cell_conn; +} BWL_POST_PACKED_STRUCT wifi_mbo_cell_cap_subelem_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _MBO_H_ */ diff --git a/bcmdhd.100.10.315.x/include/miniopt.h b/bcmdhd.100.10.315.x/include/miniopt.h new file mode 100644 index 0000000..87895cb --- /dev/null +++ b/bcmdhd.100.10.315.x/include/miniopt.h @@ -0,0 +1,79 @@ +/* + * Command line options parser. + * + * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: miniopt.h 672943 2016-11-30 08:54:06Z $ + */ + +#ifndef MINI_OPT_H +#define MINI_OPT_H + +#ifdef __cplusplus +extern "C" { +#endif // endif + +/* ---- Include Files ---------------------------------------------------- */ + +/* ---- Constants and Types ---------------------------------------------- */ + +#define MINIOPT_MAXKEY 128 /* Max options */ +typedef struct miniopt { + + /* These are persistent after miniopt_init() */ + const char* name; /* name for prompt in error strings */ + const char* flags; /* option chars that take no args */ + bool longflags; /* long options may be flags */ + bool opt_end; /* at end of options (passed a "--") */ + + /* These are per-call to miniopt() */ + + int consumed; /* number of argv entries consumed in + * the most recent call to miniopt() + */ + bool positional; + bool good_int; /* 'val' member is the result of a successful + * strtol conversion of the option value + */ + char opt; + char key[MINIOPT_MAXKEY]; + char* valstr; /* positional param, or value for the option, + * or null if the option had + * no accompanying value + */ + uint uval; /* strtol translation of valstr */ + int val; /* strtol translation of valstr */ +} miniopt_t; + +void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags); +int miniopt(miniopt_t *t, char **argv); + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +#ifdef __cplusplus + } +#endif // endif + +#endif /* MINI_OPT_H */ diff --git a/bcmdhd.100.10.315.x/include/msf.h b/bcmdhd.100.10.315.x/include/msf.h new file mode 100644 index 0000000..2228dc4 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/msf.h @@ -0,0 +1,66 @@ +/* + * Common interface to MSF (multi-segment format) definitions. + * + * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: msf.h 619634 2016-02-17 19:01:25Z $ + */ + +#ifndef _WLC_MSF_H_ +#define _WLC_MSF_H_ + +struct wl_segment { + uint32 type; + uint32 offset; + uint32 length; + uint32 crc32; + uint32 flags; +}; +typedef struct wl_segment wl_segment_t; + +struct wl_segment_info { + uint8 magic[4]; + uint32 hdr_len; + uint32 crc32; + uint32 file_type; + uint32 num_segments; + wl_segment_t segments[1]; +}; +typedef struct wl_segment_info wl_segment_info_t; + +typedef struct wlc_blob_segment { + uint32 type; + uint8 *data; + uint32 length; +} wlc_blob_segment_t; + +/** Segment types in Binary Eventlog Archive file */ +enum bea_seg_type_e { + MSF_SEG_TYP_RTECDC_BIN = 1, + MSF_SEG_TYP_LOGSTRS_BIN = 2, + MSF_SEG_TYP_FW_SYMBOLS = 3, + MSF_SEG_TYP_ROML_BIN = 4 +}; + +#endif /* _WLC_MSF_H */ diff --git a/bcmdhd.100.10.315.x/include/msgtrace.h b/bcmdhd.100.10.315.x/include/msgtrace.h new file mode 100644 index 0000000..3e143c5 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/msgtrace.h @@ -0,0 +1,62 @@ +/* + * Trace messages sent over HBUS + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: msgtrace.h 542902 2015-03-22 23:29:48Z $ + */ + +#ifndef _MSGTRACE_H +#define _MSGTRACE_H + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. */ +#include + +#define MSGTRACE_VERSION 1 + +/* Message trace header */ +typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr { + uint8 version; + uint8 trace_type; +#define MSGTRACE_HDR_TYPE_MSG 0 +#define MSGTRACE_HDR_TYPE_LOG 1 + uint16 len; /* Len of the trace */ + uint32 seqnum; /* Sequence number of message. Useful if the message has been lost + * because of DMA error or a bus reset (ex: SDIO Func2) + */ + /* Msgtrace type only */ + uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */ + uint32 discarded_printf; /* Number of discarded printf because of trace overflow */ +} BWL_POST_PACKED_STRUCT msgtrace_hdr_t; + +#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t) + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _MSGTRACE_H */ diff --git a/bcmdhd.100.10.315.x/include/nan.h b/bcmdhd.100.10.315.x/include/nan.h new file mode 100644 index 0000000..816f13f --- /dev/null +++ b/bcmdhd.100.10.315.x/include/nan.h @@ -0,0 +1,1529 @@ +/* + * Fundamental types and constants relating to WFA NAN + * (Neighbor Awareness Networking) + * + * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: nan.h 758133 2018-04-17 19:07:15Z $ + */ +#ifndef _NAN_H_ +#define _NAN_H_ + +#include +#include <802.11.h> + +/* This marks the start of a packed structure section. */ +#include + +/* WiFi NAN OUI values */ +#define NAN_OUI "\x50\x6F\x9A" /* WFA OUI. WiFi-Alliance OUI */ +/* For oui_type field identifying the type and version of the NAN IE. */ +#define NAN_OUI_TYPE 0x13 /* Type/Version */ +#define NAN_AF_OUI_TYPE 0x18 /* Type/Version */ +/* IEEE 802.11 vendor specific information element. (Same as P2P_IE_ID.) */ +#define NAN_IE_ID 0xdd + +/* Same as P2P_PUB_AF_CATEGORY and DOT11_ACTION_CAT_PUBLIC */ +#define NAN_PUB_AF_CATEGORY DOT11_ACTION_CAT_PUBLIC +/* Protected dual public action frame category */ +#define NAN_PROT_DUAL_PUB_AF_CATEGORY DOT11_ACTION_CAT_PDPA +/* IEEE 802.11 Public Action Frame Vendor Specific. (Same as P2P_PUB_AF_ACTION.) 
*/ +#define NAN_PUB_AF_ACTION DOT11_PUB_ACTION_VENDOR_SPEC +/* Number of octents in hash of service name. (Same as P2P_WFDS_HASH_LEN.) */ +#define NAN_SVC_HASH_LEN 6 +/* Size of fixed length part of nan_pub_act_frame_t before attributes. */ +#define NAN_PUB_ACT_FRAME_FIXED_LEN 6 +/* Number of octents in master rank value. */ +#define NAN_MASTER_RANK_LEN 8 +/* NAN public action frame header size */ +#define NAN_PUB_ACT_FRAME_HDR_SIZE (OFFSETOF(nan_pub_act_frame_t, data)) +/* NAN network ID */ +#define NAN_NETWORK_ID "\x51\x6F\x9A\x01\x00\x00" +/* Service Control Type length */ +#define NAN_SVC_CONTROL_TYPE_LEN 2 +/* Binding Bitmap length */ +#define NAN_BINDING_BITMAP_LEN 2 +/* Service Response Filter (SRF) control field masks */ +#define NAN_SRF_BLOOM_MASK 0x01 +#define NAN_SRF_INCLUDE_MASK 0x02 +#define NAN_SRF_INDEX_MASK 0x0C +/* SRF Bloom Filter index shift */ +#define NAN_SRF_BLOOM_SHIFT 2 +#define NAN_SRF_INCLUDE_SHIFT 1 +/* Mask for CRC32 output, used in hash function for NAN bloom filter */ +#define NAN_BLOOM_CRC32_MASK 0xFFFF + +/* Attribute TLV header size */ +#define NAN_ATTR_ID_OFF 0 +#define NAN_ATTR_LEN_OFF 1 +#define NAN_ATTR_DATA_OFF 3 + +#define NAN_ATTR_ID_LEN 1u /* ID field length */ +#define NAN_ATTR_LEN_LEN 2u /* Length field length */ +#define NAN_ATTR_HDR_LEN (NAN_ATTR_ID_LEN + NAN_ATTR_LEN_LEN) +#define NAN_ENTRY_CTRL_LEN 1 /* Entry control field length from FAM attribute */ +#define NAN_MAP_ID_LEN 1 /* MAP ID length to signify band */ +#define NAN_OPERATING_CLASS_LEN 1 /* operating class field length from NAN FAM */ +#define NAN_CHANNEL_NUM_LEN 1 /* channel number field length 1 byte */ + +/* generic nan attribute total length */ +#define NAN_ATTR_TOT_LEN(_nan_attr) (ltoh16_ua(((const uint8 *)(_nan_attr)) + \ + NAN_ATTR_ID_LEN) + NAN_ATTR_HDR_LEN) + +/* NAN slot duration / period */ +#define NAN_MIN_TU 16 +#define NAN_TU_PER_DW 512 +#define NAN_MAX_DW 16 +#define NAN_MAX_TU (NAN_MAX_DW * NAN_TU_PER_DW) + +#define NAN_SLOT_DUR_0TU 0 +#define 
NAN_SLOT_DUR_16TU 16 +#define NAN_SLOT_DUR_32TU 32 +#define NAN_SLOT_DUR_64TU 64 +#define NAN_SLOT_DUR_128TU 128 +#define NAN_SLOT_DUR_256TU 256 +#define NAN_SLOT_DUR_512TU 512 +#define NAN_SLOT_DUR_1024TU 1024 +#define NAN_SLOT_DUR_2048TU 2048 +#define NAN_SLOT_DUR_4096TU 4096 +#define NAN_SLOT_DUR_8192TU 8192 + +#define NAN_SOC_CHAN_2G 6 /* NAN 2.4G discovery channel */ +#define NAN_SOC_CHAN_5G_CH149 149 /* NAN 5G discovery channel if upper band allowed */ +#define NAN_SOC_CHAN_5G_CH44 44 /* NAN 5G discovery channel if only lower band allowed */ + +/* size of ndc id */ +#define NAN_DATA_NDC_ID_SIZE 6 + +#define NAN_AVAIL_ENTRY_LEN_RES0 7 /* Avail entry len in FAM attribute for resolution 16TU */ +#define NAN_AVAIL_ENTRY_LEN_RES1 5 /* Avail entry len in FAM attribute for resolution 32TU */ +#define NAN_AVAIL_ENTRY_LEN_RES2 4 /* Avail entry len in FAM attribute for resolution 64TU */ + +/* map id field */ +#define NAN_MAPID_SPECIFIC_MAP_MASK 0x01 /* apply to specific map */ +#define NAN_MAPID_MAPID_MASK 0x1E +#define NAN_MAPID_MAPID_SHIFT 1 +#define NAN_MAPID_SPECIFIC_MAP(_mapid) ((_mapid) & NAN_MAPID_SPECIFIC_MAP_MASK) +#define NAN_MAPID_ALL_MAPS(_mapid) (!NAN_MAPID_SPECIFIC_MAP(_mapid)) +#define NAN_MAPID_MAPID(_mapid) (((_mapid) & NAN_MAPID_MAPID_MASK) \ + >> NAN_MAPID_MAPID_SHIFT) +#define NAN_MAPID_SET_SPECIFIC_MAPID(map_id) ((((map_id) << NAN_MAPID_MAPID_SHIFT) \ + & NAN_MAPID_MAPID_MASK) | NAN_MAPID_SPECIFIC_MAP_MASK) + +/* Vendor-specific public action frame for NAN */ +typedef BWL_PRE_PACKED_STRUCT struct nan_pub_act_frame_s { + /* NAN_PUB_AF_CATEGORY 0x04 */ + uint8 category_id; + /* NAN_PUB_AF_ACTION 0x09 */ + uint8 action_field; + /* NAN_OUI 0x50-6F-9A */ + uint8 oui[DOT11_OUI_LEN]; + /* NAN_OUI_TYPE 0x13 */ + uint8 oui_type; + /* One or more NAN Attributes follow */ + uint8 data[]; +} BWL_POST_PACKED_STRUCT nan_pub_act_frame_t; + +/* NAN attributes as defined in the nan spec */ +enum { + NAN_ATTR_MASTER_IND = 0, + NAN_ATTR_CLUSTER = 1, + 
NAN_ATTR_SVC_ID_LIST = 2, + NAN_ATTR_SVC_DESCRIPTOR = 3, + NAN_ATTR_CONN_CAP = 4, + NAN_ATTR_INFRA = 5, + NAN_ATTR_P2P = 6, + NAN_ATTR_IBSS = 7, + NAN_ATTR_MESH = 8, + NAN_ATTR_FURTHER_NAN_SD = 9, + NAN_ATTR_FURTHER_AVAIL = 10, + NAN_ATTR_COUNTRY_CODE = 11, + NAN_ATTR_RANGING = 12, + NAN_ATTR_CLUSTER_DISC = 13, + /* nan 2.0 */ + NAN_ATTR_SVC_DESC_EXTENSION = 14, + NAN_ATTR_NAN_DEV_CAP = 15, + NAN_ATTR_NAN_NDP = 16, + NAN_ATTR_NAN_NMSG = 17, + NAN_ATTR_NAN_AVAIL = 18, + NAN_ATTR_NAN_NDC = 19, + NAN_ATTR_NAN_NDL = 20, + NAN_ATTR_NAN_NDL_QOS = 21, + NAN_ATTR_MCAST_SCHED = 22, + NAN_ATTR_UNALIGN_SCHED = 23, + NAN_ATTR_PAGING_UCAST = 24, + NAN_ATTR_PAGING_MCAST = 25, + NAN_ATTR_RANGING_INFO = 26, + NAN_ATTR_RANGING_SETUP = 27, + NAN_ATTR_FTM_RANGE_REPORT = 28, + NAN_ATTR_ELEMENT_CONTAINER = 29, + NAN_ATTR_WLAN_INFRA_EXT = 30, + NAN_ATTR_EXT_P2P_OPER = 31, + NAN_ATTR_EXT_IBSS = 32, + NAN_ATTR_EXT_MESH = 33, + NAN_ATTR_CIPHER_SUITE_INFO = 34, + NAN_ATTR_SEC_CTX_ID_INFO = 35, + NAN_ATTR_SHARED_KEY_DESC = 36, + NAN_ATTR_MCAST_SCHED_CHANGE = 37, + NAN_ATTR_MCAST_SCHED_OWNER_CHANGE = 38, + NAN_ATTR_PUBLIC_AVAILABILITY = 39, + NAN_ATTR_SUB_SVC_ID_LIST = 40, + NAN_ATTR_NDPE = 41, + /* change NAN_ATTR_MAX_ID to max ids + 1, excluding NAN_ATTR_VENDOR_SPECIFIC. 
+ * This is used in nan_parse.c + */ + NAN_ATTR_MAX_ID = NAN_ATTR_NDPE + 1, + + NAN_ATTR_VENDOR_SPECIFIC = 221 +}; + +enum wifi_nan_avail_resolution { + NAN_AVAIL_RES_16_TU = 0, + NAN_AVAIL_RES_32_TU = 1, + NAN_AVAIL_RES_64_TU = 2, + NAN_AVAIL_RES_INVALID = 255 +}; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ie_s { + uint8 id; /* IE ID: NAN_IE_ID 0xDD */ + uint8 len; /* IE length */ + uint8 oui[DOT11_OUI_LEN]; /* NAN_OUI 50:6F:9A */ + uint8 oui_type; /* NAN_OUI_TYPE 0x13 */ + uint8 attr[]; /* var len attributes */ +} BWL_POST_PACKED_STRUCT wifi_nan_ie_t; + +#define NAN_IE_HDR_SIZE (OFFSETOF(wifi_nan_ie_t, attr)) + +/* master indication record */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_master_ind_attr_s { + uint8 id; + uint16 len; + uint8 master_preference; + uint8 random_factor; +} BWL_POST_PACKED_STRUCT wifi_nan_master_ind_attr_t; + +/* cluster attr record */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_cluster_attr_s { + uint8 id; + uint16 len; + uint8 amr[NAN_MASTER_RANK_LEN]; + uint8 hop_count; + /* Anchor Master Beacon Transmission Time */ + uint32 ambtt; +} BWL_POST_PACKED_STRUCT wifi_nan_cluster_attr_t; + +/* container for service ID records */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_id_attr_s { + uint8 id; + uint16 len; + uint8 svcid[0]; /* 6*len of srvc IDs */ +} BWL_POST_PACKED_STRUCT wifi_nan_svc_id_attr_t; + +/* service_control bitmap for wifi_nan_svc_descriptor_attr_t below */ +#define NAN_SC_PUBLISH 0x0 +#define NAN_SC_SUBSCRIBE 0x1 +#define NAN_SC_FOLLOWUP 0x2 +/* Set to 1 if a Matching Filter field is included in descriptors. */ +#define NAN_SC_MATCHING_FILTER_PRESENT 0x4 +/* Set to 1 if a Service Response Filter field is included in descriptors. */ +#define NAN_SC_SR_FILTER_PRESENT 0x8 +/* Set to 1 if a Service Info field is included in descriptors. 
*/ +#define NAN_SC_SVC_INFO_PRESENT 0x10 +/* range is close proximity only */ +#define NAN_SC_RANGE_LIMITED 0x20 +/* Set to 1 if binding bitamp is present in descriptors */ +#define NAN_SC_BINDING_BITMAP_PRESENT 0x40 + +/* Service descriptor */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_descriptor_attr_s { + /* Attribute ID - 0x03. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* Hash of the Service Name */ + uint8 svc_hash[NAN_SVC_HASH_LEN]; + /* Publish or subscribe instance id */ + uint8 instance_id; + /* Requestor Instance ID */ + uint8 requestor_id; + /* Service Control Bitmask. Also determines what data follows. */ + uint8 svc_control; + /* Optional fields follow */ +} BWL_POST_PACKED_STRUCT wifi_nan_svc_descriptor_attr_t; + +/* IBSS attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ibss_attr_s { + /* Attribute ID - 0x07. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* BSSID of the ibss */ + struct ether_addr bssid; + /* + map control:, bits: + [0-3]: Id for associated further avail map attribute + [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved + [6] : repeat : 0 - applies to next DW, 1: 16 intervals max? wtf? + [7] : reserved + */ + uint8 map_ctrl; + /* avail. intervals bitmap, var len */ + uint8 avail_bmp[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t; + +/* Further Availability MAP attr */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s { + /* Attribute ID - 0x0A. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* MAP id: val [0..15], values[16-255] reserved */ + uint8 map_id; + /* availibility entry, var len */ + uint8 avil_entry[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_favail_attr_t; + +/* Further Availability MAP attr */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_s { + /* + entry control + [0-1]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; + [2:7] reserved + */ + uint8 entry_ctrl; + /* operating class: freq band etc IEEE 802.11 */ + uint8 opclass; + /* channel number */ + uint8 chan; + /* avail bmp, var len */ + uint8 avail_bmp[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_t; + +/* Map control Field */ +#define NAN_MAPCTRL_IDMASK 0x7 +#define NAN_MAPCTRL_DURSHIFT 4 +#define NAN_MAPCTRL_DURMASK 0x30 +#define NAN_MAPCTRL_REPEAT 0x40 +#define NAN_MAPCTRL_REPEATSHIFT 6 + +#define NAN_VENDOR_TYPE_RTT 0 +#define NAN_VENDOR_TYPE_P2P 1 + +/* Vendor Specific Attribute - old definition */ +/* TODO remove */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vendor_attr_s { + uint8 id; /* 0xDD */ + uint16 len; /* IE length */ + uint8 oui[DOT11_OUI_LEN]; /* 00-90-4C */ + uint8 type; /* attribute type */ + uint8 attr[1]; /* var len attributes */ +} BWL_POST_PACKED_STRUCT wifi_nan_vendor_attr_t; + +#define NAN_VENDOR_HDR_SIZE (OFFSETOF(wifi_nan_vendor_attr_t, attr)) + +/* vendor specific attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vndr_attr_s { + uint8 id; /* 0xDD */ + uint16 len; /* length of following fields */ + uint8 oui[DOT11_OUI_LEN]; /* vendor specific OUI */ + uint8 body[]; +} BWL_POST_PACKED_STRUCT wifi_nan_vndr_attr_t; + +/* p2p operation attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_p2p_op_attr_s { + /* Attribute ID - 0x06. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* P2P device role */ + uint8 dev_role; + /* BSSID of the ibss */ + struct ether_addr p2p_dev_addr; + /* + map control:, bits: + [0-3]: Id for associated further avail map attribute + [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved + [6] : repeat : 0 - applies to next DW, 1: 16 intervals max? wtf? + [7] : reserved + */ + uint8 map_ctrl; + /* avail. intervals bitmap */ + uint8 avail_bmp[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_p2p_op_attr_t; + +/* ranging attribute */ +#define NAN_RANGING_MAP_CTRL_ID_SHIFT 0 +#define NAN_RANGING_MAP_CTRL_ID_MASK 0x0F +#define NAN_RANGING_MAP_CTRL_DUR_SHIFT 4 +#define NAN_RANGING_MAP_CTRL_DUR_MASK 0x30 +#define NAN_RANGING_MAP_CTRL_REPEAT_SHIFT 6 +#define NAN_RANGING_MAP_CTRL_REPEAT_MASK 0x40 +#define NAN_RANGING_MAP_CTRL_REPEAT_DW(_ctrl) (((_ctrl) & \ + NAN_RANGING_MAP_CTRL_DUR_MASK) ? 16 : 1) +#define NAN_RANGING_MAP_CTRL(_id, _dur, _repeat) (\ + (((_id) << NAN_RANGING_MAP_CTRL_ID_SHIFT) & \ + NAN_RANGING_MAP_CTRL_ID_MASK) | \ + (((_dur) << NAN_RANGING_MAP_CTRL_DUR_SHIFT) & \ + NAN_RANGING_MAP_CTRL_DUR_MASK) | \ + (((_repeat) << NAN_RANGING_MAP_CTRL_REPEAT_SHIFT) & \ + NAN_RANGING_MAP_CTRL_REPEAT_MASK)) + +enum { + NAN_RANGING_PROTO_FTM = 0 +}; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_attr_s { + uint8 id; /* 0x0C */ + uint16 len; /* length that follows */ + struct ether_addr dev_addr; /* device mac address */ + + /* + map control:, bits: + [0-3]: Id for associated further avail map attribute + [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved + [6] : repeat : 0 - applies to next DW, 1: 16 intervals max? wtf? 
+ [7] : reserved + */ + uint8 map_ctrl; + + uint8 protocol; /* FTM = 0 */ + uint32 avail_bmp; /* avail interval bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_attr_t; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_info_attr_s { + uint8 id; /* 0x1A */ + uint16 len; /* length that follows */ + /* + location info availability bit map + 0: LCI Local Coordinates + 1: Geospatial LCI WGS84 + 2: Civi Location + 3: Last Movement Indication + [4-7]: reserved + */ + uint8 lc_info_avail; + /* + Last movement indication + present if bit 3 is set in lc_info_avail + cluster TSF[29:14] at the last detected platform movement + */ + uint16 last_movement; + +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_info_attr_t; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_hdr_s { + uint8 id; /* 0x1B */ + uint16 len; /* length that follows */ + uint8 dialog_token; /* Identify req and resp */ + uint8 type_status; /* bits 0-3 type, 4-7 status */ + /* reason code + i. when frm type = response & status = reject + ii. frm type = termination + */ + uint8 reason; +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_hdr_t; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_s { + + wifi_nan_ranging_setup_attr_hdr_t setup_attr_hdr; + /* Below fields not required when frm type = termination */ + uint8 ranging_ctrl; /* Bit 0: ranging report required or not */ + uint8 ftm_params[3]; + uint8 data[]; /* schedule entry list */ +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_t; + +#define NAN_RANGE_SETUP_ATTR_OFFSET_TBM_INFO (OFFSETOF(wifi_nan_ranging_setup_attr_t, data)) + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_report_attr_s { + uint8 id; /* 0x1C */ + uint16 len; /* length that follows */ + /* FTM report format in spec. 
+ See definition in 9.4.2.22.18 in 802.11mc D5.0 + */ + uint8 entry_count; + uint8 data[2]; /* includes pad */ + /* + dot11_ftm_range_entry_t entries[entry_count]; + uint8 error_count; + dot11_ftm_error_entry_t errors[error_count]; + */ +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_report_attr_t; + +/* Ranging control flags */ +#define NAN_RNG_REPORT_REQUIRED 0x01 +#define NAN_RNG_FTM_PARAMS_PRESENT 0x02 +#define NAN_RNG_SCHED_ENTRY_PRESENT 0X04 + +/* Location info flags */ +#define NAN_RNG_LOCATION_FLAGS_LOCAL_CORD 0x1 +#define NAN_RNG_LOCATION_FLAGS_GEO_SPATIAL 0x2 +#define NAN_RNG_LOCATION_FLAGS_CIVIC 0x4 +#define NAN_RNG_LOCATION_FLAGS_LAST_MVMT 0x8 + +/* Last movement mask and shift value */ +#define NAN_RNG_LOCATION_MASK_LAST_MVT_TSF 0x3FFFC000 +#define NAN_RNG_LOCATION_SHIFT_LAST_MVT_TSF 14 + +/* FTM params shift values */ +#define NAN_FTM_MAX_BURST_DUR_SHIFT 0 +#define NAN_FTM_MIN_FTM_DELTA_SHIFT 4 +#define NAN_FTM_NUM_FTM_SHIFT 10 +#define NAN_FTM_FORMAT_BW_SHIFT 15 + +/* FTM params mask */ +#define NAN_FTM_MAX_BURST_DUR_MASK 0x00000F +#define NAN_FTM_MIN_FTM_DELTA_MASK 0x00003F +#define NAN_FTM_NUM_FTM_MASK 0x00001F +#define NAN_FTM_FORMAT_BW_MASK 0x00003F + +#define FTM_PARAMS_BURSTTMO_FACTOR 250 + +/* set to value to uint32 */ +#define NAN_FTM_SET_BURST_DUR(ftm, dur) (ftm |= (((dur + 2) & NAN_FTM_MAX_BURST_DUR_MASK) <<\ + NAN_FTM_MAX_BURST_DUR_SHIFT)) +#define NAN_FTM_SET_FTM_DELTA(ftm, delta) (ftm |= (((delta/100) & NAN_FTM_MIN_FTM_DELTA_MASK) <<\ + NAN_FTM_MIN_FTM_DELTA_SHIFT)) +#define NAN_FTM_SET_NUM_FTM(ftm, delta) (ftm |= ((delta & NAN_FTM_NUM_FTM_MASK) <<\ + NAN_FTM_NUM_FTM_SHIFT)) +#define NAN_FTM_SET_FORMAT_BW(ftm, delta) (ftm |= ((delta & NAN_FTM_FORMAT_BW_MASK) <<\ + NAN_FTM_FORMAT_BW_SHIFT)) +/* set uint32 to attribute */ +#define NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm_u32, ftm_attr) {ftm_attr[0] = ftm_u32 & 0xFF; \ + ftm_attr[1] = (ftm_u32 >> 8) & 0xFF; ftm_attr[2] = (ftm_u32 >> 16) & 0xFF;} + +/* get atrribute to uint32 */ +#define 
NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_p, ftm_u32) (ftm_u32 = ftm_p[0] | ftm_p[1] << 8 | \ + ftm_p[2] << 16) +/* get param values from uint32 */ +#define NAN_FTM_GET_BURST_DUR(ftm) (((ftm >> NAN_FTM_MAX_BURST_DUR_SHIFT) &\ + NAN_FTM_MAX_BURST_DUR_MASK)) +#define NAN_FTM_GET_BURST_DUR_USEC(_val) ((1 << ((_val)-2)) * FTM_PARAMS_BURSTTMO_FACTOR) +#define NAN_FTM_GET_FTM_DELTA(ftm) (((ftm >> NAN_FTM_MIN_FTM_DELTA_SHIFT) &\ + NAN_FTM_MIN_FTM_DELTA_MASK)*100) +#define NAN_FTM_GET_NUM_FTM(ftm) ((ftm >> NAN_FTM_NUM_FTM_SHIFT) &\ + NAN_FTM_NUM_FTM_MASK) +#define NAN_FTM_GET_FORMAT_BW(ftm) ((ftm >> NAN_FTM_FORMAT_BW_SHIFT) &\ + NAN_FTM_FORMAT_BW_MASK) + +#define NAN_CONN_CAPABILITY_WFD 0x0001 +#define NAN_CONN_CAPABILITY_WFDS 0x0002 +#define NAN_CONN_CAPABILITY_TDLS 0x0004 +#define NAN_CONN_CAPABILITY_INFRA 0x0008 +#define NAN_CONN_CAPABILITY_IBSS 0x0010 +#define NAN_CONN_CAPABILITY_MESH 0x0020 + +#define NAN_DEFAULT_MAP_ID 0 /* nan default map id */ +#define NAN_DEFAULT_MAP_CTRL 0 /* nan default map control */ + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_conn_cap_attr_s { + /* Attribute ID - 0x04. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + uint16 conn_cap_bmp; /* Connection capability bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_conn_cap_attr_t; + +/* NAN Element container Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_container_attr_s { + uint8 id; /* id - 0x20 */ + uint16 len; /* Total length of following IEs */ + uint8 map_id; /* map id */ + uint8 data[1]; /* Data pointing to one or more IEs */ +} BWL_POST_PACKED_STRUCT wifi_nan_container_attr_t; + +/* NAN 2.0 NAN avail attribute */ + +/* Availability Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_attr_s { + uint8 id; /* id - 0x12 */ + uint16 len; /* total length */ + uint8 seqid; /* sequence id */ + uint16 ctrl; /* attribute control */ + uint8 entry[1]; /* availability entry list */ +} BWL_POST_PACKED_STRUCT wifi_nan_avail_attr_t; + +/* for processing/building time bitmap info in nan_avail_entry */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_time_bitmap_s { + uint16 ctrl; /* Time bitmap control */ + uint8 len; /* Time bitmap length */ + uint8 bitmap[]; /* Time bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_time_bitmap_t; + +/* Availability Entry format */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_attr_s { + uint16 len; /* Length */ + uint16 entry_cntrl; /* Entry Control */ + uint8 var[]; /* Time bitmap and channel entry list */ +} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_attr_t; + +/* FAC Channel Entry (section 10.7.19.1.5) */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_chan_entry_s { + uint8 oper_class; /* Operating Class */ + uint16 chan_bitmap; /* Channel Bitmap */ + uint8 primary_chan_bmp; /* Primary Channel Bitmap */ + uint8 aux_chan[0]; /* Auxiliary Channel bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_chan_entry_t; + +/* Channel entry */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_s { + uint8 opclass; /* Operating class */ + uint16 chan_bitmap; /* Channel bitmap */ + uint8 
prim_bitmap; /* Primary channel bitmap */ + uint16 aux_bitmap; /* Time bitmap length */ +} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_t; + +/* Type of Availability: committed */ +#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL_MASK 0x1 +/* Type of Availability: potential */ +#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL_MASK 0x2 +/* Type of Availability: conditional */ +#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL_MASK 0x4 + +#define NAN_AVAIL_CTRL_MAP_ID_MASK 0x000F +#define NAN_AVAIL_CTRL_MAP_ID(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MAP_ID_MASK) +#define NAN_AVAIL_CTRL_COMM_CHANGED_MASK 0x0010 +#define NAN_AVAIL_CTRL_COMM_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_COMM_CHANGED_MASK) +#define NAN_AVAIL_CTRL_POTEN_CHANGED_MASK 0x0020 +#define NAN_AVAIL_CTRL_POTEN_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_POTEN_CHANGED_MASK) +#define NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK 0x0040 +#define NAN_AVAIL_CTRL_PUBLIC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK) +#define NAN_AVAIL_CTRL_NDC_CHANGED_MASK 0x0080 +#define NAN_AVAIL_CTRL_NDC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_NDC_CHANGED_MASK) +#define NAN_AVAIL_CTRL_MCAST_CHANGED_MASK 0x0100 +#define NAN_AVAIL_CTRL_MCAST_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHANGED_MASK) +#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK 0x0200 +#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK) +#define NAN_AVAIL_CTRL_CHANGED_FLAGS_MASK 0x03f0 + +#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK 0x07 +#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE(_flags) ((_flags) & NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK) +#define NAN_AVAIL_ENTRY_CTRL_USAGE_MASK 0x18 +#define NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT 3 +#define NAN_AVAIL_ENTRY_CTRL_USAGE(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_USAGE_MASK) \ + >> NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT) +#define NAN_AVAIL_ENTRY_CTRL_UTIL_MASK 0xE0 +#define NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT 5 +#define NAN_AVAIL_ENTRY_CTRL_UTIL(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_UTIL_MASK) \ + >> 
NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT) +#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK 0xF00 +#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT 8 +#define NAN_AVAIL_ENTRY_CTRL_RX_NSS(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK) \ + >> NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT) +#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK 0x1000 +#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT 12 +#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT(_flags) (((_flags) & \ + NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK) >> NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT) + +#define NAN_TIME_BMAP_CTRL_BITDUR_MASK 0x07 +#define NAN_TIME_BMAP_CTRL_BITDUR(_flags) ((_flags) & NAN_TIME_BMAP_CTRL_BITDUR_MASK) +#define NAN_TIME_BMAP_CTRL_PERIOD_MASK 0x38 +#define NAN_TIME_BMAP_CTRL_PERIOD_SHIFT 3 +#define NAN_TIME_BMAP_CTRL_PERIOD(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_PERIOD_MASK) \ + >> NAN_TIME_BMAP_CTRL_PERIOD_SHIFT) +#define NAN_TIME_BMAP_CTRL_OFFSET_MASK 0x7FC0 +#define NAN_TIME_BMAP_CTRL_OFFSET_SHIFT 6 +#define NAN_TIME_BMAP_CTRL_OFFSET(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_OFFSET_MASK) \ + >> NAN_TIME_BMAP_CTRL_OFFSET_SHIFT) +#define NAN_TIME_BMAP_LEN(avail_entry) \ + (*(uint8 *)(((wifi_nan_avail_entry_attr_t *)avail_entry)->var + 2)) + +#define NAN_AVAIL_CHAN_LIST_HDR_LEN 1 +#define NAN_AVAIL_CHAN_LIST_TYPE_BAND 0x00 +#define NAN_AVAIL_CHAN_LIST_TYPE_CHANNEL 0x01 +#define NAN_AVAIL_CHAN_LIST_NON_CONTIG_BW 0x02 +#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK 0xF0 +#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT 4 +#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES(_ctrl) (((_ctrl) & NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK) \ + >> NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT) + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_list_s { + uint8 chan_info; + uint8 var[0]; +} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_list_t; + +/* define for chan_info */ +#define NAN_CHAN_OP_CLASS_MASK 0x01 +#define NAN_CHAN_NON_CONT_BW_MASK 0x02 +#define NAN_CHAN_RSVD_MASK 0x03 +#define NAN_CHAN_NUM_ENTRIES_MASK 0xF0 + +typedef 
BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s { + uint8 band[0]; +} BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t; + +/* Type of Availability: committed */ +#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL 0x1 +/* Type of Availability: potential */ +#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL 0x2 +/* Type of Availability: conditional */ +#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL 0x4 +/* Committed + Potential */ +#define NAN_ENTRY_CNTRL_TYPE_COMM_POTEN \ + (NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL) +/* Conditional + Potential */ +#define NAN_ENTRY_CNTRL_TYPE_COND_POTEN \ + (NAN_ENTRY_CNTRL_TYPE_COND_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL) + +/* Type of Availability */ +#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_MASK 0x07 +#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_SHIFT 0 +/* Usage Preference */ +#define NAN_ENTRY_CNTRL_USAGE_PREF_MASK 0x18 +#define NAN_ENTRY_CNTRL_USAGE_PREF_SHIFT 3 +/* Utilization */ +#define NAN_ENTRY_CNTRL_UTIL_MASK 0x1E0 +#define NAN_ENTRY_CNTRL_UTIL_SHIFT 5 + +/* Time Bitmap Control field (section 5.7.18.2.3) */ + +/* Reserved */ +#define NAN_TIME_BMP_CNTRL_RSVD_MASK 0x01 +#define NAN_TIME_BMP_CNTRL_RSVD_SHIFT 0 +/* Bitmap Len */ +#define NAN_TIME_BMP_CNTRL_BMP_LEN_MASK 0x7E +#define NAN_TIME_BMP_CNTRL_BMP_LEN_SHIFT 1 +/* Bit Duration */ +#define NAN_TIME_BMP_CNTRL_BIT_DUR_MASK 0x380 +#define NAN_TIME_BMP_CNTRL_BIT_DUR_SHIFT 7 +/* Bitmap Len */ +#define NAN_TIME_BMP_CNTRL_PERIOD_MASK 0x1C00 +#define NAN_TIME_BMP_CNTRL_PERIOD_SHIFT 10 +/* Start Offset */ +#define NAN_TIME_BMP_CNTRL_START_OFFSET_MASK 0x3FE000 +#define NAN_TIME_BMP_CNTRL_START_OFFSET_SHIFT 13 +/* Reserved */ +#define NAN_TIME_BMP_CNTRL_RESERVED_MASK 0xC00000 +#define NAN_TIME_BMP_CNTRL_RESERVED_SHIFT 22 + +/* Time Bitmap Control field: Period */ +typedef enum +{ + NAN_TIME_BMP_CTRL_PERIOD_128TU = 1, + NAN_TIME_BMP_CTRL_PERIOD_256TU = 2, + NAN_TIME_BMP_CTRL_PERIOD_512TU = 3, + NAN_TIME_BMP_CTRL_PERIOD_1024TU = 4, + NAN_TIME_BMP_CTRL_PERIOD_2048U = 5, + 
NAN_TIME_BMP_CTRL_PERIOD_4096U = 6, + NAN_TIME_BMP_CTRL_PERIOD_8192U = 7 +} nan_time_bmp_ctrl_repeat_interval_t; + +enum +{ + NAN_TIME_BMP_BIT_DUR_16TU_IDX = 0, + NAN_TIME_BMP_BIT_DUR_32TU_IDX = 1, + NAN_TIME_BMP_BIT_DUR_64TU_IDX = 2, + NAN_TIME_BMP_BIT_DUR_128TU_IDX = 3 +}; + +enum +{ + NAN_TIME_BMP_BIT_DUR_IDX_0 = 16, + NAN_TIME_BMP_BIT_DUR_IDX_1 = 32, + NAN_TIME_BMP_BIT_DUR_IDX_2 = 64, + NAN_TIME_BMP_BIT_DUR_IDX_3 = 128 +}; + +enum +{ + NAN_TIME_BMP_CTRL_PERIOD_IDX_1 = 128, + NAN_TIME_BMP_CTRL_PERIOD_IDX_2 = 256, + NAN_TIME_BMP_CTRL_PERIOD_IDX_3 = 512, + NAN_TIME_BMP_CTRL_PERIOD_IDX_4 = 1024, + NAN_TIME_BMP_CTRL_PERIOD_IDX_5 = 2048, + NAN_TIME_BMP_CTRL_PERIOD_IDX_6 = 4096, + NAN_TIME_BMP_CTRL_PERIOD_IDX_7 = 8192 +}; + +/* Channel Entries List field */ + +/* Type */ +#define NAN_CHAN_ENTRY_TYPE_MASK 0x01 +#define NAN_CHAN_ENTRY_TYPE_SHIFT 0 +/* Channel Entry Length Indication */ +#define NAN_CHAN_ENTRY_LEN_IND_MASK 0x02 +#define NAN_CHAN_ENTRY_LEN_IND_SHIFT 1 +/* Reserved */ +#define NAN_CHAN_ENTRY_RESERVED_MASK 0x0C +#define NAN_CHAN_ENTRY_RESERVED_SHIFT 2 +/* Number of FAC Band or Channel Entries */ +#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_MASK 0xF0 +#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_SHIFT 4 + +#define NAN_CHAN_ENTRY_TYPE_BANDS 0 +#define NAN_CHAN_ENTRY_TYPE_OPCLASS_CHANS 1 + +#define NAN_CHAN_ENTRY_BW_LT_80MHZ 0 +#define NAN_CHAN_ENTRY_BW_EQ_160MHZ 1 + +/* + * NDL Attribute WFA Tech. 
Spec ver 1.0.r12 (section 10.7.19.2) + */ +#define NDL_ATTR_IM_MAP_ID_LEN 1 +#define NDL_ATTR_IM_TIME_BMP_CTRL_LEN 2 +#define NDL_ATTR_IM_TIME_BMP_LEN_LEN 1 + +/* + * NDL Control field - Table xx + */ +#define NDL_ATTR_CTRL_PEER_ID_PRESENT_MASK 0x01 +#define NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT 0 +#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_MASK 0x02 +#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT 1 +#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_MASK 0x04 +#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT 2 +#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_MASK 0x08 +#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT 3 +#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_MASK 0x10 /* max idle period */ +#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT 4 +#define NDL_ATTR_CTRL_NDL_TYPE_MASK 0x20 /* NDL type */ +#define NDL_ATTR_CTRL_NDL_TYPE_SHIFT 5 +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_MASK 0xC0 /* NDL Setup Reason */ +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_SHIFT 6 + +/* NDL setup Reason */ +#define NDL_ATTR_CTRL_NDL_TYPE_S_NDL 0x0 /* S-NDL */ +#define NDL_ATTR_CTRL_NDL_TYPE_P_NDL 0x1 /* P-NDL */ + +/* NDL setup Reason */ +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_NDP_RANG 0x0 /* NDP or Ranging */ +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_FSD_GAS 0x1 /* FSD using GAS */ + +#define NAN_NDL_TYPE_MASK 0x0F +#define NDL_ATTR_TYPE_STATUS_REQUEST 0x00 +#define NDL_ATTR_TYPE_STATUS_RESPONSE 0x01 +#define NDL_ATTR_TYPE_STATUS_CONFIRM 0x02 +#define NDL_ATTR_TYPE_STATUS_CONTINUED 0x00 +#define NDL_ATTR_TYPE_STATUS_ACCEPTED 0x10 +#define NDL_ATTR_TYPE_STATUS_REJECTED 0x20 + +#define NAN_NDL_TYPE_CHECK(_ndl, x) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == (x)) +#define NAN_NDL_REQUEST(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \ + NDL_ATTR_TYPE_STATUS_REQUEST) +#define NAN_NDL_RESPONSE(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \ + NDL_ATTR_TYPE_STATUS_RESPONSE) +#define NAN_NDL_CONFIRM(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \ + NDL_ATTR_TYPE_STATUS_CONFIRM) + +#define NAN_NDL_STATUS_SHIFT 4 
+#define NAN_NDL_STATUS_MASK 0xF0 +#define NAN_NDL_CONT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \ + NDL_ATTR_TYPE_STATUS_CONTINUED) +#define NAN_NDL_ACCEPT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \ + NDL_ATTR_TYPE_STATUS_ACCEPTED) +#define NAN_NDL_REJECT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \ + NDL_ATTR_TYPE_STATUS_REJECTED) + +#define NDL_ATTR_CTRL_NONE 0 +#define NDL_ATTR_CTRL_PEER_ID_PRESENT (1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_IMSCHED_PRESENT (1 << NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_NDC_PRESENT (1 << NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_NDL_QOS_PRESENT (1 << NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT (1 << NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT) + +#define NA_NDL_IS_IMMUT_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_IMSCHED_PRESENT) +#define NA_NDL_IS_PEER_ID_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_PEER_ID_PRESENT) +#define NA_NDL_IS_MAX_IDLE_PER_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT) + +#define NDL_ATTR_PEERID_LEN 1 +#define NDL_ATTR_MAX_IDLE_PERIOD_LEN 2 + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_attr_s { + uint8 id; /* NAN_ATTR_NAN_NDL = 0x17 */ + uint16 len; /* Length of the fields in the attribute */ + uint8 dialog_token; /* Identify req and resp */ + uint8 type_status; /* Bits[3-0] type subfield, Bits[7-4] status subfield */ + uint8 reason; /* Identifies reject reason */ + uint8 ndl_ctrl; /* NDL control field */ + uint8 var[]; /* Optional fields follow */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndl_attr_t; + +/* + * NDL QoS Attribute WFA Tech. Spec ver r26 + */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_qos_attr_s { + uint8 id; /* NAN_ATTR_NAN_NDL_QOS = 24 */ + uint16 len; /* Length of the attribute field following */ + uint8 min_slots; /* Min. 
number of FAW slots needed per DW interval */ + uint16 max_latency; /* Max interval between non-cont FAW */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndl_qos_attr_t; + +/* no preference to min time slots */ +#define NAN_NDL_QOS_MIN_SLOT_NO_PREF 0 +/* no preference to no. of slots between two non-contiguous slots */ +#define NAN_NDL_QOS_MAX_LAT_NO_PREF 0xFFFF + +/* Device Capability Attribute */ + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_dev_cap_s { + uint8 id; /* 0x0F */ + uint16 len; /* Length */ + uint8 map_id; /* map id */ + uint16 commit_dw_info; /* Committed DW Info */ + uint8 bands_supported; /* Supported Bands */ + uint8 op_mode; /* Operation Mode */ + uint8 num_antennas; /* Bit 0-3 tx, 4-7 rx */ + uint16 chan_switch_time; /* Max channel switch time in us */ + uint8 capabilities; /* DFS Master, Extended key id etc */ +} BWL_POST_PACKED_STRUCT wifi_nan_dev_cap_t; + +/* map id related */ + +/* all maps */ +#define NAN_DEV_CAP_ALL_MAPS_FLAG_MASK 0x1 /* nan default map control */ +#define NAN_DEV_CAP_ALL_MAPS_FLAG_SHIFT 0 +/* map id */ +#define NAN_DEV_CAP_MAPID_MASK 0x1E +#define NAN_DEV_CAP_MAPID_SHIFT 1 + +/* Awake DW Info field format */ + +/* 2.4GHz DW */ +#define NAN_DEV_CAP_AWAKE_DW_2G_MASK 0x07 +/* 5GHz DW */ +#define NAN_DEV_CAP_AWAKE_DW_5G_MASK 0x38 +/* Reserved */ +#define NAN_DEV_CAP_AWAKE_DW_RSVD_MASK 0xC0 + +/* bit shift for dev cap */ +#define NAN_DEV_CAP_AWAKE_DW_2G_SHIFT 0 +#define NAN_DEV_CAP_AWAKE_DW_5G_SHIFT 3 + +/* Device Capability Attribute Format */ + +/* Committed DW Info field format */ +/* 2.4GHz DW */ +#define NAN_DEV_CAP_COMMIT_DW_2G_MASK 0x07 +#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_MASK 0x3C0 +/* 5GHz DW */ +#define NAN_DEV_CAP_COMMIT_DW_5G_MASK 0x38 +#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_MASK 0x3C00 +/* Reserved */ +#define NAN_DEV_CAP_COMMIT_DW_RSVD_MASK 0xC000 +/* Committed DW bit shift for dev cap */ +#define NAN_DEV_CAP_COMMIT_DW_2G_SHIFT 0 +#define NAN_DEV_CAP_COMMIT_DW_5G_SHIFT 3 +#define 
NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_SHIFT 6 +#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_SHIFT 10 +/* Operation Mode */ +#define NAN_DEV_CAP_OP_PHY_MODE_HT_ONLY 0x00 +#define NAN_DEV_CAP_OP_PHY_MODE_VHT 0x01 +#define NAN_DEV_CAP_OP_PHY_MODE_VHT_8080 0x02 +#define NAN_DEV_CAP_OP_PHY_MODE_VHT_160 0x04 +#define NAN_DEV_CAP_OP_PAGING_NDL 0x08 + +#define NAN_DEV_CAP_OP_MODE_VHT_MASK 0x01 +#define NAN_DEV_CAP_OP_MODE_VHT_SHIFT 0 +#define NAN_DEV_CAP_OP_MODE_VHT8080_MASK 0x02 +#define NAN_DEV_CAP_OP_MODE_VHT8080_SHIFT 1 +#define NAN_DEV_CAP_OP_MODE_VHT160_MASK 0x04 +#define NAN_DEV_CAP_OP_MODE_VHT160_SHIFT 2 +#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_MASK 0x08 +#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_SHIFT 3 + +#define NAN_DEV_CAP_RX_ANT_SHIFT 4 +#define NAN_DEV_CAP_TX_ANT_MASK 0x0F +#define NAN_DEV_CAP_RX_ANT_MASK 0xF0 +#define NAN_DEV_CAP_TX_ANT(_ant) ((_ant) & NAN_DEV_CAP_TX_ANT_MASK) +#define NAN_DEV_CAP_RX_ANT(_ant) (((_ant) & NAN_DEV_CAP_RX_ANT_MASK) \ + >> NAN_DEV_CAP_RX_ANT_SHIFT) + +/* Device capabilities */ + +/* DFS master capability */ +#define NAN_DEV_CAP_DFS_MASTER_MASK 0x01 +#define NAN_DEV_CAP_DFS_MASTER_SHIFT 0 +/* extended iv cap */ +#define NAN_DEV_CAP_EXT_KEYID_MASK 0x02 +#define NAN_DEV_CAP_EXT_KEYID_SHIFT 1 +/* NDPE attribute support */ +#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK 0x08 +#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT(_cap) ((_cap) & NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK) + +/* Band IDs */ +enum { + NAN_BAND_ID_TVWS = 0, + NAN_BAND_ID_SIG = 1, /* Sub 1 GHz */ + NAN_BAND_ID_2G = 2, /* 2.4 GHz */ + NAN_BAND_ID_3G = 3, /* 3.6 GHz */ + NAN_BAND_ID_5G = 4, /* 4.9 & 5 GHz */ + NAN_BAND_ID_60G = 5 +}; +typedef uint8 nan_band_id_t; + +/* NAN supported band in device capability */ +#define NAN_DEV_CAP_SUPPORTED_BANDS_2G (1 << NAN_BAND_ID_2G) +#define NAN_DEV_CAP_SUPPORTED_BANDS_5G (1 << NAN_BAND_ID_5G) + +/* + * Unaligned schedule attribute section 10.7.19.6 spec. 
ver r15 + */ +#define NAN_ULW_ATTR_CTRL_SCHED_ID_MASK 0x000F +#define NAN_ULW_ATTR_CTRL_SCHED_ID_SHIFT 0 +#define NAN_ULW_ATTR_CTRL_SEQ_ID_MASK 0xFF00 +#define NAN_ULW_ATTR_CTRL_SEQ_ID_SHIFT 8 + +#define NAN_ULW_OVWR_ALL_MASK 0x01 +#define NAN_ULW_OVWR_ALL_SHIFT 0 +#define NAN_ULW_OVWR_MAP_ID_MASK 0x1E +#define NAN_ULW_OVWR_MAP_ID_SHIFT 1 + +#define NAN_ULW_CTRL_TYPE_MASK 0x03 +#define NAN_ULW_CTRL_TYPE_SHIFT 0 +#define NAN_ULW_CTRL_TYPE(ctrl) (ctrl & NAN_ULW_CTRL_TYPE_MASK) +#define NAN_ULW_CTRL_CHAN_AVAIL_MASK 0x04 +#define NAN_ULW_CTRL_CHAN_AVAIL_SHIFT 2 +#define NAN_ULW_CTRL_CHAN_AVAIL(ctrl) ((ctrl & NAN_ULW_CTRL_CHAN_AVAIL_MASK) \ + >> NAN_ULW_CTRL_CHAN_AVAIL_SHIFT) +#define NAN_ULW_CTRL_RX_NSS_MASK 0x78 +#define NAN_ULW_CTRL_RX_NSS_SHIFT 3 + +#define NAN_ULW_CTRL_TYPE_BAND 0 +#define NAN_ULW_CTRL_TYPE_CHAN_NOAUX 1 +#define NAN_ULW_CTRL_TYPE_CHAN_AUX 2 + +#define NAN_ULW_CNT_DOWN_NO_EXPIRE 0xFF /* ULWs doen't end until next sched update */ +#define NAN_ULW_CNT_DOWN_CANCEL 0x0 /* cancel remaining ulws */ + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ulw_attr_s { + uint8 id; + uint16 len; + uint16 ctrl; + uint32 start; /* low 32 bits of tsf */ + uint32 dur; + uint32 period; + uint8 count_down; + uint8 overwrite; + /* + * ulw[0] == optional field ULW control when present. 
+ * band ID or channel follows + */ + uint8 ulw_entry[]; +} BWL_POST_PACKED_STRUCT wifi_nan_ulw_attr_t; + +/* NAN2 Management Frame (section 5.6) */ + +/* Public action frame for NAN2 */ +typedef BWL_PRE_PACKED_STRUCT struct nan2_pub_act_frame_s { + /* NAN_PUB_AF_CATEGORY 0x04 */ + uint8 category_id; + /* NAN_PUB_AF_ACTION 0x09 */ + uint8 action_field; + /* NAN_OUI 0x50-6F-9A */ + uint8 oui[DOT11_OUI_LEN]; + /* NAN_OUI_TYPE TBD */ + uint8 oui_type; + /* NAN_OUI_SUB_TYPE TBD */ + uint8 oui_sub_type; + /* One or more NAN Attributes follow */ + uint8 data[]; +} BWL_POST_PACKED_STRUCT nan2_pub_act_frame_t; + +#define NAN2_PUB_ACT_FRM_SIZE (OFFSETOF(nan2_pub_act_frame_t, data)) + +/* NAN Action Frame Subtypes */ +/* Subtype-0 is Reserved */ +#define NAN_MGMT_FRM_SUBTYPE_RESERVED 0 +#define NAN_MGMT_FRM_SUBTYPE_INVALID 0 +/* NAN Ranging Request */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_REQ 1 +/* NAN Ranging Response */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_RESP 2 +/* NAN Ranging Termination */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_TERM 3 +/* NAN Ranging Report */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_RPT 4 +/* NDP Request */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_REQ 5 +/* NDP Response */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_RESP 6 +/* NDP Confirm */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_CONFIRM 7 +/* NDP Key Installment */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_KEY_INST 8 +/* NDP Termination */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_END 9 +/* Schedule Request */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_REQ 10 +/* Schedule Response */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_RESP 11 +/* Schedule Confirm */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_CONF 12 +/* Schedule Update */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD 13 + +/* Reason code defines */ +#define NAN_REASON_RESERVED 0x0 +#define NAN_REASON_UNSPECIFIED 0x1 +#define NAN_REASON_RESOURCE_LIMIT 0x2 +#define NAN_REASON_INVALID_PARAMS 0x3 +#define NAN_REASON_FTM_PARAM_INCAP 0x4 +#define NAN_REASON_NO_MOVEMENT 0x5 +#define NAN_REASON_INVALID_AVAIL 0x6 
+#define NAN_REASON_IMMUT_UNACCEPT 0x7 +#define NAN_REASON_SEC_POLICY 0x8 +#define NAN_REASON_QOS_UNACCEPT 0x9 +#define NAN_REASON_NDP_REJECT 0xa +#define NAN_REASON_NDL_UNACCEPTABLE 0xb + +/* nan 2.0 qos (not attribute) */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_qos_s { + uint8 tid; /* traffic identifier */ + uint16 pkt_size; /* service data pkt size */ + uint8 data_rate; /* mean data rate */ + uint8 svc_interval; /* max service interval */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndp_qos_t; + +/* NDP control bitmap defines */ +#define NAN_NDP_CTRL_CONFIRM_REQUIRED 0x01 +#define NAN_NDP_CTRL_SECURTIY_PRESENT 0x04 +#define NAN_NDP_CTRL_PUB_ID_PRESENT 0x08 +#define NAN_NDP_CTRL_RESP_NDI_PRESENT 0x10 +#define NAN_NDP_CTRL_SPEC_INFO_PRESENT 0x20 +#define NAN_NDP_CTRL_RESERVED 0xA0 + +/* Used for both NDP Attribute and NDPE Attribute, since the structures are identical */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_attr_s { + uint8 id; /* NDP: 0x10, NDPE: 0x29 */ + uint16 len; /* length */ + uint8 dialog_token; /* dialog token */ + uint8 type_status; /* bits 0-3 type, 4-7 status */ + uint8 reason; /* reason code */ + struct ether_addr init_ndi; /* ndp initiator's data interface address */ + uint8 ndp_id; /* ndp identifier (created by initiator */ + uint8 control; /* ndp control field */ + uint8 var[]; /* Optional fields follow */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndp_attr_t; +/* NDP attribute type and status macros */ +#define NAN_NDP_TYPE_MASK 0x0F +#define NAN_NDP_TYPE_REQUEST 0x0 +#define NAN_NDP_TYPE_RESPONSE 0x1 +#define NAN_NDP_TYPE_CONFIRM 0x2 +#define NAN_NDP_TYPE_SECURITY 0x3 +#define NAN_NDP_TYPE_TERMINATE 0x4 +#define NAN_NDP_REQUEST(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_REQUEST) +#define NAN_NDP_RESPONSE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_RESPONSE) +#define NAN_NDP_CONFIRM(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_CONFIRM) +#define NAN_NDP_SECURITY_INST(_ndp) 
(((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \ + NAN_NDP_TYPE_SECURITY) +#define NAN_NDP_TERMINATE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \ + NAN_NDP_TYPE_TERMINATE) +#define NAN_NDP_STATUS_SHIFT 4 +#define NAN_NDP_STATUS_MASK 0xF0 +#define NAN_NDP_STATUS_CONT (0 << NAN_NDP_STATUS_SHIFT) +#define NAN_NDP_STATUS_ACCEPT (1 << NAN_NDP_STATUS_SHIFT) +#define NAN_NDP_STATUS_REJECT (2 << NAN_NDP_STATUS_SHIFT) +#define NAN_NDP_CONT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == NAN_NDP_STATUS_CONT) +#define NAN_NDP_ACCEPT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \ + NAN_NDP_STATUS_ACCEPT) +#define NAN_NDP_REJECT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \ + NAN_NDP_STATUS_REJECT) +/* NDP Setup Status */ +#define NAN_NDP_SETUP_STATUS_OK 1 +#define NAN_NDP_SETUP_STATUS_FAIL 0 +#define NAN_NDP_SETUP_STATUS_REJECT 2 + +/* Rng setup attribute type and status macros */ +#define NAN_RNG_TYPE_MASK 0x0F +#define NAN_RNG_TYPE_REQUEST 0x0 +#define NAN_RNG_TYPE_RESPONSE 0x1 +#define NAN_RNG_TYPE_TERMINATE 0x2 + +#define NAN_RNG_STATUS_SHIFT 4 +#define NAN_RNG_STATUS_MASK 0xF0 +#define NAN_RNG_STATUS_ACCEPT (0 << NAN_RNG_STATUS_SHIFT) +#define NAN_RNG_STATUS_REJECT (1 << NAN_RNG_STATUS_SHIFT) + +#define NAN_RNG_ACCEPT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \ + NAN_RNG_STATUS_ACCEPT) +#define NAN_RNG_REJECT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \ + NAN_RNG_STATUS_REJECT) + +/* schedule entry */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sched_entry_s { + uint8 map_id; /* map id */ + uint16 tbmp_ctrl; /* time bitmap control */ + uint8 tbmp_len; /* time bitmap len */ + uint8 tbmp[]; /* time bitmap - Optional */ +} BWL_POST_PACKED_STRUCT wifi_nan_sched_entry_t; + +#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F +#define NAN_SCHED_ENTRY_MIN_SIZE OFFSETOF(wifi_nan_sched_entry_t, tbmp) +#define NAN_SCHED_ENTRY_SIZE(_entry) (NAN_SCHED_ENTRY_MIN_SIZE + (_entry)->tbmp_len) + +/* for dev cap, element container etc. 
*/ +#define NAN_DEV_ELE_MAPID_CTRL_MASK 0x1 +#define NAN_DEV_ELE_MAPID_CTRL_SHIFT 0 +#define NAN_DEV_ELE_MAPID_MASK 0x1E +#define NAN_DEV_ELE_MAPID_SHIFT 1 + +#define NAN_DEV_ELE_MAPID_CTRL_SET(_mapid_field, value) \ + do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_CTRL_MASK; \ + (_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_CTRL_SHIFT) & \ + NAN_DEV_ELE_MAPID_CTRL_MASK); \ + } while (0); + +#define NAN_DEV_ELE_MAPID_CTRL_GET(_mapid_field) \ + (((_mapid_field) & NAN_DEV_ELE_MAPID_CTRL_MASK) >> \ + NAN_DEV_ELE_MAPID_CTRL_SHIFT) + +#define NAN_DEV_ELE_MAPID_SET(_mapid_field, value) \ + do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_MASK; \ + (_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_SHIFT) & \ + NAN_DEV_ELE_MAPID_MASK); \ + } while (0); + +#define NAN_DEV_ELE_MAPID_GET(_mapid_field) \ + (((_mapid_field) & NAN_DEV_ELE_MAPID_MASK) >> \ + NAN_DEV_ELE_MAPID_SHIFT) + +/* schedule entry map id handling */ +#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F +#define NAN_SCHED_ENTRY_MAPID_SHIFT 0 + +#define NAN_SCHED_ENTRY_MAPID_SET(_mapid_field, value) \ + do {(_mapid_field) &= ~NAN_SCHED_ENTRY_MAPID_MASK; \ + (_mapid_field) |= ((value << NAN_SCHED_ENTRY_MAPID_SHIFT) & \ + NAN_SCHED_ENTRY_MAPID_MASK); \ + } while (0); + +#define NAN_SCHED_ENTRY_MAPID_GET(_mapid_field) \ + (((_mapid_field) & NAN_SCHED_ENTRY_MAPID_MASK) >> \ + NAN_SCHED_ENTRY_MAPID_SHIFT) + +/* NDC attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndc_attr_s { + uint8 id; + uint16 len; + uint8 ndc_id[NAN_DATA_NDC_ID_SIZE]; + uint8 attr_cntrl; + uint8 var[]; +} BWL_POST_PACKED_STRUCT wifi_nan_ndc_attr_t; + +/* Attribute control subfield of NDC attr */ +/* Proposed NDC */ +#define NAN_NDC_ATTR_PROPOSED_NDC_MASK 0x1 +#define NAN_NDC_ATTR_PROPOSED_NDC_SHIFT 0 + +/* get & set */ +#define NAN_NDC_GET_PROPOSED_FLAG(_attr) \ + (((_attr)->attr_cntrl & NAN_NDC_ATTR_PROPOSED_NDC_MASK) >> \ + NAN_NDC_ATTR_PROPOSED_NDC_SHIFT) +#define NAN_NDC_SET_PROPOSED_FLAG(_attr, value) \ + do {((_attr)->attr_cntrl &= 
~NAN_NDC_ATTR_PROPOSED_NDC_MASK); \ + ((_attr)->attr_cntrl |= \ + (((value) << NAN_NDC_ATTR_PROPOSED_NDC_SHIFT) & NAN_NDC_ATTR_PROPOSED_NDC_MASK)); \ + } while (0) + +/* Service descriptor extension attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_desc_ext_attr_s { + /* Attribute ID - 0x11 */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* Instance id of associated service descriptor attribute */ + uint8 instance_id; + /* SDE control field */ + uint16 control; + /* range limit, svc upd indicator etc. */ + uint8 var[]; +} BWL_POST_PACKED_STRUCT wifi_nan_svc_desc_ext_attr_t; + +#define NAN_SDE_ATTR_MIN_LEN OFFSETOF(wifi_nan_svc_desc_ext_attr_t, var) +#define NAN_SDE_ATTR_RANGE_LEN 4 +#define NAN_SDE_ATTR_SUI_LEN 1 +#define NAN_SDE_ATTR_INFO_LEN_PARAM_LEN 2 +#define NAN_SDE_ATTR_RANGE_INGRESS_LEN 2 +#define NAN_SDE_ATTR_RANGE_EGRESS_LEN 2 +#define NAN_SDE_ATTR_CTRL_LEN 2 +/* max length of variable length field (matching filter, service response filter, + * or service info) in service descriptor attribute + */ +#define NAN_DISC_SDA_FIELD_MAX_LEN 255 + +/* SDEA control field bit definitions and access macros */ +#define NAN_SDE_CF_FSD_REQUIRED (1 << 0) +#define NAN_SDE_CF_FSD_GAS (1 << 1) +#define NAN_SDE_CF_DP_REQUIRED (1 << 2) +#define NAN_SDE_CF_DP_TYPE (1 << 3) +#define NAN_SDE_CF_MULTICAST_TYPE (1 << 4) +#define NAN_SDE_CF_QOS_REQUIRED (1 << 5) +#define NAN_SDE_CF_SECURITY_REQUIRED (1 << 6) +#define NAN_SDE_CF_RANGING_REQUIRED (1 << 7) +#define NAN_SDE_CF_RANGE_PRESENT (1 << 8) +#define NAN_SDE_CF_SVC_UPD_IND_PRESENT (1 << 9) +/* Using Reserved Bits as per Spec */ +#define NAN_SDE_CF_LIFE_CNT_PUB_RX (1 << 15) +#define NAN_SDE_FSD_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_FSD_REQUIRED) +#define NAN_SDE_FSD_GAS(_sde) ((_sde)->control & NAN_SDE_CF_FSD_GAS) +#define NAN_SDE_DP_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_DP_REQUIRED) +#define NAN_SDE_DP_MULTICAST(_sde) ((_sde)->control & NAN_SDE_CF_DP_TYPE) 
+#define NAN_SDE_MULTICAST_M_TO_M(_sde) ((_sde)->control & NAN_SDE_CF_MULTICAST_TYPE) +#define NAN_SDE_QOS_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_QOS_REQUIRED) +#define NAN_SDE_SECURITY_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_SECURITY_REQUIRED) +#define NAN_SDE_RANGING_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_RANGING_REQUIRED) +#define NAN_SDE_RANGE_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_RANGE_PRESENT) +#define NAN_SDE_SVC_UPD_IND_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_SVC_UPD_IND_PRESENT) +#define NAN_SDE_LIFE_COUNT_FOR_PUB_RX(_sde) (_sde & NAN_SDE_CF_LIFE_CNT_PUB_RX) + +/* nan2 security */ + +/* + * Cipher suite information Attribute. + * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.2) + */ +#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_4 0 +#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_16 (1 << 0) + +/* enum security algo. +*/ +enum nan_sec_csid { + NAN_SEC_ALGO_NONE = 0, + NAN_SEC_ALGO_NCS_SK_CCM_128 = 1, /* CCMP 128 */ + NAN_SEC_ALGO_NCS_SK_GCM_256 = 2, /* GCMP 256 */ + NAN_SEC_ALGO_LAST = 3 +}; +typedef int8 nan_sec_csid_e; + +/* nan2 cipher suite attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_field_s { + uint8 cipher_suite_id; + uint8 inst_id; /* Instance Id */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_field_t; + +/* nan2 cipher suite information attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_info_attr_s { + uint8 attr_id; /* 0x22 - NAN_ATTR_CIPHER_SUITE_INFO */ + uint16 len; + uint8 capabilities; + uint8 var[]; /* cipher suite list */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_info_attr_t; + +/* + * Security context identifier attribute + * WFA Tech. 
Spec ver 1.0.r21 (section 10.7.24.4) + */ + +#define NAN_SEC_CTX_ID_TYPE_PMKID (1 << 0) + +/* nan2 security context identifier attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_field_s { + uint16 sec_ctx_id_type_len; /* length of security ctx identifier */ + uint8 sec_ctx_id_type; + uint8 inst_id; /* Instance Id */ + uint8 var[]; /* security ctx identifier */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_field_t; + +/* nan2 security context identifier info attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_info_attr_s { + uint8 attr_id; /* 0x23 - NAN_ATTR_SEC_CTX_ID_INFO */ + uint16 len; + uint8 var[]; /* security context identifier list */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_info_attr_t; + +/* + * Nan shared key descriptor attribute + * WFA Tech. Spec ver 23 + */ + +#define NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN 8 +#define NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN 32 + +/* nan shared key descriptor attr field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ncssk_key_desc_attr_s { + uint8 attr_id; /* 0x24 - NAN_ATTR_SHARED_KEY_DESC */ + uint16 len; + uint8 inst_id; /* Publish service instance ID */ + uint8 desc_type; + uint16 key_info; + uint16 key_len; + uint8 key_replay_cntr[NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN]; + uint8 key_nonce[NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN]; + uint8 reserved[32]; /* EAPOL IV + Key RSC + Rsvd fields in EAPOL Key */ + uint8 mic[]; /* mic + key data len + key data */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_ncssk_key_desc_attr_t; + +/* Key Info fields */ +#define NAN_SEC_NCSSK_DESC_MASK 0x7 +#define NAN_SEC_NCSSK_DESC_SHIFT 0 +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK 0x8 +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT 3 +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK 0x40 +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT 6 +#define NAN_SEC_NCSSK_DESC_KEY_ACK_MASK 0x80 +#define NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT 7 +#define NAN_SEC_NCSSK_DESC_KEY_MIC_MASK 0x100 +#define NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT 8 
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_MASK 0x200 +#define NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT 9 +#define NAN_SEC_NCSSK_DESC_KEY_ERR_MASK 0x400 +#define NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT 10 +#define NAN_SEC_NCSSK_DESC_KEY_REQ_MASK 0x800 +#define NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT 11 +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK 0x1000 +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT 12 +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK 0x2000 +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT 13 + +/* Key Info get & set macros */ +#define NAN_SEC_NCSSK_KEY_DESC_VER_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_MASK) >> NAN_SEC_NCSSK_DESC_SHIFT) +#define NAN_SEC_NCSSK_KEY_DESC_VER_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_SHIFT) & \ + NAN_SEC_NCSSK_DESC_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK) >> NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK) >> \ + NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_ACK_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ACK_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_ACK_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ACK_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_ACK_MASK);} while (0) 
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_MIC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_MIC_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_MIC_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_MIC_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_SEC_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SEC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_SEC_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SEC_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_SEC_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_ERR_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ERR_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_ERR_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ERR_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_ERR_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_REQ_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_REQ_MASK) >> NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_REQ_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_REQ_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_REQ_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK) >> \ + NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK) 
>> \ + NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK);} while (0) + +#define NAN_SEC_NCSSK_IEEE80211_KDESC_TYPE 2 /* IEEE 802.11 Key Descriptor Type */ +#define NAN_SEC_NCSSK_KEY_DESC_VER 0 /* NCSSK-128/256 */ +#define NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE 1 /* Pairwise */ +#define NAN_SEC_NCSSK_LIFETIME_KDE 7 /* Lifetime KDE type */ + +/* TODO include MTK related attributes */ + +/* NAN Multicast service group(NMSG) definitions */ +/* Length of NMSG_ID -- (NDI * 2^16 + pub_id * 2^8 + Random_factor) */ +#define NAN_NMSG_ID_LEN 8 + +#define NAN_NMSG_TYPE_MASK 0x0F +#define NMSG_ATTR_TYPE_STATUS_REQUEST 0x00 +#define NMSG_ATTR_TYPE_STATUS_RESPONSE 0x01 +#define NMSG_ATTR_TYPE_STATUS_CONFIRM 0x02 +#define NMSG_ATTR_TYPE_STATUS_SEC_INSTALL 0x03 +#define NMSG_ATTR_TYPE_STATUS_TERMINATE 0x04 +#define NMSG_ATTR_TYPE_STATUS_IMPLICIT_ENROL 0x05 + +#define NMSG_ATTR_TYPE_STATUS_CONTINUED 0x00 +#define NMSG_ATTR_TYPE_STATUS_ACCEPTED 0x10 +#define NMSG_ATTR_TYPE_STATUS_REJECTED 0x20 + +#define NMSG_CTRL_PUB_ID_PRESENT 0x0001 +#define NMSG_CTRL_NMSG_ID_PRESENT 0x0002 +#define NMSG_CTRL_SECURITY_PRESENT 0x0004 +#define NMSG_CTRL_MANY_TO_MANY_PRESENT 0x0008 +#define NMSG_CTRL_SVC_INFO_PRESENT 0x0010 + +/* NMSG attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_nmsg_attr_s { + uint8 id; /* Attribute ID - 0x11 */ + uint16 len; /* Length including pubid, NMSGID and svc info */ + uint8 dialog_token; + uint8 type_status; /* Type and Status field byte */ + uint8 reason_code; + uint8 mc_id; /* Multicast id similar to NDPID */ + uint8 nmsg_ctrl; /* NMSG control field */ + /* Optional publish id, NMSGID and svc info are included in var[] */ + uint8 var[0]; +} BWL_POST_PACKED_STRUCT wifi_nan_nmsg_attr_t; + +#define NMSG_ATTR_MCAST_SCHED_MAP_ID_MASK 0x1E +#define 
NMSG_ATTR_MCAST_SCHED_MAP_ID_SHIFT 1 +#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_MASK 0x20 +#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_SHIFT 5 + +/* NAN Multicast Schedule atribute structure */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_mcast_sched_attr_s { + uint8 id; /* 0x16 */ + uint16 len; + uint8 nmsg_id[NAN_NMSG_ID_LEN]; + uint8 attr_cntrl; + uint8 sched_own[ETHER_ADDR_LEN]; + uint8 var[]; /* multicast sched entry list (schedule_entry_list) */ +} BWL_POST_PACKED_STRUCT wifi_nan_mcast_sched_attr_t; + +/* FAC Channel Entry (section 10.7.19.1.5) */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_fac_chan_entry_s { + uint8 oper_class; /* Operating Class */ + uint16 chan_bitmap; /* Channel Bitmap */ + uint8 primary_chan_bmp; /* Primary Channel Bitmap */ + uint16 aux_chan; /* Auxiliary Channel bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_fac_chan_entry_t; + +/* TODO move this from nan.h */ +#define NAN_ALL_NAN_MGMT_FRAMES (NAN_FRM_SCHED_AF | \ + NAN_FRM_NDP_AF | NAN_FRM_NDL_AF | \ + NAN_FRM_DISC_BCN | NAN_FRM_SYNC_BCN | \ + NAN_FRM_SVC_DISC | NAN_FRM_RNG_REQ_AF | \ + NAN_FRM_RNG_RESP_AF | NAN_FRM_RNG_REPORT_AF | \ + NAN_FRM_RNG_TERM_AF) + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _NAN_H_ */ diff --git a/bcmdhd.100.10.315.x/include/osl.h b/bcmdhd.100.10.315.x/include/osl.h new file mode 100644 index 0000000..7853c1b --- /dev/null +++ b/bcmdhd.100.10.315.x/include/osl.h @@ -0,0 +1,361 @@ +/* + * OS Abstraction Layer + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: osl.h 768071 2018-06-18 09:23:56Z $ + */ + +#ifndef _osl_h_ +#define _osl_h_ + +#include + +enum { + TAIL_BYTES_TYPE_FCS = 1, + TAIL_BYTES_TYPE_ICV = 2, + TAIL_BYTES_TYPE_MIC = 3 +}; + +#define OSL_PKTTAG_SZ 48 /* standard linux pkttag size is 48 bytes */ + +/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */ +typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status); + +/* Drivers use REGOPSSET() to register register read/write funcitons */ +typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size); +typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size); + +#if defined(WL_UNITTEST) +#include +#else +#include +#include +#endif // endif + +#ifndef PKTDBG_TRACE +#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#endif // endif + +#define PKTCTFMAP(osh, p) BCM_REFERENCE(osh) + +/* -------------------------------------------------------------------------- +** Register manipulation macros. 
+*/ + +#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val))) + +#ifndef AND_REG +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#endif /* !AND_REG */ + +#ifndef OR_REG +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) +#endif /* !OR_REG */ + +#if !defined(OSL_SYSUPTIME) +#define OSL_SYSUPTIME() (0) +#define OSL_SYSUPTIME_NOT_DEFINED 1 +#endif /* !defined(OSL_SYSUPTIME) */ + +#if !defined(OSL_SYSUPTIME_US) +#define OSL_SYSUPTIME_US() (0) +#define OSL_SYSUPTIME_US_NOT_DEFINED 1 +#endif /* !defined(OSL_SYSUPTIME) */ + +#if defined(OSL_SYSUPTIME_NOT_DEFINED) && defined(OSL_SYSUPTIME_US_NOT_DEFINED) +#define OSL_SYSUPTIME_SUPPORT FALSE +#else +#define OSL_SYSUPTIME_SUPPORT TRUE +#endif /* OSL_SYSUPTIME */ + +#ifndef OSL_SYS_HALT +#define OSL_SYS_HALT() do {} while (0) +#endif // endif + +#ifndef DMB +#if defined(STB) +#define DMB() mb(); +#else /* STB */ +#define DMB() do {} while (0) +#endif /* STB */ +#endif /* DMB */ + +#ifndef OSL_MEM_AVAIL +#define OSL_MEM_AVAIL() (0xffffffff) +#endif // endif + +#ifndef OSL_OBFUSCATE_BUF +/* For security reasons printing pointers is not allowed. + * Some OSLs implement OSL_OBFUSCATE_BUF to OS specific obfuscate API. 
+ * If OSL_OBFUSCATE_BUF() is not implemented in OSL, then default to + * printing the input pointer + */ +#define OSL_OBFUSCATE_BUF(x) (x) +#endif /* OSL_OBFUSCATE_BUF */ + +#if !defined(PKTC_DONGLE) + +#define PKTCGETATTR(skb) (0) +#define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb) +#define PKTCCLRATTR(skb) BCM_REFERENCE(skb) +#define PKTCCNT(skb) (1) +#define PKTCLEN(skb) PKTLEN(NULL, skb) +#define PKTCGETFLAGS(skb) (0) +#define PKTCSETFLAGS(skb, f) BCM_REFERENCE(skb) +#define PKTCCLRFLAGS(skb) BCM_REFERENCE(skb) +#define PKTCFLAGS(skb) (0) +#define PKTCSETCNT(skb, c) BCM_REFERENCE(skb) +#define PKTCINCRCNT(skb) BCM_REFERENCE(skb) +#define PKTCADDCNT(skb, c) BCM_REFERENCE(skb) +#define PKTCSETLEN(skb, l) BCM_REFERENCE(skb) +#define PKTCADDLEN(skb, l) BCM_REFERENCE(skb) +#define PKTCSETFLAG(skb, fb) BCM_REFERENCE(skb) +#define PKTCCLRFLAG(skb, fb) BCM_REFERENCE(skb) +#define PKTCLINK(skb) NULL +#define PKTSETCLINK(skb, x) BCM_REFERENCE(skb) +#define FOREACH_CHAINED_PKT(skb, nskb) \ + for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb)) +#define PKTCFREE PKTFREE +#define PKTCENQTAIL(h, t, p) \ +do { \ + if ((t) == NULL) { \ + (h) = (t) = (p); \ + } \ +} while (0) +#endif // endif + +#ifndef PKTSETCHAINED +#define PKTSETCHAINED(osh, skb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTCLRCHAINED +#define PKTCLRCHAINED(osh, skb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTISCHAINED +#define PKTISCHAINED(skb) FALSE +#endif // endif + +/* Lbuf with fraglist */ +#ifndef PKTFRAGPKTID +#define PKTFRAGPKTID(osh, lb) (0) +#endif // endif +#ifndef PKTSETFRAGPKTID +#define PKTSETFRAGPKTID(osh, lb, id) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTFRAGTOTNUM +#define PKTFRAGTOTNUM(osh, lb) (0) +#endif // endif +#ifndef PKTSETFRAGTOTNUM +#define PKTSETFRAGTOTNUM(osh, lb, tot) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTFRAGTOTLEN +#define PKTFRAGTOTLEN(osh, lb) (0) +#endif // endif +#ifndef PKTSETFRAGTOTLEN +#define PKTSETFRAGTOTLEN(osh, lb, len) BCM_REFERENCE(osh) 
+#endif // endif +#ifndef PKTIFINDEX +#define PKTIFINDEX(osh, lb) (0) +#endif // endif +#ifndef PKTSETIFINDEX +#define PKTSETIFINDEX(osh, lb, idx) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTGETLF +#define PKTGETLF(osh, len, send, lbuf_type) (0) +#endif // endif + +/* in rx path, reuse totlen as used len */ +#ifndef PKTFRAGUSEDLEN +#define PKTFRAGUSEDLEN(osh, lb) (0) +#endif // endif +#ifndef PKTSETFRAGUSEDLEN +#define PKTSETFRAGUSEDLEN(osh, lb, len) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTFRAGLEN +#define PKTFRAGLEN(osh, lb, ix) (0) +#endif // endif +#ifndef PKTSETFRAGLEN +#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTFRAGDATA_LO +#define PKTFRAGDATA_LO(osh, lb, ix) (0) +#endif // endif +#ifndef PKTSETFRAGDATA_LO +#define PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTFRAGDATA_HI +#define PKTFRAGDATA_HI(osh, lb, ix) (0) +#endif // endif +#ifndef PKTSETFRAGDATA_HI +#define PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh) +#endif // endif + +/* RX FRAG */ +#ifndef PKTISRXFRAG +#define PKTISRXFRAG(osh, lb) (0) +#endif // endif +#ifndef PKTSETRXFRAG +#define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTRESETRXFRAG +#define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif // endif + +/* TX FRAG */ +#ifndef PKTISTXFRAG +#define PKTISTXFRAG(osh, lb) (0) +#endif // endif +#ifndef PKTSETTXFRAG +#define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif // endif + +/* Need Rx completion used for AMPDU reordering */ +#ifndef PKTNEEDRXCPL +#define PKTNEEDRXCPL(osh, lb) (TRUE) +#endif // endif +#ifndef PKTSETNORXCPL +#define PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTRESETNORXCPL +#define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTISFRAG +#define PKTISFRAG(osh, lb) (0) +#endif // endif +#ifndef PKTFRAGISCHAINED +#define PKTFRAGISCHAINED(osh, i) (0) +#endif // endif +/* TRIM Tail bytes from lfrag */ 
+#ifndef PKTFRAG_TRIM_TAILBYTES +#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len) +#endif // endif +#ifndef PKTISHDRCONVTD +#define PKTISHDRCONVTD(osh, lb) (0) +#endif // endif + +/* Forwarded pkt indication */ +#ifndef PKTISFRWDPKT +#define PKTISFRWDPKT(osh, lb) 0 +#endif // endif +#ifndef PKTSETFRWDPKT +#define PKTSETFRWDPKT(osh, lb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTRESETFRWDPKT +#define PKTRESETFRWDPKT(osh, lb) BCM_REFERENCE(osh) +#endif // endif + +/* SFD Frame */ +#ifndef PKTISSFDFRAME +#define PKTISSFDFRAME(osh, lb) (0) +#endif // endif +#ifndef PKTSETSFDFRAME +#define PKTSETSFDFRAME(osh, lb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTRESETSFDFRAME +#define PKTRESETSFDFRAME(osh, lb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTISSFDTXC +#define PKTISSFDTXC(osh, lb) (0) +#endif // endif +#ifndef PKTSETSFDTXC +#define PKTSETSFDTXC(osh, lb) BCM_REFERENCE(osh) +#endif // endif +#ifndef PKTRESETSFDTXC +#define PKTRESETSFDTXC(osh, lb) BCM_REFERENCE(osh) +#endif // endif + +#ifdef BCM_SECURE_DMA +#define SECURE_DMA_ENAB(osh) (1) +#else + +#define SECURE_DMA_ENAB(osh) (0) +#ifndef BCMDMA64OSL +#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) ((0))) +#else +#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \ + ((dmaaddr_t) {.hiaddr = 0, .loaddr = 0}) +#endif // endif +#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) 0 +#ifndef BCMDMA64OSL +#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) ((0))) +#else +#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \ + ((dmaaddr_t) {.hiaddr = 0, .loaddr = 0}) +#endif // endif +#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) +#define SECURE_DMA_UNMAP_ALL(osh, pcma) + +#endif /* BCMDMA64OSL */ + +#ifndef ROMMABLE_ASSERT +#define ROMMABLE_ASSERT(exp) ASSERT(exp) +#endif /* ROMMABLE_ASSERT */ + +#ifndef MALLOC_NOPERSIST + 
#define MALLOC_NOPERSIST MALLOC +#endif /* !MALLOC_NOPERSIST */ + +#ifndef MALLOC_PERSIST + #define MALLOC_PERSIST MALLOC +#endif /* !MALLOC_PERSIST */ + +#ifndef MALLOC_NOPERSIST + #define MALLOC_NOPERSIST MALLOC +#endif /* !MALLOC_NOPERSIST */ + +#ifndef MALLOC_PERSIST_ATTACH + #define MALLOC_PERSIST_ATTACH MALLOC +#endif /* !MALLOC_PERSIST_ATTACH */ + +#ifndef MALLOCZ_PERSIST_ATTACH + #define MALLOCZ_PERSIST_ATTACH MALLOCZ +#endif /* !MALLOCZ_PERSIST_ATTACH */ + +#ifndef MALLOCZ_NOPERSIST + #define MALLOCZ_NOPERSIST MALLOCZ +#endif /* !MALLOCZ_NOPERSIST */ + +#ifndef MALLOCZ_PERSIST + #define MALLOCZ_PERSIST MALLOCZ +#endif /* !MALLOCZ_PERSIST */ + +#ifndef MFREE_PERSIST + #define MFREE_PERSIST MFREE +#endif /* !MFREE_PERSIST */ + +#ifndef MALLOC_SET_NOPERSIST + #define MALLOC_SET_NOPERSIST(osh) do { } while (0) +#endif /* !MALLOC_SET_NOPERSIST */ + +#ifndef MALLOC_CLEAR_NOPERSIST + #define MALLOC_CLEAR_NOPERSIST(osh) do { } while (0) +#endif /* !MALLOC_CLEAR_NOPERSIST */ + +#if defined(OSL_MEMCHECK) +#define MEMCHECK(f, l) osl_memcheck(f, l) +#else +#define MEMCHECK(f, l) +#endif /* OSL_MEMCHECK */ + +#endif /* _osl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/osl_decl.h b/bcmdhd.100.10.315.x/include/osl_decl.h new file mode 100644 index 0000000..8b487de --- /dev/null +++ b/bcmdhd.100.10.315.x/include/osl_decl.h @@ -0,0 +1,37 @@ +/* + * osl forward declarations + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_decl.h 596126 2015-10-29 19:53:48Z $ + */ + +#ifndef _osl_decl_h_ +#define _osl_decl_h_ + +/* osl handle type forward declaration */ +typedef struct osl_info osl_t; +typedef struct osl_dmainfo osldma_t; +extern unsigned int lmtest; /* low memory test */ +#endif // endif diff --git a/bcmdhd.100.10.315.x/include/osl_ext.h b/bcmdhd.100.10.315.x/include/osl_ext.h new file mode 100644 index 0000000..024ca5c --- /dev/null +++ b/bcmdhd.100.10.315.x/include/osl_ext.h @@ -0,0 +1,765 @@ +/* + * OS Abstraction Layer Extension - the APIs defined by the "extension" API + * are only supported by a subset of all operating systems. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_ext.h 759145 2018-04-24 05:09:37Z $ + */ + +#ifndef _osl_ext_h_ +#define _osl_ext_h_ + +/* ---- Include Files ---------------------------------------------------- */ + +#if defined(TARGETOS_symbian) + #include + #include +#elif defined(THREADX) + #include +#else + #define OSL_EXT_DISABLED +#endif // endif + +/* Include base operating system abstraction. */ +#include + +#ifdef __cplusplus +extern "C" { +#endif // endif + +/* ---- Constants and Types ---------------------------------------------- */ + +/* ----------------------------------------------------------------------- + * Generic OS types. 
+ */ +typedef enum osl_ext_status_t +{ + OSL_EXT_SUCCESS, + OSL_EXT_ERROR, + OSL_EXT_TIMEOUT + +} osl_ext_status_t; +#define OSL_EXT_STATUS_DECL(status) osl_ext_status_t status; + +#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1)) +typedef unsigned int osl_ext_time_ms_t; +typedef unsigned int osl_ext_time_us_t; + +typedef unsigned int osl_ext_event_bits_t; + +typedef unsigned int osl_ext_interrupt_state_t; + +/* ----------------------------------------------------------------------- + * Timers. + */ +typedef enum +{ + /* One-shot timer. */ + OSL_EXT_TIMER_MODE_ONCE, + + /* Periodic timer. */ + OSL_EXT_TIMER_MODE_REPEAT + +} osl_ext_timer_mode_t; + +/* User registered callback and parameter to invoke when timer expires. */ +typedef void* osl_ext_timer_arg_t; +typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg); + +/* ----------------------------------------------------------------------- + * Tasks. + */ + +/* Task entry argument. */ +typedef void* osl_ext_task_arg_t; + +/* Task entry function. */ +typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg); + +/* Abstract task priority levels. */ +typedef enum +{ + OSL_EXT_TASK_IDLE_PRIORITY, + OSL_EXT_TASK_LOW_PRIORITY, + OSL_EXT_TASK_LOW_NORMAL_PRIORITY, + OSL_EXT_TASK_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGH_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGHEST_PRIORITY, + OSL_EXT_TASK_TIME_CRITICAL_PRIORITY, + + /* This must be last. */ + OSL_EXT_TASK_NUM_PRIORITES +} osl_ext_task_priority_t; + +#ifndef OSL_EXT_DISABLED + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +/* -------------------------------------------------------------------------- +** Semaphore +*/ + +/**************************************************************************** +* Function: osl_ext_sem_create +* +* Purpose: Creates a counting semaphore object, which can subsequently be +* used for thread notification. 
+* +* Parameters: name (in) Name to assign to the semaphore (must be unique). +* init_cnt (in) Initial count that the semaphore should have. +* sem (out) Newly created semaphore. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was created successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_delete +* +* Purpose: Destroys a previously created semaphore object. +* +* Parameters: sem (mod) Semaphore object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_give +* +* Purpose: Increments the count associated with the semaphore. This will +* cause one thread blocked on a take to wake up. +* +* Parameters: sem (mod) Semaphore object to give. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was given successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem); + +/**************************************************************************** +* Function: osl_ext_sem_take +* +* Purpose: Decrements the count associated with the semaphore. If the count +* is less than zero, then the calling task will become blocked until +* another thread does a give on the semaphore. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. 
+* +* Parameters: sem (mod) Semaphore object to take. +* timeout_msec (in) Number of milliseconds to wait for the +* semaphore to enter a state where it can be +* taken. +* +* Returns: OSL_EXT_SUCCESS if the semaphore was taken successfully, or an +* error code if the semaphore could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec); + +/* -------------------------------------------------------------------------- +** Mutex +*/ + +/**************************************************************************** +* Function: osl_ext_mutex_create +* +* Purpose: Creates a mutex object, which can subsequently be used to control +* mutually exclusion of resources. +* +* Parameters: name (in) Name to assign to the mutex (must be unique). +* mutex (out) Mutex object to initialize. +* +* Returns: OSL_EXT_SUCCESS if the mutex was created successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_delete +* +* Purpose: Destroys a previously created mutex object. +* +* Parameters: mutex (mod) Mutex object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the mutex was deleted successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex); + +/**************************************************************************** +* Function: osl_ext_mutex_acquire +* +* Purpose: Acquires the indicated mutual exclusion object. 
If the object is +* currently acquired by another task, then this function will wait +* for timeout_msec milli-seconds before returning with OSL_EXT_TIMEOUT. +* +* Parameters: mutex (mod) Mutex object to acquire. +* timeout_msec (in) Number of milliseconds to wait for the mutex. +* +* Returns: OSL_EXT_SUCCESS if the mutex was acquired successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec); + +/**************************************************************************** +* Function: osl_ext_mutex_release +* +* Purpose: Releases the indicated mutual exclusion object. This makes it +* available for another task to acquire. +* +* Parameters: mutex (mod) Mutex object to release. +* +* Returns: OSL_EXT_SUCCESS if the mutex was released successfully, or an +* error code if the mutex could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex); + +/* -------------------------------------------------------------------------- +** Timers +*/ + +/**************************************************************************** +* Function: osl_ext_timer_create +* +* Purpose: Creates a timer object. +* +* Parameters: name (in) Name of timer. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* func (in) Callback function to invoke on timer expiry. +* arg (in) Argument to callback function. +* timer (out) Timer object to create. +* +* Note: The function callback occurs in interrupt context. The application is +* required to provide context switch for the callback if required. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. 
+***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode, + osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_delete +* +* Purpose: Destroys a previously created timer object. +* +* Parameters: timer (mod) Timer object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_timer_start +* +* Purpose: Start a previously created timer object. +* +* Parameters: timer (in) Timer object. +* timeout_msec (in) Invoke callback after this number of milliseconds. +* mode (in) One-shot or periodic timer. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_start(osl_ext_timer_t *timer, + osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode); + +/**************************************************************************** +* Function: osl_ext_timer_start_us +* +* Purpose: Start a previously created timer object. +* +* Parameters: timer (in) Timer object. +* timeout_usec (in) Invoke callback after this number of micro-seconds. +* mode (in) One-shot or periodic timer. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. 
+***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_start_us(osl_ext_timer_t *timer, + osl_ext_time_us_t timeout_usec, osl_ext_timer_mode_t mode); + +/**************************************************************************** +* Function: osl_ext_timer_stop +* +* Purpose: Stop a previously created timer object. +* +* Parameters: timer (in) Timer object. +* +* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an +* error code if the timer could not be created. +***************************************************************************** +*/ +osl_ext_status_t +osl_ext_timer_stop(osl_ext_timer_t *timer); + +/**************************************************************************** +* Function: osl_ext_time_get +* +* Purpose: Returns incrementing time counter. +* +* Parameters: None. +* +* Returns: Returns incrementing time counter in msec. +***************************************************************************** +*/ +osl_ext_time_ms_t osl_ext_time_get(void); + +/* -------------------------------------------------------------------------- +** Tasks +*/ + +/**************************************************************************** +* Function: osl_ext_task_create +* +* Purpose: Create a task. +* +* Parameters: name (in) Pointer to task string descriptor. +* stack (in) Pointer to stack. NULL to allocate. +* stack_size (in) Stack size - in bytes. +* priority (in) Abstract task priority. +* func (in) A pointer to the task entry point function. +* arg (in) Value passed into task entry point function. +* task (out) Task to create. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. 
+***************************************************************************** +*/ + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \ + (arg), TRUE, (task)) + +/**************************************************************************** +* Function: osl_ext_task_create_ex +* +* Purpose: Create a task with autostart option. +* +* Parameters: name (in) Pointer to task string descriptor. +* stack (in) Pointer to stack. NULL to allocate. +* stack_size (in) Stack size - in bytes. +* priority (in) Abstract task priority. +* func (in) A pointer to the task entry point function. +* arg (in) Value passed into task entry point function. +* autostart (in) TRUE to start task after creation. +* task (out) Task to create. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. +***************************************************************************** +*/ + +osl_ext_status_t osl_ext_task_create_ex(char* name, + void *stack, unsigned int stack_size, osl_ext_task_priority_t priority, + osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg, + bool autostart, osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_delete +* +* Purpose: Destroy a task. +* +* Parameters: task (mod) Task to destroy. +* +* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an +* error code if the task could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_current +* +* Purpose: Returns current running task. +* +* Parameters: None. +* +* Returns: osl_ext_task_t of current running task. 
+***************************************************************************** +*/ +osl_ext_task_t *osl_ext_task_current(void); + +/**************************************************************************** +* Function: osl_ext_task_yield +* +* Purpose: Yield the CPU to other tasks of the same priority that are +* ready-to-run. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_yield(void); + +/**************************************************************************** +* Function: osl_ext_task_yield +* +* Purpose: Yield the CPU to other tasks of the same priority that are +* ready-to-run. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_yield(void); + +/**************************************************************************** +* Function: osl_ext_task_suspend +* +* Purpose: Suspend a task. +* +* Parameters: task (mod) Task to suspend. +* +* Returns: OSL_EXT_SUCCESS if the task was suspended successfully, or an +* error code if the task could not be suspended. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_suspend(osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_resume +* +* Purpose: Resume a task. +* +* Parameters: task (mod) Task to resume. +* +* Returns: OSL_EXT_SUCCESS if the task was resumed successfully, or an +* error code if the task could not be resumed. 
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_resume(osl_ext_task_t *task); + +/**************************************************************************** +* Function: osl_ext_task_enable_stack_check +* +* Purpose: Enable task stack checking. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_enable_stack_check(void); + +/* -------------------------------------------------------------------------- +** Queue +*/ + +/**************************************************************************** +* Function: osl_ext_queue_create +* +* Purpose: Create a queue. +* +* Parameters: name (in) Name to assign to the queue (must be unique). +* buffer (in) Queue buffer. NULL to allocate. +* size (in) Size of the queue. +* queue (out) Newly created queue. +* +* Returns: OSL_EXT_SUCCESS if the queue was created successfully, or an +* error code if the queue could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_create(char *name, + void *queue_buffer, unsigned int queue_size, + osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_delete +* +* Purpose: Destroys a previously created queue object. +* +* Parameters: queue (mod) Queue object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the queue was deleted successfully, or an +* error code if the queue could not be deleted. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_send +* +* Purpose: Send/add data to the queue. 
This function will not block the +* calling thread if the queue is full. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_send_synchronous +* +* Purpose: Send/add data to the queue. This function will block the +* calling thread until the data is dequeued. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_receive +* +* Purpose: Receive/remove data from the queue. This function will only +* block the calling thread for timeout_msec milliseconds, before +* returning with OSL_EXT_TIMEOUT. +* +* Parameters: queue (mod) Queue object. +* timeout_msec (in) Number of milliseconds to wait for the +* data from the queue. +* data (out) Data pointer received/removed from the queue. +* +* Returns: OSL_EXT_SUCCESS if the data was dequeued successfully, or an +* error code if the data could not be dequeued. 
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue, + osl_ext_time_ms_t timeout_msec, void **data); + +/**************************************************************************** +* Function: osl_ext_queue_count +* +* Purpose: Returns the number of items in the queue. +* +* Parameters: queue (mod) Queue object. +* count (out) Number of items currently in the queue. +* +* Returns: OSL_EXT_SUCCESS if the count was returned successfully, or an +* error code if the count is invalid. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count); + +/* -------------------------------------------------------------------------- +** Event +*/ + +/**************************************************************************** +* Function: osl_ext_event_create +* +* Purpose: Creates a event object, which can subsequently be used to +* notify and trigger tasks. +* +* Parameters: name (in) Name to assign to the event (must be unique). +* event (out) Event object to initialize. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_delete +* +* Purpose: Destroys a previously created event object. +* +* Parameters: event (mod) Event object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. 
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event); + +/**************************************************************************** +* Function: osl_ext_event_get +* +* Purpose: Get event from specified event object. +* +* Parameters: event (mod) Event object to get. +* requested (in) Requested event to get. +* timeout_msec (in) Number of milliseconds to wait for the event. +* event_bits (out) Event bits retrieved. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event, + osl_ext_event_bits_t requested, osl_ext_time_ms_t timeout_msec, + osl_ext_event_bits_t *event_bits); + +/**************************************************************************** +* Function: osl_ext_event_set +* +* Purpose: Set event of specified event object. +* +* Parameters: event (mod) Event object to set. +* event_bits (in) Event bits to set. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event, + osl_ext_event_bits_t event_bits); + +/* -------------------------------------------------------------------------- +** Interrupt +*/ + +/**************************************************************************** +* Function: osl_ext_interrupt_disable +* +* Purpose: Disable CPU interrupt. +* +* Parameters: None. +* +* Returns: The interrupt state before disable for restoring interrupt. 
+***************************************************************************** +*/ +osl_ext_interrupt_state_t osl_ext_interrupt_disable(void); + +/**************************************************************************** +* Function: osl_ext_interrupt_restore +* +* Purpose: Restore CPU interrupt state. +* +* Parameters: state (in) Interrupt state to restore returned from +* osl_ext_interrupt_disable(). +* +* Returns: None. +***************************************************************************** +*/ +void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state); + +#else + +/* ---- Constants and Types ---------------------------------------------- */ + +/* Interrupt control */ +#define OSL_INTERRUPT_SAVE_AREA +#define OSL_DISABLE +#define OSL_RESTORE + +/* Semaphore. */ +#define osl_ext_sem_t +#define OSL_EXT_SEM_DECL(sem) + +/* Mutex. */ +#define osl_ext_mutex_t +#define OSL_EXT_MUTEX_DECL(mutex) + +/* Timer. */ +#define osl_ext_timer_t +#define OSL_EXT_TIMER_DECL(timer) + +/* Task. */ +#define osl_ext_task_t void +#define OSL_EXT_TASK_DECL(task) + +/* Queue. */ +#define osl_ext_queue_t +#define OSL_EXT_QUEUE_DECL(queue) + +/* Event. 
*/ +#define osl_ext_event_t +#define OSL_EXT_EVENT_DECL(event) + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +#define osl_ext_sem_create(name, init_cnt, sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_delete(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_give(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_take(sem, timeout_msec) (OSL_EXT_SUCCESS) + +#define osl_ext_mutex_create(name, mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_delete(mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_acquire(mutex, timeout_msec) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_release(mutex) (OSL_EXT_SUCCESS) + +#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \ + (OSL_EXT_SUCCESS) +#define osl_ext_timer_delete(timer) (OSL_EXT_SUCCESS) +#define osl_ext_timer_start(timer, timeout_msec, mode) (OSL_EXT_SUCCESS) +#define osl_ext_timer_stop(timer) (OSL_EXT_SUCCESS) +#define osl_ext_time_get() (0) + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + (OSL_EXT_SUCCESS) +#define osl_ext_task_delete(task) (OSL_EXT_SUCCESS) +#define osl_ext_task_current() (NULL) +#define osl_ext_task_yield() (OSL_EXT_SUCCESS) +#define osl_ext_task_enable_stack_check() (OSL_EXT_SUCCESS) + +#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_delete(queue) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send_synchronous(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_receive(queue, timeout_msec, data) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_count(queue, count) (OSL_EXT_SUCCESS) + +#define osl_ext_event_create(name, event) (OSL_EXT_SUCCESS) +#define osl_ext_event_delete(event) (OSL_EXT_SUCCESS) +#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \ + (OSL_EXT_SUCCESS) +#define osl_ext_event_set(event, event_bits) 
(OSL_EXT_SUCCESS) + +#define osl_ext_interrupt_disable(void) +#define osl_ext_interrupt_restore(state) + +#endif /* OSL_EXT_DISABLED */ + +#ifdef __cplusplus +} +#endif // endif + +#endif /* _osl_ext_h_ */ diff --git a/bcmdhd.100.10.315.x/include/p2p.h b/bcmdhd.100.10.315.x/include/p2p.h new file mode 100644 index 0000000..8bb14b5 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/p2p.h @@ -0,0 +1,701 @@ +/* + * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: p2p.h 757905 2018-04-16 23:16:27Z $ + */ + +#ifndef _P2P_H_ +#define _P2P_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif +#include +#include <802.11.h> + +/* This marks the start of a packed structure section. 
*/ +#include + +/* WiFi P2P OUI values */ +#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */ + +#define P2P_IE_ID 0xdd /* P2P IE element ID */ + +/* WiFi P2P IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie { + uint8 id; /* IE ID: 0xDD */ + uint8 len; /* IE length */ + uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */ + uint8 oui_type; /* Identifies P2P version: P2P_VER */ + uint8 subelts[1]; /* variable length subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ie wifi_p2p_ie_t; + +#define P2P_IE_FIXED_LEN 6 + +#define P2P_ATTR_ID_OFF 0 +#define P2P_ATTR_LEN_OFF 1 +#define P2P_ATTR_DATA_OFF 3 + +#define P2P_ATTR_ID_LEN 1 /* ID field length */ +#define P2P_ATTR_LEN_LEN 2 /* length field length */ +#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field spec 1.02 */ + +#define P2P_WFDS_HASH_LEN 6 +#define P2P_WFDS_MAX_SVC_NAME_LEN 32 + +/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */ +#define P2P_SEID_STATUS 0 /* Status */ +#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */ +#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */ +#define P2P_SEID_DEV_ID 3 /* P2P Device ID */ +#define P2P_SEID_INTENT 4 /* Group Owner Intent */ +#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */ +#define P2P_SEID_CHANNEL 6 /* Listen channel */ +#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */ +#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */ +#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */ +#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */ +#define P2P_SEID_CHAN_LIST 11 /* Channel List */ +#define P2P_SEID_ABSENCE 12 /* Notice of Absence */ +#define P2P_SEID_DEV_INFO 13 /* Device Info */ +#define P2P_SEID_GROUP_INFO 14 /* Group Info */ +#define P2P_SEID_GROUP_ID 15 /* Group ID */ +#define P2P_SEID_P2P_IF 16 /* P2P Interface */ +#define P2P_SEID_OP_CHANNEL 17 /* Operating Channel */ +#define P2P_SEID_INVITE_FLAGS 18 /* Invitation Flags */ +#define P2P_SEID_SERVICE_HASH 21 /* Service 
hash */ +#define P2P_SEID_SESSION 22 /* Session information */ +#define P2P_SEID_CONNECT_CAP 23 /* Connection capability */ +#define P2P_SEID_ADVERTISE_ID 24 /* Advertisement ID */ +#define P2P_SEID_ADVERTISE_SERVICE 25 /* Advertised service */ +#define P2P_SEID_SESSION_ID 26 /* Session ID */ +#define P2P_SEID_FEATURE_CAP 27 /* Feature capability */ +#define P2P_SEID_PERSISTENT_GROUP 28 /* Persistent group */ +#define P2P_SEID_SESSION_INFO_RESP 29 /* Session Information Response */ +#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */ + +#define P2P_SE_VS_ID_SERVICES 0x1b + +/* WiFi P2P IE subelement: P2P Capability (capabilities info) */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 dev; /* Device Capability Bitmap */ + uint8 group; /* Group Capability Bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t; + +/* P2P Capability subelement's Device Capability Bitmap bit values */ +#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */ +#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */ +#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */ +#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */ +#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */ +#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */ + +/* P2P Capability subelement's Group Capability Bitmap bit values */ +#define P2P_CAPSE_GRP_OWNER 0x1 /* P2P Group Owner */ +#define P2P_CAPSE_PERSIST_GRP 0x2 /* Persistent P2P Group */ +#define P2P_CAPSE_GRP_LIMIT 0x4 /* P2P Group Limit */ +#define P2P_CAPSE_GRP_INTRA_BSS 0x8 /* Intra-BSS Distribution */ +#define P2P_CAPSE_GRP_X_CONNECT 0x10 /* Cross Connection */ +#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */ +#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */ + +/* WiFi P2P IE subelement: Group Owner Intent */ 
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTENT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 intent; /* Intent Value 0...15 (0=legacy 15=master only) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t; + +/* WiFi P2P IE subelement: Configuration Timeout */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CFG_TIMEOUT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 go_tmo; /* GO config timeout in units of 10 ms */ + uint8 client_tmo; /* Client config timeout in units of 10 ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t; + +/* WiFi P2P IE subelement: Listen Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 op_class; /* Operating Class */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t; + +/* WiFi P2P IE subelement: P2P Group BSSID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GRP_BSSID */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P group bssid */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t; + +/* WiFi P2P IE subelement: P2P Group ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GROUP_ID */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P device address */ + uint8 ssid[1]; /* ssid. device id. 
variable length */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t; + +/* WiFi P2P IE subelement: P2P Interface */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_IF */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P device address */ + uint8 ifaddrs; /* P2P Interface Address count */ + uint8 ifaddr[1][6]; /* P2P Interface Address list */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t; + +/* WiFi P2P IE subelement: Status */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 status; /* Status Code: P2P_STATSE_* */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t; + +/* Status subelement Status Code definitions */ +#define P2P_STATSE_SUCCESS 0 + /* Success */ +#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1 + /* Failed, information currently unavailable */ +#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL + /* Old name for above in P2P spec 1.08 and older */ +#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2 + /* Failed, incompatible parameters */ +#define P2P_STATSE_FAIL_LIMIT_REACHED 3 + /* Failed, limit reached */ +#define P2P_STATSE_FAIL_INVALID_PARAMS 4 + /* Failed, invalid parameters */ +#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5 + /* Failed, unable to accomodate request */ +#define P2P_STATSE_FAIL_PROTO_ERROR 6 + /* Failed, previous protocol error or disruptive behaviour */ +#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7 + /* Failed, no common channels */ +#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8 + /* Failed, unknown P2P Group */ +#define P2P_STATSE_FAIL_INTENT 9 + /* Failed, both peers indicated Intent 15 in GO Negotiation */ +#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10 + /* Failed, incompatible provisioning method */ +#define P2P_STATSE_FAIL_USER_REJECT 11 + /* 
Failed, rejected by user */ +#define P2P_STATSE_SUCCESS_USER_ACCEPT 12 + /* Success, accepted by user */ + +/* WiFi P2P IE attribute: Extended Listen Timing */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s { + uint8 eltId; /* ID: P2P_SEID_EXT_TIMING */ + uint8 len[2]; /* length not including eltId, len fields */ + uint8 avail[2]; /* availibility period */ + uint8 interval[2]; /* availibility interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t; + +#define P2P_EXT_MIN 10 /* minimum 10ms */ + +/* WiFi P2P IE subelement: Intended P2P Interface Address */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTINTADDR */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* intended P2P interface MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t; + +/* WiFi P2P IE subelement: Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 band; /* Regulatory Class (band) */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t; + +/* Channel Entry structure within the Channel List SE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s { + uint8 band; /* Regulatory Class (band) */ + uint8 num_channels; /* # of channels in the channel list */ + uint8 channels[WL_NUMCHANNELS]; /* Channel List */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t; +#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2 + +/* WiFi P2P IE subelement: Channel List */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHAN_LIST */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 num_entries; /* # of channel entries */ + 
wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES]; + /* Channel Entry List */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t; + +/* WiFi Primary Device Type structure */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s { + uint16 cat_id; /* Category ID */ + uint8 OUI[3]; /* WFA OUI: 0x0050F2 */ + uint8 oui_type; /* WPS_OUI_TYPE */ + uint16 sub_cat_id; /* Sub Category ID */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t; + +/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category + * maximum values for each category + */ +#define P2P_DISE_SUBCATEGORY_MINVAL 1 +#define P2P_DISE_CATEGORY_COMPUTER 1 +#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL 8 +#define P2P_DISE_CATEGORY_INPUT_DEVICE 2 +#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL 9 +#define P2P_DISE_CATEGORY_PRINTER 3 +#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL 5 +#define P2P_DISE_CATEGORY_CAMERA 4 +#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL 4 +#define P2P_DISE_CATEGORY_STORAGE 5 +#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL 1 +#define P2P_DISE_CATEGORY_NETWORK_INFRA 6 +#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL 4 +#define P2P_DISE_CATEGORY_DISPLAY 7 +#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL 4 +#define P2P_DISE_CATEGORY_MULTIMEDIA 8 +#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL 6 +#define P2P_DISE_CATEGORY_GAMING 9 +#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL 5 +#define P2P_DISE_CATEGORY_TELEPHONE 10 +#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL 5 +#define P2P_DISE_CATEGORY_AUDIO 11 +#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL 6 + +/* WiFi P2P IE's Device Info subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_DEVINFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P Device MAC address */ + uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pri_devtype[8]; /* Primary 
Device Type */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t; + +#define P2P_DEV_TYPE_LEN 8 + +/* WiFi P2P IE's Group Info subelement Client Info Descriptor */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s { + uint8 len; + uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */ + uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */ + uint8 devcap; /* Device Capability */ + uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */ + uint8 secdts; /* Number of Secondary Device Types */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t; + +/* WiFi P2P IE's Device ID subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s { + uint8 eltId; + uint8 len[2]; + struct ether_addr addr; /* P2P Device MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t; + +/* WiFi P2P IE subelement: P2P Manageability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mg_bitmap; /* manageability bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t; +/* mg_bitmap field bit values */ +#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1 /* AP supports Managed P2P Device */ + +/* WiFi P2P IE subelement: Group Info */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GROUP_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t; + +/* WiFi IE subelement: Operating Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_OP_CHANNEL */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 op_class; /* 
Operating Class */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t; + +/* WiFi IE subelement: INVITATION FLAGS */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INVITE_FLAGS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 flags; /* Flags */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t; + +/* WiFi P2P IE subelement: Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SERVICE_HASH */ + uint8 len[2]; /* SE length not including eltId, len fields + * in multiple of 6 Bytes + */ + uint8 hash[1]; /* Variable length - SHA256 hash of + * service names (can be more than one hashes) + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t; + +/* WiFi P2P IE subelement: Service Instance Data */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SESSION */ + uint8 len[2]; /* SE length not including eltId, len */ + uint8 ssn_info[1]; /* Variable length - Session information as specified by + * the service layer, type matches serv. 
name + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t; + +/* WiFi P2P IE subelement: Connection capability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CONNECT_CAP */ + uint8 len[2]; /* SE length not including eltId, len */ + uint8 conn_cap; /* 1byte capability as specified by the + * service layer, valid bitmask/values + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t; + +/* WiFi P2P IE subelement: Advertisement ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_ID */ + uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */ + uint8 advt_id[4]; /* 4byte Advertisement ID of the peer device sent in + * PROV Disc in Network byte order + */ + uint8 advt_mac[6]; /* P2P device address of the service advertiser */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t; + +/* WiFi P2P IE subelement: Advertise Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s { + uint8 advt_id[4]; /* SE Advertise ID for the service */ + uint16 nw_cfg_method; /* SE Network Config method for the service */ + uint8 serv_name_len; /* SE length of the service name */ + uint8 serv_name[1]; /* Variable length service name field */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t; + +/* WiFi P2P IE subelement: Advertise Service Hash */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s { + uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_SERVICE */ + uint8 len[2]; /* SE length not including eltId, len fields mutiple len of + * wifi_p2p_adv_serv_info_t entries + */ + wifi_p2p_adv_serv_info_t p_advt_serv_info[1]; /* Variable length + of multiple instances + of the advertise service info + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t; + 
+/* WiFi P2P IE subelement: Session ID */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s { + uint8 eltId; /* SE ID: P2P_SEID_SESSION_ID */ + uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */ + uint8 ssn_id[4]; /* 4byte Session ID of the peer device sent in + * PROV Disc in Network byte order + */ + uint8 ssn_mac[6]; /* P2P device address of the seeker - session mac */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t; + +#define P2P_ADVT_SERV_SE_FIXED_LEN 3 /* Includes only the element ID and len */ +#define P2P_ADVT_SERV_INFO_FIXED_LEN 7 /* Per ADV Service Instance advt_id + + * nw_config_method + serv_name_len + */ + +/* WiFi P2P Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame { + uint8 category; /* P2P_AF_CATEGORY */ + uint8 OUI[3]; /* OUI - P2P_OUI */ + uint8 type; /* OUI Type - P2P_VER */ + uint8 subtype; /* OUI Subtype - P2P_AF_* */ + uint8 dialog_token; /* nonzero, identifies req/resp tranaction */ + uint8 elts[1]; /* Variable length information elements. Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t; +#define P2P_AF_CATEGORY 0x7f + +#define P2P_AF_FIXED_LEN 7 + +/* WiFi P2P Action Frame OUI Subtypes */ +#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */ +#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */ +#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */ +#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */ + +/* WiFi P2P Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame { + uint8 category; /* P2P_PUB_AF_CATEGORY */ + uint8 action; /* P2P_PUB_AF_ACTION */ + uint8 oui[3]; /* P2P_OUI */ + uint8 oui_type; /* OUI type - P2P_VER */ + uint8 subtype; /* OUI subtype - P2P_TYPE_* */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 elts[1]; /* Variable length information elements. 
Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t; +#define P2P_PUB_AF_FIXED_LEN 8 +#define P2P_PUB_AF_CATEGORY 0x04 +#define P2P_PUB_AF_ACTION 0x09 + +/* WiFi P2P Public Action Frame OUI Subtypes */ +#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */ +#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */ +#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */ +#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */ +#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */ +#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */ +#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */ +#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */ +#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */ +#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */ + +/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */ +#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ +#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP +#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF + +/* WiFi P2P IE subelement: Notice of Absence */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc { + uint8 cnt_type; /* Count/Type */ + uint32 duration; /* Duration */ + uint32 interval; /* Interval */ + uint32 start; /* Start Time */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t; + +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se { + uint8 eltId; /* Subelement ID */ + uint8 len[2]; /* Length */ + uint8 index; /* Index */ + uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */ + wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t; + +#define P2P_NOA_SE_FIXED_LEN 5 + +#define P2P_NOA_SE_MAX_DESC 2 /* max NoA descriptors in presence request */ + +/* cnt_type field values */ +#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should 
not be used */ +#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */ +#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */ +#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */ + +/* ctw_ops_parms field values */ +#define P2P_NOA_CTW_MASK 0x7f +#define P2P_NOA_OPS_MASK 0x80 +#define P2P_NOA_OPS_SHIFT 7 + +#define P2P_CTW_MIN 10 /* minimum 10TU */ + +/* + * P2P Service Discovery related + */ +#define P2PSD_ACTION_CATEGORY 0x04 + /* Public action frame */ +#define P2PSD_ACTION_ID_GAS_IREQ 0x0a + /* Action value for GAS Initial Request AF */ +#define P2PSD_ACTION_ID_GAS_IRESP 0x0b + /* Action value for GAS Initial Response AF */ +#define P2PSD_ACTION_ID_GAS_CREQ 0x0c + /* Action value for GAS Comeback Request AF */ +#define P2PSD_ACTION_ID_GAS_CRESP 0x0d + /* Action value for GAS Comeback Response AF */ +#define P2PSD_AD_EID 0x6c + /* Advertisement Protocol IE ID */ +#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00 + /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */ +#define P2PSD_ADP_PROTO_ID 0x00 + /* Advertisement Protocol ID. 
Always 0 for P2P SD */ +#define P2PSD_GAS_OUI P2P_OUI + /* WFA OUI */ +#define P2PSD_GAS_OUI_SUBTYPE P2P_VER + /* OUI Subtype for GAS IE */ +#define P2PSD_GAS_NQP_INFOID 0xDDDD + /* NQP Query Info ID: 56797 */ +#define P2PSD_GAS_COMEBACKDEALY 0x00 + /* Not used in the Native GAS protocol */ + +/* Service Protocol Type */ +typedef enum p2psd_svc_protype { + SVC_RPOTYPE_ALL = 0, + SVC_RPOTYPE_BONJOUR = 1, + SVC_RPOTYPE_UPNP = 2, + SVC_RPOTYPE_WSD = 3, + SVC_RPOTYPE_WFDS = 11, + SVC_RPOTYPE_VENDOR = 255 +} p2psd_svc_protype_t; + +/* Service Discovery response status code */ +typedef enum { + P2PSD_RESP_STATUS_SUCCESS = 0, + P2PSD_RESP_STATUS_PROTYPE_NA = 1, + P2PSD_RESP_STATUS_DATA_NA = 2, + P2PSD_RESP_STATUS_BAD_REQUEST = 3 +} p2psd_resp_status_t; + +/* Advertisement Protocol IE tuple field */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl { + uint8 llm_pamebi; /* Query Response Length Limit bit 0-6, set to 0 plus + * Pre-Associated Message Exchange BSSID Independent bit 7, set to 0 + */ + uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t; + +/* Advertisement Protocol IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie { + uint8 id; /* IE ID: 0x6c - 108 */ + uint8 len; /* IE length */ + wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. 
Only one + * tuple is defined for P2P Service Discovery + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t; + +/* NQP Vendor-specific Content */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc { + uint8 oui_subtype; /* OUI Subtype: 0x09 */ + uint16 svc_updi; /* Service Update Indicator */ + uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request, + * wifi_p2psd_qresp_tlv_t type for service response + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t; + +/* Service Request TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t; + +/* Query Request Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Length of service request TLV, 5 plus the size of request data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t; + +/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame { + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qreq_len; /* Query Request Length */ + uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t; + +/* Service Response TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service 
Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 status; /* Value defined in Table 57 of P2P spec. */ + uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t; + +/* Query Response Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Lenth of service response TLV, 6 plus the size of resp data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t; + +/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t; + +/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint8 fragment_id; /* Fragmentation ID */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t; + +/* Wi-Fi GAS Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame { + uint8 category; /* 0x04 Public 
Action Frame */ + uint8 action; /* 0x6c Advertisement Protocol */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t + * or wifi_p2psd_gas_iresp_frame_t format + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _P2P_H_ */ diff --git a/bcmdhd.100.10.315.x/include/packed_section_end.h b/bcmdhd.100.10.315.x/include/packed_section_end.h new file mode 100644 index 0000000..886f967 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/packed_section_end.h @@ -0,0 +1,59 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. In order to handle all + * cases, packed structures should be delared as: + * + * #include + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include + * + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: packed_section_end.h 666738 2016-10-24 12:16:37Z $ + */ + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is NOT defined at this + * point, then there is a missing include of packed_section_start.h. + */ +#ifdef BWL_PACKED_SECTION + #undef BWL_PACKED_SECTION +#else + #error "BWL_PACKED_SECTION is NOT defined!" +#endif // endif + +/* Compiler-specific directives for structure packing are declared in + * packed_section_start.h. This marks the end of the structure packing section, + * so, undef them here. + */ +#undef BWL_PRE_PACKED_STRUCT +#undef BWL_POST_PACKED_STRUCT diff --git a/bcmdhd.100.10.315.x/include/packed_section_start.h b/bcmdhd.100.10.315.x/include/packed_section_start.h new file mode 100644 index 0000000..955cf68 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/packed_section_start.h @@ -0,0 +1,104 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. In order to handle all + * cases, packed structures should be delared as: + * + * #include + * + * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { + * some_struct_members; + * } BWL_POST_PACKED_STRUCT foobar_t; + * + * #include + * + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: packed_section_start.h 666738 2016-10-24 12:16:37Z $ + */ + +#ifndef _alignment_test_ +#define _alignment_test_ + +/* ASSERT default packing */ +typedef struct T4 { + uint8 a; + uint32 b; + uint16 c; + uint8 d; +} T4_t; + +/* 4 byte alignment support */ +/* +* a . . . +* b b b b +* c c d . +*/ + +/* + * Below function is meant to verify that this file is compiled with the default alignment of 4. + * Function will fail to compile if the condition is not met. 
+ */ +#ifdef __GNUC__ +#define VARIABLE_IS_NOT_USED __attribute__ ((unused)) +#else +#define VARIABLE_IS_NOT_USED +#endif // endif +static void alignment_test(void); +static void +VARIABLE_IS_NOT_USED alignment_test(void) +{ + /* verify 4 byte alignment support */ + STATIC_ASSERT(sizeof(T4_t) == 12); +} +#endif /* _alignment_test_ */ + +/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h + * and undefined in packed_section_end.h. If it is already defined at this + * point, then there is a missing include of packed_section_end.h. + */ +#ifdef BWL_PACKED_SECTION + #error "BWL_PACKED_SECTION is already defined!" +#else + #define BWL_PACKED_SECTION +#endif // endif + +#if defined(BWL_DEFAULT_PACKING) + /* generate an error if BWL_DEFAULT_PACKING is defined */ + #error "BWL_DEFAULT_PACKING not supported any more." +#endif /* BWL_DEFAULT_PACKING */ + +/* Declare compiler-specific directives for structure packing. */ +#if defined(__GNUC__) || defined(__lint) + #define BWL_PRE_PACKED_STRUCT + #define BWL_POST_PACKED_STRUCT __attribute__ ((packed)) +#elif defined(__CC_ARM) + #define BWL_PRE_PACKED_STRUCT __packed + #define BWL_POST_PACKED_STRUCT +#else + #error "Unknown compiler!" +#endif // endif diff --git a/bcmdhd.100.10.315.x/include/packed_section_start.h b/bcmdhd.100.10.315.x/include/pcicfg.h b/bcmdhd.100.10.315.x/include/pcicfg.h new file mode 100644 index 0000000..3b4a736 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/pcicfg.h @@ -0,0 +1,394 @@ +/* + * pcicfg.h: PCI configuration constants and structures. + * + * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: pcicfg.h 756835 2018-04-10 21:24:10Z $ + */ + +#ifndef _h_pcicfg_ +#define _h_pcicfg_ + +/* pci config status reg has a bit to indicate that capability ptr is present */ + +#define PCI_CAPPTR_PRESENT 0x0010 + +/* A structure for the config registers is nice, but in most + * systems the config space is not memory mapped, so we need + * field offsetts. :-( + */ +#define PCI_CFG_VID 0 +#define PCI_CFG_DID 2 +#define PCI_CFG_CMD 4 +#define PCI_CFG_STAT 6 +#define PCI_CFG_REV 8 +#define PCI_CFG_PROGIF 9 +#define PCI_CFG_SUBCL 0xa +#define PCI_CFG_BASECL 0xb +#define PCI_CFG_CLSZ 0xc +#define PCI_CFG_LATTIM 0xd +#define PCI_CFG_HDR 0xe +#define PCI_CFG_BIST 0xf +#define PCI_CFG_BAR0 0x10 +/* +* TODO: PCI_CFG_BAR1 is wrongly defined to be 0x14 whereas it should be +* 0x18 as per the PCIe full dongle spec. 
Need to modify the values below +* correctly at a later point of time +*/ +#define PCI_CFG_BAR1 0x14 +#define PCI_CFG_BAR2 0x18 +#define PCI_CFG_BAR3 0x1c +#define PCI_CFG_BAR4 0x20 +#define PCI_CFG_BAR5 0x24 +#define PCI_CFG_CIS 0x28 +#define PCI_CFG_SVID 0x2c +#define PCI_CFG_SSID 0x2e +#define PCI_CFG_ROMBAR 0x30 +#define PCI_CFG_CAPPTR 0x34 +#define PCI_CFG_INT 0x3c +#define PCI_CFG_PIN 0x3d +#define PCI_CFG_MINGNT 0x3e +#define PCI_CFG_MAXLAT 0x3f +#define PCI_CFG_DEVCTRL 0xd8 +#define PCI_CFG_TLCNTRL_5 0x814 + +/* PCI CAPABILITY DEFINES */ +#define PCI_CAP_POWERMGMTCAP_ID 0x01 +#define PCI_CAP_MSICAP_ID 0x05 +#define PCI_CAP_VENDSPEC_ID 0x09 +#define PCI_CAP_PCIECAP_ID 0x10 + +/* Data structure to define the Message Signalled Interrupt facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_msi { + uint8 capID; + uint8 nextptr; + uint16 msgctrl; + uint32 msgaddr; +} pciconfig_cap_msi; +#define MSI_ENABLE 0x1 /* bit 0 of msgctrl */ + +/* Data structure to define the Power management facility + * Valid for PCI and PCIE configurations + */ +typedef struct _pciconfig_cap_pwrmgmt { + uint8 capID; + uint8 nextptr; + uint16 pme_cap; + uint16 pme_sts_ctrl; + uint8 pme_bridge_ext; + uint8 data; +} pciconfig_cap_pwrmgmt; + +#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */ +#define PME_CSR_OFFSET 0x4 /* 4-bytes offset */ +#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */ +#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */ + +/* Data structure to define the PCIE capability */ +typedef struct _pciconfig_cap_pcie { + uint8 capID; + uint8 nextptr; + uint16 pcie_cap; + uint32 dev_cap; + uint16 dev_ctrl; + uint16 dev_status; + uint32 link_cap; + uint16 link_ctrl; + uint16 link_status; + uint32 slot_cap; + uint16 slot_ctrl; + uint16 slot_status; + uint16 root_ctrl; + uint16 root_cap; + uint32 root_status; +} pciconfig_cap_pcie; + +/* PCIE Enhanced CAPABILITY DEFINES */ +#define
PCIE_EXTCFG_OFFSET 0x100 +#define PCIE_ADVERRREP_CAPID 0x0001 +#define PCIE_VC_CAPID 0x0002 +#define PCIE_DEVSNUM_CAPID 0x0003 +#define PCIE_PWRBUDGET_CAPID 0x0004 + +/* PCIE Extended configuration */ +#define PCIE_ADV_CORR_ERR_MASK 0x114 +#define CORR_ERR_RE (1 << 0) /* Receiver */ +#define CORR_ERR_BT (1 << 6) /* Bad TLP */ +#define CORR_ERR_BD (1 << 7) /* Bad DLLP */ +#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */ +#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */ +#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \ + CORR_ERR_RR | CORR_ERR_RT) + +/* PCIE Root Control Register bits (Host mode only) */ +#define PCIE_RC_CORR_SERR_EN 0x0001 +#define PCIE_RC_NONFATAL_SERR_EN 0x0002 +#define PCIE_RC_FATAL_SERR_EN 0x0004 +#define PCIE_RC_PME_INT_EN 0x0008 +#define PCIE_RC_CRS_EN 0x0010 + +/* PCIE Root Capability Register bits (Host mode only) */ +#define PCIE_RC_CRS_VISIBILITY 0x0001 + +/* PCIe PMCSR Register bits */ +#define PCIE_PMCSR_PMESTAT 0x8000 + +/* Header to define the PCIE specific capabilities in the extended config space */ +typedef struct _pcie_enhanced_caphdr { + uint16 capID; + uint16 cap_ver : 4; + uint16 next_ptr : 12; +} pcie_enhanced_caphdr; + +#define PCIE_CFG_PMCSR 0x4C +#define PCI_BAR0_WIN 0x80 /* backplane addres space accessed by BAR0 */ +#define PCI_BAR1_WIN 0x84 /* backplane addres space accessed by BAR1 */ +#define PCI_SPROM_CONTROL 0x88 /* sprom property control */ +#define PCIE_CFG_SUBSYSTEM_CONTROL 0x88 /* used as subsystem control in PCIE devices */ +#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */ +#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */ +#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */ +#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */ +#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */ +#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */ +#define PCI_CLK_CTL_ST 
0xa8 /* pci config space clock control/status (>=rev14) */ +#define PCI_BAR0_WIN2 0xac /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */ +#define PCIE_CFG_DEVICE_CAPABILITY 0xb0 /* used as device capability in PCIE devices */ +#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */ +#define PCIE_CFG_DEVICE_CONTROL 0xb4 /* 0xb4 is used as device control in PCIE devices */ +#define PCIE_DC_AER_CORR_EN (1u << 0u) +#define PCIE_DC_AER_NON_FATAL_EN (1u << 1u) +#define PCIE_DC_AER_FATAL_EN (1u << 2u) +#define PCIE_DC_AER_UNSUP_EN (1u << 3u) + +#define PCI_BAR0_WIN2_OFFSET 0x1000u +#define PCIE2_BAR0_CORE2_WIN2_OFFSET 0x5000u + +#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */ +#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */ + +/* Private Registers */ +#define PCI_STAT_CTRL 0xa80 +#define PCI_L0_EVENTCNT 0xa84 +#define PCI_L0_STATETMR 0xa88 +#define PCI_L1_EVENTCNT 0xa8c +#define PCI_L1_STATETMR 0xa90 +#define PCI_L1_1_EVENTCNT 0xa94 +#define PCI_L1_1_STATETMR 0xa98 +#define PCI_L1_2_EVENTCNT 0xa9c +#define PCI_L1_2_STATETMR 0xaa0 +#define PCI_L2_EVENTCNT 0xaa4 +#define PCI_L2_STATETMR 0xaa8 + +#define PCI_LINK_STATUS 0x4dc +#define PCI_LINK_SPEED_MASK (15u << 0u) +#define PCI_LINK_SPEED_SHIFT (0) +#define PCIE_LNK_SPEED_GEN1 0x1 +#define PCIE_LNK_SPEED_GEN2 0x2 +#define PCIE_LNK_SPEED_GEN3 0x3 + +#define PCI_PL_SPARE 0x1808 /* Config to Increase external clkreq deasserted minimum time */ +#define PCI_CONFIG_EXT_CLK_MIN_TIME_MASK (1u << 31u) +#define PCI_CONFIG_EXT_CLK_MIN_TIME_SHIFT (31) + +#define PCI_ADV_ERR_CAP 0x100 +#define PCI_UC_ERR_STATUS 0x104 +#define PCI_UNCORR_ERR_MASK 0x108 +#define PCI_UCORR_ERR_SEVR 0x10c +#define PCI_CORR_ERR_STATUS 0x110 +#define PCI_CORR_ERR_MASK 0x114 +#define PCI_ERR_CAP_CTRL 0x118 +#define PCI_TLP_HDR_LOG1 0x11c +#define PCI_TLP_HDR_LOG2 0x120 +#define PCI_TLP_HDR_LOG3 0x124 +#define 
PCI_TLP_HDR_LOG4 0x128 +#define PCI_TL_CTRL_5 0x814 +#define PCI_TL_HDR_FC_ST 0x980 +#define PCI_TL_TGT_CRDT_ST 0x990 +#define PCI_TL_SMLOGIC_ST 0x998 +#define PCI_DL_ATTN_VEC 0x1040 +#define PCI_DL_STATUS 0x1048 + +#define PCI_PHY_CTL_0 0x1800 +#define PCI_SLOW_PMCLK_EXT_RLOCK (1 << 7) + +#define PCI_LINK_STATE_DEBUG 0x1c24 +#define PCI_RECOVERY_HIST 0x1ce4 +#define PCI_PHY_LTSSM_HIST_0 0x1cec +#define PCI_PHY_LTSSM_HIST_1 0x1cf0 +#define PCI_PHY_LTSSM_HIST_2 0x1cf4 +#define PCI_PHY_LTSSM_HIST_3 0x1cf8 +#define PCI_PHY_DBG_CLKREG_0 0x1e10 +#define PCI_PHY_DBG_CLKREG_1 0x1e14 +#define PCI_PHY_DBG_CLKREG_2 0x1e18 +#define PCI_PHY_DBG_CLKREG_3 0x1e1c + +/* Bit settings for PCIE_CFG_SUBSYSTEM_CONTROL register */ +#define PCIE_SSRESET_STATUS_BIT 13 +#define PCIE_SSRESET_DISABLE_BIT 14 +#define PCIE_SSRESET_DIS_ENUM_RST_BIT 15 + +/* Bit settings for PCI_UC_ERR_STATUS register */ +#define PCI_UC_ERR_URES (1 << 20) /* Unsupported Request Error Status */ +#define PCI_UC_ERR_ECRCS (1 << 19) /* ECRC Error Status */ +#define PCI_UC_ERR_MTLPS (1 << 18) /* Malformed TLP Status */ +#define PCI_UC_ERR_ROS (1 << 17) /* Receiver Overflow Status */ +#define PCI_UC_ERR_UCS (1 << 16) /* Unexpected Completion Status */ +#define PCI_UC_ERR_CAS (1 << 15) /* Completer Abort Status */ +#define PCI_UC_ERR_CTS (1 << 14) /* Completer Timeout Status */ +#define PCI_UC_ERR_FCPES (1 << 13) /* Flow Control Protocol Error Status */ +#define PCI_UC_ERR_PTLPS (1 << 12) /* Poisoned TLP Status */ +#define PCI_UC_ERR_DLPES (1 << 4) /* Data Link Protocol Error Status */ + +#define PCI_DL_STATUS_PHY_LINKUP (1 << 13) /* Status of LINK */ + +#define PCI_PMCR_REFUP 0x1814 /* Trefup time */ +#define PCI_PMCR_TREFUP_LO_MASK 0x3f +#define PCI_PMCR_TREFUP_LO_SHIFT 24 +#define PCI_PMCR_TREFUP_LO_BITS 6 +#define PCI_PMCR_TREFUP_HI_MASK 0xf +#define PCI_PMCR_TREFUP_HI_SHIFT 5 +#define PCI_PMCR_TREFUP_HI_BITS 4 +#define PCI_PMCR_TREFUP_MAX 0x400 +#define PCI_PMCR_TREFUP_MAX_SCALE 0x2000 + +#define 
PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */ +#define PCI_PMCR_TREFUP_EXT_SHIFT 22 +#define PCI_PMCR_TREFUP_EXT_SCALE 3 +#define PCI_PMCR_TREFUP_EXT_ON 1 +#define PCI_PMCR_TREFUP_EXT_OFF 0 + +#define PCI_TPOWER_SCALE_MASK 0x3 +#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */ + +#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */ +#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */ +#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */ +#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the + * 8KB window, so their address is the "regular" + * address plus 4K + */ +/* + * PCIE GEN2 changed some of the above locations for + * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase + * BAR0 maps 32K of register space +*/ +#define PCIE2_BAR0_WIN2 0x70 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCIE2_BAR0_WINSZ 0x8000 + +#define PCI_BAR0_WIN2_OFFSET 0x1000u +#define PCI_CORE_ENUM_OFFSET 0x2000u +#define PCI_CC_CORE_ENUM_OFFSET 0x3000u +#define PCI_SEC_BAR0_WIN_OFFSET 0x4000u +#define PCI_SEC_BAR0_WRAP_OFFSET 0x5000u +#define PCI_CORE_ENUM2_OFFSET 0x6000u +#define PCI_CC_CORE_ENUM2_OFFSET 0x7000u +#define PCI_LAST_OFFSET 0x8000u + +#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */ +/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */ +#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */ +#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */ +#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */ +#define PCI_SECOND_BAR0_OFFSET (16 * 1024) /* secondary bar 0 
window */ + +/* On AI chips we have a second window to map DMP regs are mapped: */ +#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */ + +/* PCI_INT_STATUS */ +#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */ + +/* PCI_INT_MASK */ +#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */ +#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */ +#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */ +#define PCI_CTO_INT_SHIFT 16 /* backplane SBErr interrupt mask */ +#define PCI_CTO_INT_MASK (1 << PCI_CTO_INT_SHIFT) /* backplane SBErr interrupt mask */ + +/* PCI_SPROM_CONTROL */ +#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */ +#define SPROM_LOCKED 0x08 /* SPROM Locked */ +#define SPROM_BLANK 0x04 /* indicating a blank SPROM */ +#define SPROM_WRITEEN 0x10 /* SPROM write enable */ +#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */ +#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */ +#define SPROM_OTPIN_USE 0x80 /* device OTP In use */ +#define SPROM_CFG_TO_SB_RST 0x400 /* backplane reset */ + +/* Bits in PCI command and status regs */ +#define PCI_CMD_IO 0x00000001 /* I/O enable */ +#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */ +#define PCI_CMD_MASTER 0x00000004 /* Master enable */ +#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */ +#define PCI_CMD_INVALIDATE 0x00000010 /* Invalidate? 
*/ +#define PCI_CMD_VGA_PAL 0x00000040 /* VGA Palette */ +#define PCI_STAT_TA 0x08000000 /* target abort status */ + +/* Header types */ +#define PCI_HEADER_MULTI 0x80 +#define PCI_HEADER_MASK 0x7f +typedef enum { + PCI_HEADER_NORMAL, + PCI_HEADER_BRIDGE, + PCI_HEADER_CARDBUS +} pci_header_types; + +#define PCI_CONFIG_SPACE_SIZE 256 + +#define DWORD_ALIGN(x) (x & ~(0x03)) +#define BYTE_POS(x) (x & 0x3) +#define WORD_POS(x) (x & 0x1) + +#define BYTE_SHIFT(x) (8 * BYTE_POS(x)) +#define WORD_SHIFT(x) (16 * WORD_POS(x)) + +#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF) +#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF) + +#define read_pci_cfg_byte(a) \ + (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff) + +#define read_pci_cfg_word(a) \ + (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff) + +#define write_pci_cfg_byte(a, val) do { /* RMW one byte; shift by BIT offset */ \ + uint32 tmpval; \ + tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFF << BYTE_SHIFT(a))) | \ + ((val) << BYTE_SHIFT(a)); \ + OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#define write_pci_cfg_word(a, val) do { /* RMW one word; shift by BIT offset */ \ + uint32 tmpval; \ + tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFFFF << WORD_SHIFT(a))) | \ + ((val) << WORD_SHIFT(a)); \ + OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \ + } while (0) + +#endif /* _h_pcicfg_ */ diff --git a/bcmdhd.100.10.315.x/include/pcie_core.h b/bcmdhd.100.10.315.x/include/pcie_core.h new file mode 100644 index 0000000..ef1fb02 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/pcie_core.h @@ -0,0 +1,1156 @@ +/* + * BCM43XX PCIE core hardware definitions. + * + * Copyright (C) 1999-2018, Broadcom.
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: pcie_core.h 770722 2018-07-04 11:24:12Z $ + */ +#ifndef _PCIE_CORE_H +#define _PCIE_CORE_H + +#include +#include + +#define REV_GE_64(rev) (rev >= 64) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif // endif + +/* PCIE Enumeration space offsets */ +#define PCIE_CORE_CONFIG_OFFSET 0x0 +#define PCIE_FUNC0_CONFIG_OFFSET 0x400 +#define PCIE_FUNC1_CONFIG_OFFSET 0x500 +#define PCIE_FUNC2_CONFIG_OFFSET 0x600 +#define PCIE_FUNC3_CONFIG_OFFSET 0x700 +#define PCIE_SPROM_SHADOW_OFFSET 0x800 +#define PCIE_SBCONFIG_OFFSET 0xE00 + +#define PCIEDEV_MAX_DMAS 4 + +/* PCIE Bar0 Address Mapping. 
Each function maps 16KB config space */ +#define PCIE_DEV_BAR0_SIZE 0x4000 +#define PCIE_BAR0_WINMAPCORE_OFFSET 0x0 +#define PCIE_BAR0_EXTSPROM_OFFSET 0x1000 +#define PCIE_BAR0_PCIECORE_OFFSET 0x2000 +#define PCIE_BAR0_CCCOREREG_OFFSET 0x3000 + +/* different register spaces to access thr'u pcie indirect access */ +#define PCIE_CONFIGREGS 1 /* Access to config space */ +#define PCIE_PCIEREGS 2 /* Access to pcie registers */ + +#define PCIEDEV_HOSTADDR_MAP_BASE 0x8000000 +#define PCIEDEV_HOSTADDR_MAP_WIN_MASK 0xFC000000 + +/* dma regs to control the flow between host2dev and dev2host */ +typedef volatile struct pcie_devdmaregs { + dma64regs_t tx; + uint32 PAD[2]; + dma64regs_t rx; + uint32 PAD[2]; +} pcie_devdmaregs_t; + +#define PCIE_DB_HOST2DEV_0 0x1 +#define PCIE_DB_HOST2DEV_1 0x2 +#define PCIE_DB_DEV2HOST_0 0x3 +#define PCIE_DB_DEV2HOST_1 0x4 + +/* door bell register sets */ +typedef struct pcie_doorbell { + uint32 host2dev_0; + uint32 host2dev_1; + uint32 dev2host_0; + uint32 dev2host_1; +} pcie_doorbell_t; + +/* Flow Ring Manager */ +#define IFRM_FR_IDX_MAX 256 +#define IFRM_FR_GID_MAX 4 +#define IFRM_FR_DEV_MAX 8 +#define IFRM_FR_TID_MAX 8 +#define IFRM_FR_DEV_VALID 2 + +#define IFRM_VEC_REG_BITS 32 + +#define IFRM_FR_PER_VECREG 4 +#define IFRM_FR_PER_VECREG_SHIFT 2 +#define IFRM_FR_PER_VECREG_MASK ((0x1 << IFRM_FR_PER_VECREG_SHIFT) - 1) + +#define IFRM_VEC_BITS_PER_FR (IFRM_VEC_REG_BITS/IFRM_FR_PER_VECREG) + +/* IFRM_DEV_0 : d11AC, IFRM_DEV_1 : d11AD */ +#define IFRM_DEV_0 0 +#define IFRM_DEV_1 1 + +#define IFRM_FR_GID_0 0 +#define IFRM_FR_GID_1 1 +#define IFRM_FR_GID_2 2 +#define IFRM_FR_GID_3 3 + +#define IFRM_TIDMASK 0xffffffff + +/* ifrm_ctrlst register */ +#define IFRM_EN (1<<0) +#define IFRM_BUFF_INIT_DONE (1<<1) +#define IFRM_COMPARE_EN0 (1<<4) +#define IFRM_COMPARE_EN1 (1<<5) +#define IFRM_COMPARE_EN2 (1<<6) +#define IFRM_COMPARE_EN3 (1<<7) +#define IFRM_INIT_DV0 (1<<8) +#define IFRM_INIT_DV1 (1<<9) +#define IFRM_INIT_DV2 (1<<10) +#define 
IFRM_INIT_DV3 (1<<11) + +/* ifrm_msk_arr.addr, ifrm_tid_arr.addr register */ +#define IFRM_ADDR_SHIFT 0 +#define IFRM_FRG_ID_SHIFT 8 + +/* ifrm_vec.diff_lat register */ +#define IFRM_DV_LAT (1<<0) +#define IFRM_DV_LAT_DONE (1<<1) +#define IFRM_SDV_OFFSET_SHIFT 4 +#define IFRM_SDV_FRGID_SHIFT 8 +#define IFRM_VECSTAT_MASK 0x3 +#define IFRM_VEC_MASK 0xff + +/* HMAP Windows */ +#define HMAP_MAX_WINDOWS 8 + +/* idma frm array */ +typedef struct pcie_ifrm_array { + uint32 addr; + uint32 data; +} pcie_ifrm_array_t; + +/* idma frm vector */ +typedef struct pcie_ifrm_vector { + uint32 diff_lat; + uint32 sav_tid; + uint32 sav_diff; + uint32 PAD[1]; +} pcie_ifrm_vector_t; + +/* idma frm interrupt */ +typedef struct pcie_ifrm_intr { + uint32 intstat; + uint32 intmask; +} pcie_ifrm_intr_t; + +/* HMAP window register set */ +typedef volatile struct pcie_hmapwindow { + uint32 baseaddr_lo; /* BaseAddrLower */ + uint32 baseaddr_hi; /* BaseAddrUpper */ + uint32 windowlength; /* Window Length */ + uint32 PAD[1]; +} pcie_hmapwindow_t; + +typedef volatile struct pcie_hmapviolation { + uint32 hmap_violationaddr_lo; /* violating address lo */ + uint32 hmap_violationaddr_hi; /* violating addr hi */ + uint32 hmap_violation_info; /* violation info */ + uint32 PAD[1]; +} pcie_hmapviolation_t; +/* SB side: PCIE core and host control registers */ +typedef volatile struct sbpcieregs { + uint32 control; /* host mode only */ + uint32 iocstatus; /* PCIE2: iostatus */ + uint32 PAD[1]; + uint32 biststatus; /* bist Status: 0x00C */ + uint32 gpiosel; /* PCIE gpio sel: 0x010 */ + uint32 gpioouten; /* PCIE gpio outen: 0x14 */ + uint32 gpioout; /* PCIE gpio out: 0x18 */ + uint32 PAD; + uint32 intstatus; /* Interrupt status: 0x20 */ + uint32 intmask; /* Interrupt mask: 0x24 */ + uint32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */ + uint32 obffcontrol; /* PCIE2: 0x2C */ + uint32 obffintstatus; /* PCIE2: 0x30 */ + uint32 obffdatastatus; /* PCIE2: 0x34 */ + uint32 PAD[1]; + uint32 ctoctrl; /* PCIE2: 0x3C 
*/ + uint32 errlog; /* PCIE2: 0x40 */ + uint32 errlogaddr; /* PCIE2: 0x44 */ + uint32 mailboxint; /* PCIE2: 0x48 */ + uint32 mailboxintmsk; /* PCIE2: 0x4c */ + uint32 ltrspacing; /* PCIE2: 0x50 */ + uint32 ltrhysteresiscnt; /* PCIE2: 0x54 */ + uint32 msivectorassign; /* PCIE2: 0x58 */ + uint32 intmask2; /* PCIE2: 0x5C */ + uint32 PAD[40]; + uint32 sbtopcie0; /* sb to pcie translation 0: 0x100 */ + uint32 sbtopcie1; /* sb to pcie translation 1: 0x104 */ + uint32 sbtopcie2; /* sb to pcie translation 2: 0x108 */ + uint32 sbtopcie0upper; /* sb to pcie translation 0: 0x10C */ + uint32 sbtopcie1upper; /* sb to pcie translation 1: 0x110 */ + uint32 PAD[3]; + + /* pcie core supports in direct access to config space */ + uint32 configaddr; /* pcie config space access: Address field: 0x120 */ + uint32 configdata; /* pcie config space access: Data field: 0x124 */ + union { + struct { + /* mdio access to serdes */ + uint32 mdiocontrol; /* controls the mdio access: 0x128 */ + uint32 mdiodata; /* Data to the mdio access: 0x12c */ + /* pcie protocol phy/dllp/tlp register indirect access mechanism */ + uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */ + uint32 pcieinddata; /* Data to/from the internal regsiter: 0x134 */ + uint32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */ + uint32 PAD[177]; + /* 0x400 - 0x7FF, PCIE Cfg Space, note: not used anymore in PcieGen2 */ + uint32 pciecfg[4][64]; + } pcie1; + struct { + /* mdio access to serdes */ + uint32 mdiocontrol; /* controls the mdio access: 0x128 */ + uint32 mdiowrdata; /* write data to mdio 0x12C */ + uint32 mdiorddata; /* read data to mdio 0x130 */ + uint32 PAD[3]; /* 0x134-0x138-0x13c */ + /* door bell registers available from gen2 rev5 onwards */ + pcie_doorbell_t dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */ + uint32 dataintf; /* 0x180 */ + uint32 PAD[1]; /* 0x184 */ + uint32 d2h_intrlazy_0; /* 0x188 */ + uint32 h2d_intrlazy_0; /* 0x18c */ + uint32 h2d_intstat_0; /* 0x190 */ + uint32 
h2d_intmask_0; /* 0x194 */ + uint32 d2h_intstat_0; /* 0x198 */ + uint32 d2h_intmask_0; /* 0x19c */ + uint32 ltr_state; /* 0x1A0 */ + uint32 pwr_int_status; /* 0x1A4 */ + uint32 pwr_int_mask; /* 0x1A8 */ + uint32 pme_source; /* 0x1AC */ + uint32 err_hdr_logreg1; /* 0x1B0 */ + uint32 err_hdr_logreg2; /* 0x1B4 */ + uint32 err_hdr_logreg3; /* 0x1B8 */ + uint32 err_hdr_logreg4; /* 0x1BC */ + uint32 err_code_logreg; /* 0x1C0 */ + uint32 axi_dbg_ctl; /* 0x1C4 */ + uint32 axi_dbg_data0; /* 0x1C8 */ + uint32 axi_dbg_data1; /* 0x1CC */ + uint32 PAD[4]; /* 0x1D0 - 0x1DF */ + uint32 clk_ctl_st; /* 0x1E0 */ + uint32 PAD[1]; /* 0x1E4 */ + uint32 powerctl; /* 0x1E8 */ + uint32 PAD[5]; /* 0x1EC - 0x1FF */ + pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */ + pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */ + pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */ + pcie_devdmaregs_t d2h1_dmaregs; /* 0x2c0 - 0x2fc */ + pcie_devdmaregs_t h2d2_dmaregs; /* 0x300 - 0x33c */ + pcie_devdmaregs_t d2h2_dmaregs; /* 0x340 - 0x37c */ + pcie_devdmaregs_t h2d3_dmaregs; /* 0x380 - 0x3bc */ + pcie_devdmaregs_t d2h3_dmaregs; /* 0x3c0 - 0x3fc */ + uint32 d2h_intrlazy_1; /* 0x400 */ + uint32 h2d_intrlazy_1; /* 0x404 */ + uint32 h2d_intstat_1; /* 0x408 */ + uint32 h2d_intmask_1; /* 0x40c */ + uint32 d2h_intstat_1; /* 0x410 */ + uint32 d2h_intmask_1; /* 0x414 */ + uint32 PAD[2]; /* 0x418 - 0x41C */ + uint32 d2h_intrlazy_2; /* 0x420 */ + uint32 h2d_intrlazy_2; /* 0x424 */ + uint32 h2d_intstat_2; /* 0x428 */ + uint32 h2d_intmask_2; /* 0x42c */ + uint32 d2h_intstat_2; /* 0x430 */ + uint32 d2h_intmask_2; /* 0x434 */ + uint32 PAD[10]; /* 0x438 - 0x45F */ + uint32 ifrm_ctrlst; /* 0x460 */ + uint32 PAD[1]; /* 0x464 */ + pcie_ifrm_array_t ifrm_msk_arr; /* 0x468 - 0x46F */ + pcie_ifrm_array_t ifrm_tid_arr[IFRM_FR_DEV_VALID]; + /* 0x470 - 0x47F */ + pcie_ifrm_vector_t ifrm_vec[IFRM_FR_DEV_MAX]; + /* 0x480 - 0x4FF */ + pcie_ifrm_intr_t ifrm_intr[IFRM_FR_DEV_MAX]; + /* 0x500 - 0x53F */ + /* HMAP regs for PCIE 
corerev >= 24 [0x540 - 0x5DF] */ + pcie_hmapwindow_t hmapwindow[HMAP_MAX_WINDOWS]; /* 0x540 - 0x5BF */ + pcie_hmapviolation_t hmapviolation; /* 0x5C0 - 0x5CF */ + uint32 hmap_window_config; /* 0x5D0 */ + uint32 PAD[3]; /* 0x5D4 - 0x5DF */ + + uint32 PAD[8]; /* 0x5E0 - 0x5FF */ + uint32 PAD[2][64]; /* 0x600 - 0x7FF */ + } pcie2; + } u; + uint16 sprom[64]; /* SPROM shadow Area : 0x800 - 0x880 */ + uint32 PAD[96]; /* 0x880 - 0x9FF */ + /* direct memory access (pcie2 rev19 and after) : 0xA00 - 0xAFF */ + union { + /* corerev < 64 */ + struct { + uint32 dar_ctrl; /* 0xA00 */ + uint32 PAD[7]; /* 0xA04-0xA1F */ + uint32 intstatus; /* 0xA20 */ + uint32 PAD[1]; /* 0xA24 */ + uint32 h2d_db_0_0; /* 0xA28 */ + uint32 h2d_db_0_1; /* 0xA2C */ + uint32 h2d_db_1_0; /* 0xA30 */ + uint32 h2d_db_1_1; /* 0xA34 */ + uint32 h2d_db_2_0; /* 0xA38 */ + uint32 h2d_db_2_1; /* 0xA3C */ + uint32 errlog; /* 0xA40 */ + uint32 erraddr; /* 0xA44 */ + uint32 mbox_int; /* 0xA48 */ + uint32 fis_ctrl; /* 0xA4C */ + uint32 PAD[36]; /* 0xA50 - 0xADC */ + uint32 clk_ctl_st; /* 0xAE0 */ + uint32 PAD[1]; /* 0xAE4 */ + uint32 powerctl; /* 0xAE8 */ + } dar; + /* corerev > = 64 */ + struct { + uint32 dar_ctrl; /* 0xA00 */ + uint32 dar_cap; /* 0xA04 */ + uint32 clk_ctl_st; /* 0xA08 */ + uint32 powerctl; /* 0xA0C */ + uint32 intstatus; /* 0xA10 */ + uint32 PAD[3]; /* 0xA14-0xA1F */ + uint32 h2d_db_0_0; /* 0xA20 */ + uint32 h2d_db_0_1; /* 0xA24 */ + uint32 h2d_db_1_0; /* 0xA28 */ + uint32 h2d_db_1_1; /* 0xA2C */ + uint32 h2d_db_2_0; /* 0xA30 */ + uint32 h2d_db_2_1; /* 0xA34 */ + uint32 h2d_db_3_0; /* 0xA38 */ + uint32 h2d_db_3_1; /* 0xA3C */ + uint32 h2d_db_4_0; /* 0xA40 */ + uint32 h2d_db_4_1; /* 0xA44 */ + uint32 h2d_db_5_0; /* 0xA48 */ + uint32 h2d_db_5_1; /* 0xA4C */ + uint32 h2d_db_6_0; /* 0xA50 */ + uint32 h2d_db_6_1; /* 0xA54 */ + uint32 h2d_db_7_0; /* 0xA58 */ + uint32 h2d_db_7_1; /* 0xA5C */ + uint32 errlog; /* 0xA60 */ + uint32 erraddr; /* 0xA64 */ + uint32 mbox_int; /* 0xA68 */ + uint32 fis_ctrl; /* 
0xA6C */ + } dar_64; + } u1; +} sbpcieregs_t; + +#define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */ + +/* PCI control */ +#define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */ +#define PCIE_RST 0x02 /* Value driven out to pin */ +#define PCIE_SPERST 0x04 /* SurvivePeRst */ +#define PCIE_FORCECFGCLKON_ALP 0x08 +#define PCIE_DISABLE_L1CLK_GATING 0x10 +#define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */ +#define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */ +#define PCIE_WakeModeL2 0x1000 /* Wake on L2 */ +#define PCIE_MULTIMSI_EN 0x2000 /* enable multi-vector MSI messages */ +#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */ +#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */ +#define PCIE_EN_MDIO_IN_PERST 0x20000 /* enable access to internal registers when PERST */ +#define PCIE_MSI_B2B_EN 0x100000 /* enable back-to-back MSI messages */ +#define PCIE_MSI_FIFO_CLEAR 0x200000 /* reset MSI FIFO */ +#define PCIE_IDMA_MODE_EN(rev) (REV_GE_64(rev) ? 
0x1 : 0x800000) /* implicit M2M DMA mode */ +#define PCIE_TL_CLK_DETCT 0x4000000 /* enable TL clk detection */ + +#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */ +#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */ +#define PCIE_SWPME_FN0 0x10000 +#define PCIE_SWPME_FN0_SHF 16 + +/* Interrupt status/mask */ +#define PCIE_INTA 0x01 /* PCIE INTA message is received */ +#define PCIE_INTB 0x02 /* PCIE INTB message is received */ +#define PCIE_INTFATAL 0x04 /* PCIE INTFATAL message is received */ +#define PCIE_INTNFATAL 0x08 /* PCIE INTNONFATAL message is received */ +#define PCIE_INTCORR 0x10 /* PCIE INTCORR message is received */ +#define PCIE_INTPME 0x20 /* PCIE INTPME message is received */ +#define PCIE_PERST 0x40 /* PCIE Reset Interrupt */ + +#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */ +#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */ +#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */ +#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */ +#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */ +#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */ +#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */ +#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */ + +/* PCIE MSI Vector Assignment register */ +#define MSIVEC_MB_0 (0x1 << 1) /* MSI Vector offset for mailbox0 is 2 */ +#define MSIVEC_MB_1 (0x1 << 2) /* MSI Vector offset for mailbox1 is 3 */ +#define MSIVEC_D2H0_DB0 (0x1 << 3) /* MSI Vector offset for interface0 door bell 0 is 4 */ +#define MSIVEC_D2H0_DB1 (0x1 << 4) /* MSI Vector offset for interface0 door bell 1 is 5 */ + +/* PCIE MailboxInt/MailboxIntMask register */ +#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */ +#define PCIE_MB_TOSB_FN0_1 0x0002 +#define PCIE_MB_TOSB_FN1_0 0x0004 +#define 
PCIE_MB_TOSB_FN1_1 0x0008 +#define PCIE_MB_TOSB_FN2_0 0x0010 +#define PCIE_MB_TOSB_FN2_1 0x0020 +#define PCIE_MB_TOSB_FN3_0 0x0040 +#define PCIE_MB_TOSB_FN3_1 0x0080 +#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */ +#define PCIE_MB_TOPCIE_FN0_1 0x0200 +#define PCIE_MB_TOPCIE_FN1_0 0x0400 +#define PCIE_MB_TOPCIE_FN1_1 0x0800 +#define PCIE_MB_TOPCIE_FN2_0 0x1000 +#define PCIE_MB_TOPCIE_FN2_1 0x2000 +#define PCIE_MB_TOPCIE_FN3_0 0x4000 +#define PCIE_MB_TOPCIE_FN3_1 0x8000 + +#define PCIE_MB_TOPCIE_DB0_D2H0(rev) (REV_GE_64(rev) ? 0x0001 : 0x010000) +#define PCIE_MB_TOPCIE_DB0_D2H1(rev) (REV_GE_64(rev) ? 0x0002 : 0x020000) +#define PCIE_MB_TOPCIE_DB1_D2H0(rev) (REV_GE_64(rev) ? 0x0004 : 0x040000) +#define PCIE_MB_TOPCIE_DB1_D2H1(rev) (REV_GE_64(rev) ? 0x0008 : 0x080000) +#define PCIE_MB_TOPCIE_DB2_D2H0(rev) (REV_GE_64(rev) ? 0x0010 : 0x100000) +#define PCIE_MB_TOPCIE_DB2_D2H1(rev) (REV_GE_64(rev) ? 0x0020 : 0x200000) +#define PCIE_MB_TOPCIE_DB3_D2H0(rev) (REV_GE_64(rev) ? 0x0040 : 0x400000) +#define PCIE_MB_TOPCIE_DB3_D2H1(rev) (REV_GE_64(rev) ? 0x0080 : 0x800000) +#define PCIE_MB_TOPCIE_DB4_D2H0(rev) (REV_GE_64(rev) ? 0x0100 : 0x0) +#define PCIE_MB_TOPCIE_DB4_D2H1(rev) (REV_GE_64(rev) ? 0x0200 : 0x0) +#define PCIE_MB_TOPCIE_DB5_D2H0(rev) (REV_GE_64(rev) ? 0x0400 : 0x0) +#define PCIE_MB_TOPCIE_DB5_D2H1(rev) (REV_GE_64(rev) ? 0x0800 : 0x0) +#define PCIE_MB_TOPCIE_DB6_D2H0(rev) (REV_GE_64(rev) ? 0x1000 : 0x0) +#define PCIE_MB_TOPCIE_DB6_D2H1(rev) (REV_GE_64(rev) ? 0x2000 : 0x0) +#define PCIE_MB_TOPCIE_DB7_D2H0(rev) (REV_GE_64(rev) ? 0x4000 : 0x0) +#define PCIE_MB_TOPCIE_DB7_D2H1(rev) (REV_GE_64(rev) ? 
0x8000 : 0x0) + +#define PCIE_MB_D2H_MB_MASK(rev) \ + (PCIE_MB_TOPCIE_DB0_D2H0(rev) | PCIE_MB_TOPCIE_DB0_D2H1(rev) | \ + PCIE_MB_TOPCIE_DB1_D2H0(rev) | PCIE_MB_TOPCIE_DB1_D2H1(rev) | \ + PCIE_MB_TOPCIE_DB2_D2H0(rev) | PCIE_MB_TOPCIE_DB2_D2H1(rev) | \ + PCIE_MB_TOPCIE_DB3_D2H0(rev) | PCIE_MB_TOPCIE_DB3_D2H1(rev) | \ + PCIE_MB_TOPCIE_DB4_D2H0(rev) | PCIE_MB_TOPCIE_DB4_D2H1(rev) | \ + PCIE_MB_TOPCIE_DB5_D2H0(rev) | PCIE_MB_TOPCIE_DB5_D2H1(rev) | \ + PCIE_MB_TOPCIE_DB6_D2H0(rev) | PCIE_MB_TOPCIE_DB6_D2H1(rev) | \ + PCIE_MB_TOPCIE_DB7_D2H0(rev) | PCIE_MB_TOPCIE_DB7_D2H1(rev)) + +#define SBTOPCIE0_BASE 0x08000000 +#define SBTOPCIE1_BASE 0x0c000000 + +/* On chips with CCI-400, the small pcie 128 MB region base has shifted */ +#define CCI400_SBTOPCIE0_BASE 0x20000000 +#define CCI400_SBTOPCIE1_BASE 0x24000000 + +/* SB to PCIE translation masks */ +#define SBTOPCIE0_MASK 0xfc000000 +#define SBTOPCIE1_MASK 0xfc000000 +#define SBTOPCIE2_MASK 0xc0000000 + +/* Access type bits (0:1) */ +#define SBTOPCIE_MEM 0 +#define SBTOPCIE_IO 1 +#define SBTOPCIE_CFG0 2 +#define SBTOPCIE_CFG1 3 + +/* Prefetch enable bit 2 */ +#define SBTOPCIE_PF 4 + +/* Write Burst enable for memory write bit 3 */ +#define SBTOPCIE_WR_BURST 8 + +/* config access */ +#define CONFIGADDR_FUNC_MASK 0x7000 +#define CONFIGADDR_FUNC_SHF 12 +#define CONFIGADDR_REG_MASK 0x0FFF +#define CONFIGADDR_REG_SHF 0 + +#define PCIE_CONFIG_INDADDR(f, r) ((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \ + (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF)) + +/* PCIE protocol regs Indirect Address */ +#define PCIEADDR_PROT_MASK 0x300 +#define PCIEADDR_PROT_SHF 8 +#define PCIEADDR_PL_TLP 0 +#define PCIEADDR_PL_DLLP 1 +#define PCIEADDR_PL_PLP 2 + +#define PCIE_CORE_REG_CONTROL 0x00u /* Control */ +#define PCIE_CORE_REG_IOSTATUS 0x04u /* IO status */ +#define PCIE_CORE_REG_BITSTATUS 0x0Cu /* bitstatus */ +#define PCIE_CORE_REG_GPIO_SEL 0x10u /* gpio sel */ +#define PCIE_CORE_REG_GPIO_OUT_EN 0x14u /* gpio out en */ +#define 
PCIE_CORE_REG_INT_STATUS 0x20u /* int status */ +#define PCIE_CORE_REG_INT_MASK 0x24u /* int mask */ +#define PCIE_CORE_REG_SB_PCIE_MB 0x28u /* sbpcie mb */ +#define PCIE_CORE_REG_ERRLOG 0x40u /* errlog */ +#define PCIE_CORE_REG_ERR_ADDR 0x44u /* errlog addr */ +#define PCIE_CORE_REG_MB_INTR 0x48u /* MB intr */ +#define PCIE_CORE_REG_SB_PCIE_0 0x100u /* sbpcie0 map */ +#define PCIE_CORE_REG_SB_PCIE_1 0x104u /* sbpcie1 map */ +#define PCIE_CORE_REG_SB_PCIE_2 0x108u /* sbpcie2 map */ + +/* PCIE Config registers */ +#define PCIE_CFG_DEV_STS_CTRL_2 0x0d4u /* dev_sts_control_2 */ +#define PCIE_CFG_ADV_ERR_CAP 0x100u /* adv_err_cap */ +#define PCIE_CFG_UC_ERR_STS 0x104u /* uc_err_status */ +#define PCIE_CFG_UC_ERR_MASK 0x108u /* ucorr_err_mask */ +#define PCIE_CFG_UNCOR_ERR_SERV 0x10cu /* ucorr_err_sevr */ +#define PCIE_CFG_CORR_ERR_STS 0x110u /* corr_err_status */ +#define PCIE_CFG_CORR_ERR_MASK 0x114u /* corr_err_mask */ +#define PCIE_CFG_ADV_ERR_CTRL 0x118u /* adv_err_cap_control */ +#define PCIE_CFG_HDR_LOG1 0x11Cu /* header_log1 */ +#define PCIE_CFG_HDR_LOG2 0x120u /* header_log2 */ +#define PCIE_CFG_HDR_LOG3 0x124u /* header_log3 */ +#define PCIE_CFG_HDR_LOG4 0x128u /* header_log4 */ +#define PCIE_CFG_PML1_SUB_CAP_ID 0x240u /* PML1sub_capID */ +#define PCIE_CFG_PML1_SUB_CAP_REG 0x244u /* PML1_sub_Cap_reg */ +#define PCIE_CFG_PML1_SUB_CTRL1 0x248u /* PML1_sub_control1 */ +#define PCIE_CFG_PML1_SUB_CTRL3 0x24Cu /* PML1_sub_control2 */ +#define PCIE_CFG_TL_CTRL_5 0x814u /* tl_control_5 */ +#define PCIE_CFG_PHY_ERR_ATT_VEC 0x1820u /* phy_err_attn_vec */ +#define PCIE_CFG_PHY_ERR_ATT_MASK 0x1824u /* phy_err_attn_mask */ + +/* PCIE protocol PHY diagnostic registers */ +#define PCIE_PLP_MODEREG 0x200u /* Mode */ +#define PCIE_PLP_STATUSREG 0x204u /* Status */ +#define PCIE_PLP_LTSSMCTRLREG 0x208u /* LTSSM control */ +#define PCIE_PLP_LTLINKNUMREG 0x20cu /* Link Training Link number */ +#define PCIE_PLP_LTLANENUMREG 0x210u /* Link Training Lane number */ +#define 
PCIE_PLP_LTNFTSREG 0x214u /* Link Training N_FTS */ +#define PCIE_PLP_ATTNREG 0x218u /* Attention */ +#define PCIE_PLP_ATTNMASKREG 0x21Cu /* Attention Mask */ +#define PCIE_PLP_RXERRCTR 0x220u /* Rx Error */ +#define PCIE_PLP_RXFRMERRCTR 0x224u /* Rx Framing Error */ +#define PCIE_PLP_RXERRTHRESHREG 0x228u /* Rx Error threshold */ +#define PCIE_PLP_TESTCTRLREG 0x22Cu /* Test Control reg */ +#define PCIE_PLP_SERDESCTRLOVRDREG 0x230u /* SERDES Control Override */ +#define PCIE_PLP_TIMINGOVRDREG 0x234u /* Timing param override */ +#define PCIE_PLP_RXTXSMDIAGREG 0x238u /* RXTX State Machine Diag */ +#define PCIE_PLP_LTSSMDIAGREG 0x23Cu /* LTSSM State Machine Diag */ + +/* PCIE protocol DLLP diagnostic registers */ +#define PCIE_DLLP_LCREG 0x100u /* Link Control */ +#define PCIE_DLLP_LSREG 0x104u /* Link Status */ +#define PCIE_DLLP_LAREG 0x108u /* Link Attention */ +#define PCIE_DLLP_LAMASKREG 0x10Cu /* Link Attention Mask */ +#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110u /* Next Tx Seq Num */ +#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114u /* Acked Tx Seq Num */ +#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118u /* Purged Tx Seq Num */ +#define PCIE_DLLP_RXSEQNUMREG 0x11Cu /* Rx Sequence Number */ +#define PCIE_DLLP_LRREG 0x120u /* Link Replay */ +#define PCIE_DLLP_LACKTOREG 0x124u /* Link Ack Timeout */ +#define PCIE_DLLP_PMTHRESHREG 0x128u /* Power Management Threshold */ +#define PCIE_DLLP_RTRYWPREG 0x12Cu /* Retry buffer write ptr */ +#define PCIE_DLLP_RTRYRPREG 0x130u /* Retry buffer Read ptr */ +#define PCIE_DLLP_RTRYPPREG 0x134u /* Retry buffer Purged ptr */ +#define PCIE_DLLP_RTRRWREG 0x138u /* Retry buffer Read/Write */ +#define PCIE_DLLP_ECTHRESHREG 0x13Cu /* Error Count Threshold */ +#define PCIE_DLLP_TLPERRCTRREG 0x140u /* TLP Error Counter */ +#define PCIE_DLLP_ERRCTRREG 0x144u /* Error Counter */ +#define PCIE_DLLP_NAKRXCTRREG 0x148u /* NAK Received Counter */ +#define PCIE_DLLP_TESTREG 0x14Cu /* Test */ +#define PCIE_DLLP_PKTBIST 0x150u /* Packet BIST */ +#define 
PCIE_DLLP_PCIE11 0x154u /* DLLP PCIE 1.1 reg */ + +#define PCIE_DLLP_LSREG_LINKUP (1u << 16u) + +/* PCIE protocol TLP diagnostic registers */ +#define PCIE_TLP_CONFIGREG 0x000u /* Configuration */ +#define PCIE_TLP_WORKAROUNDSREG 0x004u /* TLP Workarounds */ +#define PCIE_TLP_WRDMAUPPER 0x010u /* Write DMA Upper Address */ +#define PCIE_TLP_WRDMALOWER 0x014u /* Write DMA Lower Address */ +#define PCIE_TLP_WRDMAREQ_LBEREG 0x018u /* Write DMA Len/ByteEn Req */ +#define PCIE_TLP_RDDMAUPPER 0x01Cu /* Read DMA Upper Address */ +#define PCIE_TLP_RDDMALOWER 0x020u /* Read DMA Lower Address */ +#define PCIE_TLP_RDDMALENREG 0x024u /* Read DMA Len Req */ +#define PCIE_TLP_MSIDMAUPPER 0x028u /* MSI DMA Upper Address */ +#define PCIE_TLP_MSIDMALOWER 0x02Cu /* MSI DMA Lower Address */ +#define PCIE_TLP_MSIDMALENREG 0x030u /* MSI DMA Len Req */ +#define PCIE_TLP_SLVREQLENREG 0x034u /* Slave Request Len */ +#define PCIE_TLP_FCINPUTSREQ 0x038u /* Flow Control Inputs */ +#define PCIE_TLP_TXSMGRSREQ 0x03Cu /* Tx StateMachine and Gated Req */ +#define PCIE_TLP_ADRACKCNTARBLEN 0x040u /* Address Ack XferCnt and ARB Len */ +#define PCIE_TLP_DMACPLHDR0 0x044u /* DMA Completion Hdr 0 */ +#define PCIE_TLP_DMACPLHDR1 0x048u /* DMA Completion Hdr 1 */ +#define PCIE_TLP_DMACPLHDR2 0x04Cu /* DMA Completion Hdr 2 */ +#define PCIE_TLP_DMACPLMISC0 0x050u /* DMA Completion Misc0 */ +#define PCIE_TLP_DMACPLMISC1 0x054u /* DMA Completion Misc1 */ +#define PCIE_TLP_DMACPLMISC2 0x058u /* DMA Completion Misc2 */ +#define PCIE_TLP_SPTCTRLLEN 0x05Cu /* Split Controller Req len */ +#define PCIE_TLP_SPTCTRLMSIC0 0x060u /* Split Controller Misc 0 */ +#define PCIE_TLP_SPTCTRLMSIC1 0x064u /* Split Controller Misc 1 */ +#define PCIE_TLP_BUSDEVFUNC 0x068u /* Bus/Device/Func */ +#define PCIE_TLP_RESETCTR 0x06Cu /* Reset Counter */ +#define PCIE_TLP_RTRYBUF 0x070u /* Retry Buffer value */ +#define PCIE_TLP_TGTDEBUG1 0x074u /* Target Debug Reg1 */ +#define PCIE_TLP_TGTDEBUG2 0x078u /* Target Debug Reg2 */ +#define 
PCIE_TLP_TGTDEBUG3 0x07Cu /* Target Debug Reg3 */ +#define PCIE_TLP_TGTDEBUG4 0x080u /* Target Debug Reg4 */ + +/* PCIE2 MDIO register offsets */ +#define PCIE2_MDIO_CONTROL 0x128 +#define PCIE2_MDIO_WR_DATA 0x12C +#define PCIE2_MDIO_RD_DATA 0x130 + +/* MDIO control */ +#define MDIOCTL_DIVISOR_MASK 0x7fu /* clock to be used on MDIO */ +#define MDIOCTL_DIVISOR_VAL 0x2u +#define MDIOCTL_PREAM_EN 0x80u /* Enable preamble sequence */ +#define MDIOCTL_ACCESS_DONE 0x100u /* Transaction complete */ + +/* MDIO Data */ +#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */ +#define MDIODATA_TA 0x00020000 /* Turnaround */ +#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */ +#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */ +#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */ +#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */ +#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */ +#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */ +#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */ +#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */ +#define MDIODATA_WRITE 0x10000000 /* write Transaction */ +#define MDIODATA_READ 0x20000000 /* Read Transaction */ +#define MDIODATA_START 0x40000000 /* start of Transaction */ + +#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */ +#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */ + +/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */ +#define MDIOCTL2_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define MDIOCTL2_DIVISOR_VAL 0x2 +#define MDIOCTL2_REGADDR_SHF 8 /* Regaddr shift */ +#define MDIOCTL2_REGADDR_MASK 0x00FFFF00 /* Regaddr Mask */ +#define MDIOCTL2_DEVADDR_SHF 24 /* Physmedia devaddr shift */ +#define MDIOCTL2_DEVADDR_MASK 0x0f000000 /* Physmedia devaddr Mask */ +#define MDIOCTL2_SLAVE_BYPASS 0x10000000 /* IP slave bypass */ +#define MDIOCTL2_READ 0x20000000 /* Read 
Transaction */ + +#define MDIODATA2_DONE 0x80000000u /* rd/wr transaction done */ +#define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */ +#define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */ + +/* MDIO devices (SERDES modules) + * unlike old pcie cores (rev < 10), rev10 pcie serdes organizes registers into a few blocks. + * a two-layer mapping (blockidx, register offset) is required + */ +#define MDIO_DEV_IEEE0 0x000 +#define MDIO_DEV_IEEE1 0x001 +#define MDIO_DEV_BLK0 0x800 +#define MDIO_DEV_BLK1 0x801 +#define MDIO_DEV_BLK2 0x802 +#define MDIO_DEV_BLK3 0x803 +#define MDIO_DEV_BLK4 0x804 +#define MDIO_DEV_TXPLL 0x808 /* TXPLL register block idx */ +#define MDIO_DEV_TXCTRL0 0x820 +#define MDIO_DEV_SERDESID 0x831 +#define MDIO_DEV_RXCTRL0 0x840 + +/* XgxsBlk1_A Register Offsets */ +#define BLK1_PWR_MGMT0 0x16 +#define BLK1_PWR_MGMT1 0x17 +#define BLK1_PWR_MGMT2 0x18 +#define BLK1_PWR_MGMT3 0x19 +#define BLK1_PWR_MGMT4 0x1A + +/* serdes regs (rev < 10) */ +#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */ +#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ +#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ + /* SERDES RX registers */ +#define SERDES_RX_CTRL 1 /* Rx cntrl */ +#define SERDES_RX_TIMER1 2 /* Rx Timer1 */ +#define SERDES_RX_CDR 6 /* CDR */ +#define SERDES_RX_CDRBW 7 /* CDR BW */ + + /* SERDES RX control register */ +#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */ +#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */ + + /* SERDES PLL registers */ +#define SERDES_PLL_CTRL 1 /* PLL control reg */ +#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */ + +/* Power management threshold */ +#define PCIE_L0THRESHOLDTIME_MASK 0xFF00u /* bits 0 - 7 (NOTE(review): mask value 0xFF00 covers bits 8-15, contradicting this comment — verify intended field) */ +#define PCIE_L1THRESHOLDTIME_MASK 0xFF00u /* bits 8 - 15 */ +#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* shift for the L1 threshold time field */ +#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */ +#define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */ + +/* SPROM 
offsets */ +#define SRSH_ASPM_OFFSET 4 /* word 4 */ +#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */ +#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */ +#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */ +#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */ +#define SRSH_L23READY_EXIT_NOPERST 0x8000u /* bit 15 */ +#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */ +#define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */ +#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */ +#define SRSH_BD_OFFSET 6 /* word 6 */ +#define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */ + +/* PCI Capability ID's + * Reference include/linux/pci_regs.h + * #define PCI_CAP_LIST_ID 0 // Capability ID + * #define PCI_CAP_ID_PM 0x01 // Power Management + * #define PCI_CAP_ID_AGP 0x02 // Accelerated Graphics Port + * #define PCI_CAP_ID_VPD 0x03 // Vital Product Data + * #define PCI_CAP_ID_SLOTID 0x04 // Slot Identification + * #define PCI_CAP_ID_MSI 0x05 // Message Signalled Interrupts + * #define PCI_CAP_ID_CHSWP 0x06 // CompactPCI HotSwap + * #define PCI_CAP_ID_PCIX 0x07 // PCI-X + * #define PCI_CAP_ID_HT 0x08 // HyperTransport + * #define PCI_CAP_ID_VNDR 0x09 // Vendor-Specific + * #define PCI_CAP_ID_DBG 0x0A // Debug port + * #define PCI_CAP_ID_CCRC 0x0B // CompactPCI Central Resource Control + * #define PCI_CAP_ID_SHPC 0x0C // PCI Standard Hot-Plug Controller + * #define PCI_CAP_ID_SSVID 0x0D // Bridge subsystem vendor/device ID + * #define PCI_CAP_ID_AGP3 0x0E // AGP Target PCI-PCI bridge + * #define PCI_CAP_ID_SECDEV 0x0F // Secure Device + * #define PCI_CAP_ID_MSIX 0x11 // MSI-X + * #define PCI_CAP_ID_SATA 0x12 // SATA Data/Index Conf. 
+ * #define PCI_CAP_ID_AF 0x13 // PCI Advanced Features + * #define PCI_CAP_ID_EA 0x14 // PCI Enhanced Allocation + * #define PCI_CAP_ID_MAX PCI_CAP_ID_EA + */ + +#define PCIE_CAP_ID_EXP 0x10 // PCI Express + +/* PCIe Capabilities Offsets + * Reference include/linux/pci_regs.h + * #define PCIE_CAP_FLAGS 2 // Capabilities register + * #define PCIE_CAP_DEVCAP 4 // Device capabilities + * #define PCIE_CAP_DEVCTL 8 // Device Control + * #define PCIE_CAP_DEVSTA 10 // Device Status + * #define PCIE_CAP_LNKCAP 12 // Link Capabilities + * #define PCIE_CAP_LNKCTL 16 // Link Control + * #define PCIE_CAP_LNKSTA 18 // Link Status + * #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 // v1 endpoints end here + * #define PCIE_CAP_SLTCAP 20 // Slot Capabilities + * #define PCIE_CAP_SLTCTL 24 // Slot Control + * #define PCIE_CAP_SLTSTA 26 // Slot Status + * #define PCIE_CAP_RTCTL 28 // Root Control + * #define PCIE_CAP_RTCAP 30 // Root Capabilities + * #define PCIE_CAP_RTSTA 32 // Root Status + */ + +/* Linkcapability reg offset in PCIE Cap */ +#define PCIE_CAP_LINKCAP_OFFSET 12 /* linkcap offset in pcie cap */ +#define PCIE_CAP_LINKCAP_LNKSPEED_MASK 0xf /* Supported Link Speeds */ +#define PCIE_CAP_LINKCAP_GEN2 0x2 /* Value for GEN2 */ + +/* Uc_Err reg offset in AER Cap */ +#define PCIE_EXTCAP_ID_ERR 0x01 /* Advanced Error Reporting */ +#define PCIE_EXTCAP_AER_UCERR_OFFSET 4 /* Uc_Err reg offset in AER Cap */ +#define PCIE_EXTCAP_ERR_HEADER_LOG_0 28 +#define PCIE_EXTCAP_ERR_HEADER_LOG_1 32 +#define PCIE_EXTCAP_ERR_HEADER_LOG_2 36 +#define PCIE_EXTCAP_ERR_HEADER_LOG_3 40 + +/* L1SS reg offset in L1SS Ext Cap */ +#define PCIE_EXTCAP_ID_L1SS 0x1e /* PCI Express L1 PM Substates Capability */ +#define PCIE_EXTCAP_L1SS_CAP_OFFSET 4 /* L1SSCap reg offset in L1SS Cap */ +#define PCIE_EXTCAP_L1SS_CONTROL_OFFSET 8 /* L1SSControl reg offset in L1SS Cap */ +#define PCIE_EXTCAP_L1SS_CONTROL2_OFFSET 0xc /* L1SSControl reg offset in L1SS Cap */ + +/* Linkcontrol reg offset in PCIE Cap */ +#define 
PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */ +#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */ +#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */ +#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */ +#define PCIE_LINKSPEED_MASK 0xF0000u /* bits 0 - 3 of high word */ +#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */ + +/* Devcontrol reg offset in PCIE Cap */ +#define PCIE_CAP_DEVCTRL_OFFSET 8 /* devctrl offset in pcie cap */ +#define PCIE_CAP_DEVCTRL_MRRS_MASK 0x7000 /* Max read request size mask */ +#define PCIE_CAP_DEVCTRL_MRRS_SHIFT 12 /* Max read request size shift */ +#define PCIE_CAP_DEVCTRL_MRRS_128B 0 /* 128 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_256B 1 /* 256 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_512B 2 /* 512 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_1024B 3 /* 1024 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_MASK 0x00e0 /* Max payload size mask */ +#define PCIE_CAP_DEVCTRL_MPS_SHIFT 5 /* Max payload size shift */ +#define PCIE_CAP_DEVCTRL_MPS_128B 0 /* 128 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_256B 1 /* 256 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */ + +#define PCIE_ASPM_CTRL_MASK 3 /* bit 0 and 1 */ +#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */ + +#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */ +#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */ + +#define PCIE_EXT_L1SS_MASK 0xf /* Bits [3:0] of L1SSControl 0x248 */ +#define PCIE_EXT_L1SS_ENAB 0xf /* Bits [3:0] of L1SSControl 0x248 */ + +/* NumMsg and NumMsgEn in PCIE MSI Cap */ +#define MSICAP_NUM_MSG_SHF 17 +#define MSICAP_NUM_MSG_MASK (0x7 << MSICAP_NUM_MSG_SHF) +#define MSICAP_NUM_MSG_EN_SHF 20 +#define MSICAP_NUM_MSG_EN_MASK (0x7 << MSICAP_NUM_MSG_EN_SHF) 
+ +/* Devcontrol2 reg offset in PCIE Cap */ +#define PCIE_CAP_DEVCTRL2_OFFSET 0x28 /* devctrl2 offset in pcie cap */ +#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK 0x400 /* Latency Tolerance Reporting Enable */ +#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13 /* Enable OBFF mechanism, select signaling method */ +#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000 /* Enable OBFF mechanism, select signaling method */ + +/* LTR registers in PCIE Cap */ +#define PCIE_LTR0_REG_OFFSET 0x844u /* ltr0_reg offset in pcie cap */ +#define PCIE_LTR1_REG_OFFSET 0x848u /* ltr1_reg offset in pcie cap */ +#define PCIE_LTR2_REG_OFFSET 0x84cu /* ltr2_reg offset in pcie cap */ +#define PCIE_LTR0_REG_DEFAULT_60 0x883c883cu /* active latency default to 60usec */ +#define PCIE_LTR0_REG_DEFAULT_150 0x88968896u /* active latency default to 150usec */ +#define PCIE_LTR1_REG_DEFAULT 0x88648864u /* idle latency default to 100usec */ +#define PCIE_LTR2_REG_DEFAULT 0x90039003u /* sleep latency default to 3msec */ +#define PCIE_LTR_LAT_VALUE_MASK 0x3FF /* LTR Latency mask */ +#define PCIE_LTR_LAT_SCALE_SHIFT 10 /* LTR Scale shift */ +#define PCIE_LTR_LAT_SCALE_MASK 0x1C00 /* LTR Scale mask */ +#define PCIE_LTR_SNOOP_REQ_SHIFT 15 /* LTR SNOOP REQ shift */ +#define PCIE_LTR_SNOOP_REQ_MASK 0x8000 /* LTR SNOOP REQ mask */ + +/* Status reg PCIE_PLP_STATUSREG */ +#define PCIE_PLP_POLARITYINV_STAT 0x10 + +/* PCIE BRCM Vendor CAP REVID reg bits */ +#define BRCMCAP_PCIEREV_CT_MASK 0xF00u +#define BRCMCAP_PCIEREV_CT_SHIFT 8u +#define BRCMCAP_PCIEREV_REVID_MASK 0xFFu +#define BRCMCAP_PCIEREV_REVID_SHIFT 0 + +#define PCIE_REVREG_CT_PCIE1 0 +#define PCIE_REVREG_CT_PCIE2 1 + +/* PCIE GEN2 specific defines */ +/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */ +#define PCIE2R0_BRCMCAP_REVID_OFFSET 4 +#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET 8 +#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET 12 +#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET 16 +#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET 20 +#define 
PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET 24 +#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET 28 +#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET 32 +#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET 36 +#define PCIE2R0_BRCMCAP_INTMASK_OFFSET 40 +#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET 44 +#define PCIE2R0_BRCMCAP_BPADDR_OFFSET 48 +#define PCIE2R0_BRCMCAP_BPDATA_OFFSET 52 +#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET 56 + +/* definition of configuration space registers of PCIe gen2 + * http://hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/CurrentPcieGen2ProgramGuide/pcie_ep.htm + */ +#define PCIECFGREG_STATUS_CMD 0x4 +#define PCIECFGREG_PM_CSR 0x4C +#define PCIECFGREG_MSI_CAP 0x58 +#define PCIECFGREG_MSI_ADDR_L 0x5C +#define PCIECFGREG_MSI_ADDR_H 0x60 +#define PCIECFGREG_MSI_DATA 0x64 +#define PCIECFGREG_SPROM_CTRL 0x88 +#define PCIECFGREG_LINK_STATUS_CTRL 0xBCu +#define PCIECFGREG_LINK_STATUS_CTRL2 0xDCu +#define PCIECFGREG_DEV_STATUS_CTRL 0xB4u +#define PCIECFGGEN_DEV_STATUS_CTRL2 0xD4 +#define PCIECFGREG_RBAR_CTRL 0x228 +#define PCIECFGREG_PML1_SUB_CTRL1 0x248 +#define PCIECFGREG_PML1_SUB_CTRL2 0x24C +#define PCIECFGREG_REG_BAR2_CONFIG 0x4E0 +#define PCIECFGREG_REG_BAR3_CONFIG 0x4F4 +#define PCIECFGREG_PDL_CTRL1 0x1004 +#define PCIECFGREG_PDL_IDDQ 0x1814 +#define PCIECFGREG_REG_PHY_CTL7 0x181c +#define PCIECFGREG_PHY_DBG_CLKREQ0 0x1E10 +#define PCIECFGREG_PHY_DBG_CLKREQ1 0x1E14 +#define PCIECFGREG_PHY_DBG_CLKREQ2 0x1E18 +#define PCIECFGREG_PHY_DBG_CLKREQ3 0x1E1C +#define PCIECFGREG_PHY_LTSSM_HIST_0 0x1CEC +#define PCIECFGREG_PHY_LTSSM_HIST_1 0x1CF0 +#define PCIECFGREG_PHY_LTSSM_HIST_2 0x1CF4 +#define PCIECFGREG_PHY_LTSSM_HIST_3 0x1CF8 +#define PCIECFGREG_TREFUP 0x1814 +#define PCIECFGREG_TREFUP_EXT 0x1818 + +/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */ +#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */ +#define PCI_PM_L1_1_ENA_MASK 0x00000002 /* PCI-PM L1.1 Enabled */ +#define ASPM_L1_2_ENA_MASK 0x00000004 /* ASPM L1.2 Enabled */ +#define ASPM_L1_1_ENA_MASK 0x00000008 /* ASPM L1.1 
Enabled */ + +/* PCIe gen2 mailbox interrupt masks */ +#define I_MB 0x3 +#define I_BIT0 0x1 +#define I_BIT1 0x2 + +/* PCIE gen2 config regs */ +#define PCIIntstatus 0x090 +#define PCIIntmask 0x094 +#define PCISBMbx 0x98 + +/* enumeration Core regs */ +#define PCIH2D_MailBox 0x140 +#define PCIH2D_DB1 0x144 +#define PCID2H_MailBox 0x148 +#define PCIH2D_MailBox_1 0x150 /* for dma channel1 */ +#define PCIH2D_DB1_1 0x154 +#define PCID2H_MailBox_1 0x158 +#define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */ +#define PCIH2D_DB1_2 0x164 +#define PCID2H_MailBox_2 0x168 +#define PCIE_PWR_CTRL 0x1E8 +#define PCIE_CLK_CTRL 0x1E8 + +#define PCIControl(rev) (REV_GE_64(rev) ? 0xC00 : 0x00) +/* for corerev < 64 idma_en is in PCIControl register */ +#define IDMAControl(rev) (REV_GE_64(rev) ? 0x480 : 0x00) +#define PCIMailBoxInt(rev) (REV_GE_64(rev) ? 0xC30 : 0x48) +#define PCIMailBoxMask(rev) (REV_GE_64(rev) ? 0xC34 : 0x4C) +#define PCIFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xC10 : 0x20) +#define PCIDARClkCtl(rev) (REV_GE_64(rev) ? 0xA08 : 0xAE0) +#define PCIDARPwrCtl(rev) (REV_GE_64(rev) ? 0xA0C : 0xAE8) +#define PCIDARFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xA10 : 0xA20) +#define PCIDARH2D_DB0(rev) (REV_GE_64(rev) ? 0xA20 : 0xA28) +#define PCIDARErrlog(rev) (REV_GE_64(rev) ? 0xA60 : 0xA40) +#define PCIDARErrlog_Addr(rev) (REV_GE_64(rev) ? 0xA64 : 0xA44) +#define PCIDARMailboxint(rev) (REV_GE_64(rev) ? 0xA68 : 0xA48) + +#define PCIMSIVecAssign 0x58 + +/* HMAP Registers */ +/* base of all HMAP window registers */ +#define PCI_HMAP_WINDOW_BASE(rev) (REV_GE_64(rev) ? 0x580u : 0x540u) +#define PCI_HMAP_VIOLATION_ADDR_L(rev) (REV_GE_64(rev) ? 0x600u : 0x5C0u) +#define PCI_HMAP_VIOLATION_ADDR_U(rev) (REV_GE_64(rev) ? 0x604u : 0x5C4u) +#define PCI_HMAP_VIOLATION_INFO(rev) (REV_GE_64(rev) ? 0x608u : 0x5C8u) +#define PCI_HMAP_WINDOW_CONFIG(rev) (REV_GE_64(rev) ? 
0x610u : 0x5D0u) +#define PCI_HMAP_NWINDOWS_SHIFT 8 +#define PCI_HMAP_NWINDOWS_MASK 0x0000ff00 /* bits 8:15 */ + +#define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */ +#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */ + +#define PCIECFGREG_DEVCONTROL 0xB4 +#define PCIECFGREG_BASEADDR0 0x10 +#define PCIECFGREG_BASEADDR1 0x18 +#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12 +#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT) +#define PCIECFGREG_DEVCTRL_MPS_SHFT 5 +#define PCIECFGREG_DEVCTRL_MPS_MASK (0x7 << PCIECFGREG_DEVCTRL_MPS_SHFT) +#define PCIECFGREG_PM_CSR_STATE_MASK 0x00000003 +#define PCIECFGREG_PM_CSR_STATE_D0 0 +#define PCIECFGREG_PM_CSR_STATE_D1 1 +#define PCIECFGREG_PM_CSR_STATE_D2 2 +#define PCIECFGREG_PM_CSR_STATE_D3_HOT 3 +#define PCIECFGREG_PM_CSR_STATE_D3_COLD 4 + +/* Direct Access regs */ +#define DAR_ERRADDR(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.erraddr) : \ + OFFSETOF(sbpcieregs_t, u1.dar.erraddr)) +#define DAR_ERRLOG(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.errlog) : \ + OFFSETOF(sbpcieregs_t, u1.dar.errlog)) +#define DAR_PCIH2D_DB0_0(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_0) : \ + OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_0)) +#define DAR_PCIH2D_DB0_1(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_1) : \ + OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_1)) +#define DAR_PCIH2D_DB1_0(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_0) : \ + OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_0)) +#define DAR_PCIH2D_DB1_1(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_1) : \ + OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_1)) +#define DAR_PCIH2D_DB2_0(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_0) : \ + OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_0)) +#define DAR_PCIH2D_DB2_1(rev) (REV_GE_64(rev) ? 
\ + OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_1) : \ + OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_1)) +#define DAR_PCIH2D_DB3_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_0) +#define DAR_PCIH2D_DB3_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_1) +#define DAR_PCIH2D_DB4_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_0) +#define DAR_PCIH2D_DB4_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_1) +#define DAR_PCIH2D_DB5_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_0) +#define DAR_PCIH2D_DB5_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_1) +#define DAR_PCIH2D_DB6_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_0) +#define DAR_PCIH2D_DB6_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_1) +#define DAR_PCIH2D_DB7_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_0) +#define DAR_PCIH2D_DB7_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_1) + +#define DAR_PCIMailBoxInt(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.mbox_int) : \ + OFFSETOF(sbpcieregs_t, u1.dar.mbox_int)) +#define DAR_PCIE_PWR_CTRL(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.powerctl) : \ + OFFSETOF(sbpcieregs_t, u1.dar.powerctl)) +#define DAR_CLK_CTRL(rev) (REV_GE_64(rev) ? \ + OFFSETOF(sbpcieregs_t, u1.dar_64.clk_ctl_st) : \ + OFFSETOF(sbpcieregs_t, u1.dar.clk_ctl_st)) +#define DAR_INTSTAT(rev) (REV_GE_64(rev) ? 
\ + OFFSETOF(sbpcieregs_t, u1.dar_64.intstatus) : \ + OFFSETOF(sbpcieregs_t, u1.dar.intstatus)) + +#define DAR_FIS_CTRL(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.fis_ctrl) + +#define DAR_FIS_START_SHIFT 0u +#define DAR_FIS_START_MASK (1u << DAR_FIS_START_SHIFT) + +#define PCIE_PWR_REQ_PCIE (0x1 << 8) + +/* SROM hardware region */ +#define SROM_OFFSET_BAR1_CTRL 52 + +#define BAR1_ENC_SIZE_MASK 0x000e +#define BAR1_ENC_SIZE_SHIFT 1 + +#define BAR1_ENC_SIZE_1M 0 +#define BAR1_ENC_SIZE_2M 1 +#define BAR1_ENC_SIZE_4M 2 + +#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET 0xD4 +#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB 0x400 + +/* + * Latency Tolerance Reporting (LTR) states + * Active has the least tolerant latency requirement + * Sleep is most tolerant + */ +#define LTR_ACTIVE 2 +#define LTR_ACTIVE_IDLE 1 +#define LTR_SLEEP 0 +#define LTR_FINAL_MASK 0x300 +#define LTR_FINAL_SHIFT 8 + +/* pwrinstatus, pwrintmask regs */ +#define PCIEGEN2_PWRINT_D0_STATE_SHIFT 0 +#define PCIEGEN2_PWRINT_D1_STATE_SHIFT 1 +#define PCIEGEN2_PWRINT_D2_STATE_SHIFT 2 +#define PCIEGEN2_PWRINT_D3_STATE_SHIFT 3 +#define PCIEGEN2_PWRINT_L0_LINK_SHIFT 4 +#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT 5 +#define PCIEGEN2_PWRINT_L1_LINK_SHIFT 6 +#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT 7 +#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT 8 + +#define PCIEGEN2_PWRINT_D0_STATE_MASK (1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D1_STATE_MASK (1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D2_STATE_MASK (1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D3_STATE_MASK (1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT) +#define PCIEGEN2_PWRINT_L0_LINK_MASK (1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L0s_LINK_MASK (1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L1_LINK_MASK (1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK (1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT) +#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK (1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT) + 
+/* sbtopcie mail box */ +#define SBTOPCIE_MB_FUNC0_SHIFT 8 +#define SBTOPCIE_MB_FUNC1_SHIFT 10 +#define SBTOPCIE_MB_FUNC2_SHIFT 12 +#define SBTOPCIE_MB_FUNC3_SHIFT 14 + +#define SBTOPCIE_MB1_FUNC0_SHIFT 9 +#define SBTOPCIE_MB1_FUNC1_SHIFT 11 +#define SBTOPCIE_MB1_FUNC2_SHIFT 13 +#define SBTOPCIE_MB1_FUNC3_SHIFT 15 + +/* pcieiocstatus */ +#define PCIEGEN2_IOC_D0_STATE_SHIFT 8 +#define PCIEGEN2_IOC_D1_STATE_SHIFT 9 +#define PCIEGEN2_IOC_D2_STATE_SHIFT 10 +#define PCIEGEN2_IOC_D3_STATE_SHIFT 11 +#define PCIEGEN2_IOC_L0_LINK_SHIFT 12 +#define PCIEGEN2_IOC_L1_LINK_SHIFT 13 +#define PCIEGEN2_IOC_L1L2_LINK_SHIFT 14 +#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15 +#define PCIEGEN2_IOC_BME_SHIFT 20 + +#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT) +#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT) +#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT) +#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT) +#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT) +#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT) +#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT) +#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT) +#define PCIEGEN2_IOC_BME_MASK (1 << PCIEGEN2_IOC_BME_SHIFT) + +/* stat_ctrl */ +#define PCIE_STAT_CTRL_RESET 0x1 +#define PCIE_STAT_CTRL_ENABLE 0x2 +#define PCIE_STAT_CTRL_INTENABLE 0x4 +#define PCIE_STAT_CTRL_INTSTATUS 0x8 + +/* SPROMControl */ +#define PCIE_BAR1COHERENTACCEN (1 << 8) +#define PCIE_BAR2COHERENTACCEN (1 << 9) + +/* cpl_timeout_ctrl_reg */ +#define PCIE_CTO_TO_THRESHOLD_SHIFT 0 +#define PCIE_CTO_TO_THRESHHOLD_MASK (0xfffff << PCIE_CTO_TO_THRESHOLD_SHIFT) + +#define PCIE_CTO_CLKCHKCNT_SHIFT 24 +#define PCIE_CTO_CLKCHKCNT_MASK (0xf << PCIE_CTO_CLKCHKCNT_SHIFT) + +#define PCIE_CTO_ENAB_SHIFT 31 +#define PCIE_CTO_ENAB_MASK (0x1 << PCIE_CTO_ENAB_SHIFT) + +#define PCIE_CTO_TO_THRESH_DEFAULT 0x58000 +#define 
PCIE_CTO_CLKCHKCNT_VAL 0xA + +/* ErrLog */ +#define PCIE_SROMRD_ERR_SHIFT 5 +#define PCIE_SROMRD_ERR_MASK (0x1 << PCIE_SROMRD_ERR_SHIFT) + +#define PCIE_CTO_ERR_SHIFT 8 +#define PCIE_CTO_ERR_MASK (0x1 << PCIE_CTO_ERR_SHIFT) + +#define PCIE_CTO_ERR_CODE_SHIFT 9 +#define PCIE_CTO_ERR_CODE_MASK (0x3 << PCIE_CTO_ERR_CODE_SHIFT) + +#define PCIE_BP_CLK_OFF_ERR_SHIFT 12 +#define PCIE_BP_CLK_OFF_ERR_MASK (0x1 << PCIE_BP_CLK_OFF_ERR_SHIFT) + +#define PCIE_BP_IN_RESET_ERR_SHIFT 13 +#define PCIE_BP_IN_RESET_ERR_MASK (0x1 << PCIE_BP_IN_RESET_ERR_SHIFT) + +#ifdef BCMDRIVER +void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val); +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); +void pcie_set_trefup_time_100us(si_t *sih); +#endif /* BCMDRIVER */ + +/* DMA intstatus and intmask */ +#define I_PC (1 << 10) /* pci descriptor error */ +#define I_PD (1 << 11) /* pci data error */ +#define I_DE (1 << 12) /* descriptor protocol error */ +#define I_RU (1 << 13) /* receive descriptor underflow */ +#define I_RO (1 << 14) /* receive fifo overflow */ +#define I_XU (1 << 15) /* transmit fifo underflow */ +#define I_RI (1 << 16) /* receive interrupt */ +#define I_XI (1 << 24) /* transmit interrupt */ + +#endif /* _PCIE_CORE_H */ diff --git a/bcmdhd.100.10.315.x/include/rte_ioctl.h b/bcmdhd.100.10.315.x/include/rte_ioctl.h new file mode 100644 index 0000000..cbeef7d --- /dev/null +++ b/bcmdhd.100.10.315.x/include/rte_ioctl.h @@ -0,0 +1,103 @@ +/* + * HND Run Time Environment ioctl. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: rte_ioctl.h 699094 2017-05-11 22:41:10Z $ + */ + +#ifndef _rte_ioctl_h_ +#define _rte_ioctl_h_ + +/* RTE IOCTL definitions for generic ether devices */ +#define RTEGHWADDR 0x8901 +#define RTESHWADDR 0x8902 +#define RTEGMTU 0x8903 +#define RTEGSTATS 0x8904 +#define RTEGALLMULTI 0x8905 +#define RTESALLMULTI 0x8906 +#define RTEGPROMISC 0x8907 +#define RTESPROMISC 0x8908 +#define RTESMULTILIST 0x8909 +#define RTEGUP 0x890A +#define RTEGPERMADDR 0x890B +#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */ +#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */ +#define RTEDEVTIMESYNC 0x890E /* Device TimeSync */ +#define RTEDEVDSNOTIFY 0x890F /* Bus DS state notification */ +#define RTED11DMALPBK_INIT 0x8910 /* D11 DMA loopback init */ +#define RTED11DMALPBK_UNINIT 0x8911 /* D11 DMA loopback uninit */ +#define RTED11DMALPBK_RUN 0x8912 /* D11 DMA loopback run */ +#define RTEDEVTSBUFPOST 0x8913 /* Async interface for tsync buffer post */ + +#define RTE_IOCTL_QUERY 0x00 +#define RTE_IOCTL_SET 0x01 +#define RTE_IOCTL_OVL_IDX_MASK 0x1e +#define RTE_IOCTL_OVL_RSV 0x20 +#define RTE_IOCTL_OVL 0x40 +#define RTE_IOCTL_OVL_IDX_SHIFT 1 + +enum hnd_ioctl_cmd { + HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */ + + /* PCIEDEV specific wl <--> bus ioctls */ + BUS_GET_VAR = 2, + BUS_SET_VAR = 3, + BUS_FLUSH_RXREORDER_Q = 4, + BUS_SET_LTR_STATE = 5, + BUS_FLUSH_CHAINED_PKTS = 6, + BUS_SET_COPY_COUNT = 7, + BUS_UPDATE_FLOW_PKTS_MAX = 8, + BUS_UPDATE_EXTRA_TXLFRAGS = 9, + BUS_UPDATE_FRWD_RESRV_BUFCNT = 10, + BUS_PCIE_CONFIG_ACCESS = 11 +}; + +#define SDPCMDEV_SET_MAXTXPKTGLOM 1 +#define RTE_MEMUSEINFO_VER 0x00 + +typedef struct memuse_info { + uint16 ver; /* version of this struct */ + uint16 len; /* length in bytes of this structure */ + uint32 tot; /* Total memory */ + uint32 text_len; /* Size of Text segment memory */ + uint32 data_len; /* Size of Data segment memory */ + uint32 bss_len; /* Size of BSS 
segment memory */ + + uint32 arena_size; /* Total Heap size */ + uint32 arena_free; /* Heap memory available or free */ + uint32 inuse_size; /* Heap memory currently in use */ + uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */ + uint32 inuse_overhead; /* tally of allocated mem_t blocks */ + uint32 inuse_total; /* Heap in-use + Heap overhead memory */ + uint32 free_lwm; /* Least free size since reclaim */ + uint32 mf_count; /* Malloc failure count */ +} memuse_info_t; + +/* For D11 DMA loopback test */ +typedef struct d11_dmalpbk_args { + uint8 *buf; + int32 len; +} d11_dmalpbk_args_t; +#endif /* _rte_ioctl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/sbchipc.h b/bcmdhd.100.10.315.x/include/sbchipc.h new file mode 100644 index 0000000..e4e25ac --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbchipc.h @@ -0,0 +1,4643 @@ +/* + * SiliconBackplane Chipcommon core hardware definitions. + * + * The chipcommon core provides chip identification, SB control, + * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, + * GPIO interface, extbus, and support for serial and parallel flashes. + * + * $Id: sbchipc.h 763883 2018-05-22 17:57:56Z $ + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + */ + +#ifndef _SBCHIPC_H +#define _SBCHIPC_H + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +#define BCM_MASK32(msb, lsb) ((~0u >> (32u - (msb) - 1u)) & (~0u << (lsb))) + +/** + * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the + * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to + * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead + * be assigned their respective chipc-specific address space and connected to the Always On + * Backplane via the APB interface. 
+ */ +typedef volatile struct { + uint32 PAD[384]; + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; /* 0x604 */ + uint32 pmustatus; /* 0x608 */ + uint32 res_state; /* 0x60C */ + uint32 res_pending; /* 0x610 */ + uint32 pmutimer; /* 0x614 */ + uint32 min_res_mask; /* 0x618 */ + uint32 max_res_mask; /* 0x61C */ + uint32 res_table_sel; /* 0x620 */ + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; /* 0x640 */ + uint32 res_req_timer; /* 0x644 */ + uint32 res_req_mask; /* 0x648 */ + uint32 core_cap_ext; /* 0x64C */ + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 ILPPeriod; /* 0x674 */ + uint32 PAD[2]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 mac_res_req_timer; /* 0x688 */ + uint32 mac_res_req_mask; /* 0x68c */ + uint32 PAD[18]; + uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 pmu_statstimer_addr; /* 0x6e0 */ + uint32 pmu_statstimer_ctrl; /* 0x6e4 */ + uint32 pmu_statstimer_N; /* 0x6e8 */ + uint32 PAD[1]; + uint32 mac_res_req_timer1; /* 0x6f0 */ + uint32 mac_res_req_mask1; /* 0x6f4 */ + uint32 PAD[2]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[14]; + uint32 pmuintstatus; /* 0x740 */ + uint32 extwakeupstatus; /* 0x744 */ + uint32 watchdog_res_mask; /* 0x748 */ + uint32 PAD[1]; /* 0x74C */ + uint32 swscratch; /* 0x750 */ + uint32 PAD[3]; /* 0x754-0x75C */ + uint32 extwakemask0; /* 0x760 */ + uint32 extwakemask1; /* 0x764 */ + uint32 PAD[2]; /* 0x768-0x76C */ + uint32 extwakereqmask[2]; /* 0x770-0x774 */ + 
uint32 PAD[2]; /* 0x778-0x77C */ + uint32 pmuintctrl0; /* 0x780 */ + uint32 pmuintctrl1; /* 0x784 */ + uint32 PAD[2]; + uint32 extwakectrl[2]; /* 0x790 */ + uint32 PAD[7]; + uint32 fis_ctrl_status; /* 0x7b4 */ + uint32 fis_min_res_mask; /* 0x7b8 */ + uint32 PAD[1]; + uint32 PrecisionTmrCtrlStatus; /* 0x7c0 */ +} pmuregs_t; + +typedef struct eci_prerev35 { + uint32 eci_output; + uint32 eci_control; + uint32 eci_inputlo; + uint32 eci_inputmi; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolaritymi; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskmi; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventmi; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskmi; + uint32 eci_eventmaskhi; + uint32 PAD[3]; +} eci_prerev35_t; + +typedef struct eci_rev35 { + uint32 eci_outputlo; + uint32 eci_outputhi; + uint32 eci_controllo; + uint32 eci_controlhi; + uint32 eci_inputlo; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskhi; + uint32 eci_auxtx; + uint32 eci_auxrx; + uint32 eci_datatag; + uint32 eci_uartescvalue; + uint32 eci_autobaudctr; + uint32 eci_uartfifolevel; +} eci_rev35_t; + +typedef struct flash_config { + uint32 PAD[19]; + /* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */ + uint32 flashstrconfig; +} flash_config_t; + +typedef volatile struct { + uint32 chipid; /* 0x0 */ + uint32 capabilities; + uint32 corecontrol; /* corerev >= 1 */ + uint32 bist; + + /* OTP */ + uint32 otpstatus; /* 0x10, corerev >= 10 */ + uint32 otpcontrol; + uint32 otpprog; + uint32 otplayout; /* corerev >= 23 */ + + /* Interrupt control */ + uint32 intstatus; /* 0x20 */ + uint32 intmask; + + /* Chip specific regs */ + uint32 chipcontrol; /* 0x28, rev >= 11 */ + uint32 chipstatus; /* 0x2c, rev >= 11 */ + 
+ /* Jtag Master */ + uint32 jtagcmd; /* 0x30, rev >= 10 */ + uint32 jtagir; + uint32 jtagdr; + uint32 jtagctrl; + + /* serial flash interface registers */ + uint32 flashcontrol; /* 0x40 */ + uint32 flashaddress; + uint32 flashdata; + uint32 otplayoutextension; /* rev >= 35 */ + + /* Silicon backplane configuration broadcast control */ + uint32 broadcastaddress; /* 0x50 */ + uint32 broadcastdata; + + /* gpio - cleared only by power-on-reset */ + uint32 gpiopullup; /* 0x58, corerev >= 20 */ + uint32 gpiopulldown; /* 0x5c, corerev >= 20 */ + uint32 gpioin; /* 0x60 */ + uint32 gpioout; /* 0x64 */ + uint32 gpioouten; /* 0x68 */ + uint32 gpiocontrol; /* 0x6C */ + uint32 gpiointpolarity; /* 0x70 */ + uint32 gpiointmask; /* 0x74 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioevent; + uint32 gpioeventintmask; + + /* Watchdog timer */ + uint32 watchdog; /* 0x80 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioeventintpolarity; + + /* GPIO based LED powersave registers corerev >= 16 */ + uint32 gpiotimerval; /* 0x88 */ + uint32 gpiotimeroutmask; + + /* clock control */ + uint32 clockcontrol_n; /* 0x90 */ + uint32 clockcontrol_sb; /* aka m0 */ + uint32 clockcontrol_pci; /* aka m1 */ + uint32 clockcontrol_m2; /* mii/uart/mipsref */ + uint32 clockcontrol_m3; /* cpu */ + uint32 clkdiv; /* corerev >= 3 */ + uint32 gpiodebugsel; /* corerev >= 28 */ + uint32 capabilities_ext; /* 0xac */ + + /* pll delay registers (corerev >= 4) */ + uint32 pll_on_delay; /* 0xb0 */ + uint32 fref_sel_delay; + uint32 slow_clk_ctl; /* 5 < corerev < 10 */ + uint32 PAD; + + /* Instaclock registers (corerev >= 10) */ + uint32 system_clk_ctl; /* 0xc0 */ + uint32 clkstatestretch; + uint32 PAD[2]; + + /* Indirect backplane access (corerev >= 22) */ + uint32 bp_addrlow; /* 0xd0 */ + uint32 bp_addrhigh; + uint32 bp_data; + uint32 PAD; + uint32 bp_indaccess; + /* SPI registers, corerev >= 37 */ + uint32 gsioctrl; + uint32 gsioaddress; + uint32 gsiodata; + + /* More clock dividers (corerev >= 32) */ + 
uint32 clkdiv2; + /* FAB ID (corerev >= 40) */ + uint32 otpcontrol1; + uint32 fabid; /* 0xf8 */ + + /* In AI chips, pointer to erom */ + uint32 eromptr; /* 0xfc */ + + /* ExtBus control registers (corerev >= 3) */ + uint32 pcmcia_config; /* 0x100 */ + uint32 pcmcia_memwait; + uint32 pcmcia_attrwait; + uint32 pcmcia_iowait; + uint32 ide_config; + uint32 ide_memwait; + uint32 ide_attrwait; + uint32 ide_iowait; + uint32 prog_config; + uint32 prog_waitcount; + uint32 flash_config; + uint32 flash_waitcount; + uint32 SECI_config; /* 0x130 SECI configuration */ + uint32 SECI_status; + uint32 SECI_statusmask; + uint32 SECI_rxnibchanged; + + uint32 PAD[20]; + + /* SROM interface (corerev >= 32) */ + uint32 sromcontrol; /* 0x190 */ + uint32 sromaddress; + uint32 sromdata; + uint32 PAD[1]; /* 0x19C */ + /* NAND flash registers for BCM4706 (corerev = 31) */ + uint32 nflashctrl; /* 0x1a0 */ + uint32 nflashconf; + uint32 nflashcoladdr; + uint32 nflashrowaddr; + uint32 nflashdata; + uint32 nflashwaitcnt0; /* 0x1b4 */ + uint32 PAD[2]; + + uint32 seci_uart_data; /* 0x1C0 */ + uint32 seci_uart_bauddiv; + uint32 seci_uart_fcr; + uint32 seci_uart_lcr; + uint32 seci_uart_mcr; + uint32 seci_uart_lsr; + uint32 seci_uart_msr; + uint32 seci_uart_baudadj; + /* Clock control and hardware workarounds (corerev >= 20) */ + uint32 clk_ctl_st; /* 0x1e0 */ + uint32 hw_war; + uint32 powerctl; /* 0x1e8 */ + uint32 PAD[69]; + + /* UARTs */ + uint8 uart0data; /* 0x300 */ + uint8 uart0imr; + uint8 uart0fcr; + uint8 uart0lcr; + uint8 uart0mcr; + uint8 uart0lsr; + uint8 uart0msr; + uint8 uart0scratch; + uint8 PAD[248]; /* corerev >= 1 */ + + uint8 uart1data; /* 0x400 */ + uint8 uart1imr; + uint8 uart1fcr; + uint8 uart1lcr; + uint8 uart1mcr; + uint8 uart1lsr; + uint8 uart1msr; + uint8 uart1scratch; /* 0x407 */ + uint32 PAD[50]; + uint32 sr_memrw_addr; /* 0x4d0 */ + uint32 sr_memrw_data; /* 0x4d4 */ + uint32 PAD[10]; + + /* save/restore, corerev >= 48 */ + uint32 sr_capability; /* 0x500 */ + uint32 
sr_control0; /* 0x504 */ + uint32 sr_control1; /* 0x508 */ + uint32 gpio_control; /* 0x50C */ + uint32 PAD[29]; + /* 2 SR engines case */ + uint32 sr1_control0; /* 0x584 */ + uint32 sr1_control1; /* 0x588 */ + uint32 PAD[29]; + /* PMU registers (corerev >= 20) */ + /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP. + * The CPU must read them twice, compare, and retry if different. + */ + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 core_cap_ext; /* 0x64c */ + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 ILPPeriod; /* 0x674 */ + uint32 PAD[2]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 mac_res_req_timer; /* 0x688 */ + uint32 mac_res_req_mask; /* 0x68c */ + uint32 PAD[18]; + uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 pmu_statstimer_addr; /* 0x6e0 */ + uint32 pmu_statstimer_ctrl; /* 0x6e4 */ + uint32 pmu_statstimer_N; /* 0x6e8 */ + uint32 PAD[1]; + uint32 mac_res_req_timer1; /* 0x6f0 */ + uint32 mac_res_req_mask1; /* 0x6f4 */ + uint32 PAD[2]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[14]; + uint32 pmuintstatus; /* 0x740 */ + uint32 extwakeupstatus; /* 0x744 */ + uint32 PAD[6]; + uint32 extwakemask0; /* 0x760 */ + 
uint32 extwakemask1; /* 0x764 */ + uint32 PAD[2]; /* 0x768-0x76C */ + uint32 extwakereqmask[2]; /* 0x770-0x774 */ + uint32 PAD[2]; /* 0x778-0x77C */ + uint32 pmuintctrl0; /* 0x780 */ + uint32 PAD[3]; /* 0x784 - 0x78c */ + uint32 extwakectrl[1]; /* 0x790 */ + uint32 PAD[8]; + uint32 fis_ctrl_status; /* 0x7b4 */ + uint32 fis_min_res_mask; /* 0x7b8 */ + uint32 PAD[17]; + uint16 sromotp[512]; /* 0x800 */ +#ifdef CCNFLASH_SUPPORT + /* Nand flash MLC controller registers (corerev >= 38) */ + uint32 nand_revision; /* 0xC00 */ + uint32 nand_cmd_start; + uint32 nand_cmd_addr_x; + uint32 nand_cmd_addr; + uint32 nand_cmd_end_addr; + uint32 nand_cs_nand_select; + uint32 nand_cs_nand_xor; + uint32 PAD; + uint32 nand_spare_rd0; + uint32 nand_spare_rd4; + uint32 nand_spare_rd8; + uint32 nand_spare_rd12; + uint32 nand_spare_wr0; + uint32 nand_spare_wr4; + uint32 nand_spare_wr8; + uint32 nand_spare_wr12; + uint32 nand_acc_control; + uint32 PAD; + uint32 nand_config; + uint32 PAD; + uint32 nand_timing_1; + uint32 nand_timing_2; + uint32 nand_semaphore; + uint32 PAD; + uint32 nand_devid; + uint32 nand_devid_x; + uint32 nand_block_lock_status; + uint32 nand_intfc_status; + uint32 nand_ecc_corr_addr_x; + uint32 nand_ecc_corr_addr; + uint32 nand_ecc_unc_addr_x; + uint32 nand_ecc_unc_addr; + uint32 nand_read_error_count; + uint32 nand_corr_stat_threshold; + uint32 PAD[2]; + uint32 nand_read_addr_x; + uint32 nand_read_addr; + uint32 nand_page_program_addr_x; + uint32 nand_page_program_addr; + uint32 nand_copy_back_addr_x; + uint32 nand_copy_back_addr; + uint32 nand_block_erase_addr_x; + uint32 nand_block_erase_addr; + uint32 nand_inv_read_addr_x; + uint32 nand_inv_read_addr; + uint32 PAD[2]; + uint32 nand_blk_wr_protect; + uint32 PAD[3]; + uint32 nand_acc_control_cs1; + uint32 nand_config_cs1; + uint32 nand_timing_1_cs1; + uint32 nand_timing_2_cs1; + uint32 PAD[20]; + uint32 nand_spare_rd16; + uint32 nand_spare_rd20; + uint32 nand_spare_rd24; + uint32 nand_spare_rd28; + uint32 
nand_cache_addr; + uint32 nand_cache_data; + uint32 nand_ctrl_config; + uint32 nand_ctrl_status; +#endif /* CCNFLASH_SUPPORT */ + uint32 gci_corecaps0; /* GCI starting at 0xC00 */ + uint32 gci_corecaps1; + uint32 gci_corecaps2; + uint32 gci_corectrl; + uint32 gci_corestat; /* 0xC10 */ + uint32 gci_intstat; /* 0xC14 */ + uint32 gci_intmask; /* 0xC18 */ + uint32 gci_wakemask; /* 0xC1C */ + uint32 gci_levelintstat; /* 0xC20 */ + uint32 gci_eventintstat; /* 0xC24 */ + uint32 PAD[6]; + uint32 gci_indirect_addr; /* 0xC40 */ + uint32 gci_gpioctl; /* 0xC44 */ + uint32 gci_gpiostatus; + uint32 gci_gpiomask; /* 0xC4C */ + uint32 gci_eventsummary; /* 0xC50 */ + uint32 gci_miscctl; /* 0xC54 */ + uint32 gci_gpiointmask; + uint32 gci_gpiowakemask; + uint32 gci_input[32]; /* C60 */ + uint32 gci_event[32]; /* CE0 */ + uint32 gci_output[4]; /* D60 */ + uint32 gci_control_0; /* 0xD70 */ + uint32 gci_control_1; /* 0xD74 */ + uint32 gci_intpolreg; /* 0xD78 */ + uint32 gci_levelintmask; /* 0xD7C */ + uint32 gci_eventintmask; /* 0xD80 */ + uint32 PAD[3]; + uint32 gci_inbandlevelintmask; /* 0xD90 */ + uint32 gci_inbandeventintmask; /* 0xD94 */ + uint32 PAD[2]; + uint32 gci_seciauxtx; /* 0xDA0 */ + uint32 gci_seciauxrx; /* 0xDA4 */ + uint32 gci_secitx_datatag; /* 0xDA8 */ + uint32 gci_secirx_datatag; /* 0xDAC */ + uint32 gci_secitx_datamask; /* 0xDB0 */ + uint32 gci_seciusef0tx_reg; /* 0xDB4 */ + uint32 gci_secif0tx_offset; /* 0xDB8 */ + uint32 gci_secif0rx_offset; /* 0xDBC */ + uint32 gci_secif1tx_offset; /* 0xDC0 */ + uint32 gci_rxfifo_common_ctrl; /* 0xDC4 */ + uint32 gci_rxfifoctrl; /* 0xDC8 */ + uint32 gci_uartreadid; /* DCC */ + uint32 gci_seciuartescval; /* DD0 */ + uint32 PAD; + uint32 gci_secififolevel; /* DD8 */ + uint32 gci_seciuartdata; /* DDC */ + uint32 gci_secibauddiv; /* DE0 */ + uint32 gci_secifcr; /* DE4 */ + uint32 gci_secilcr; /* DE8 */ + uint32 gci_secimcr; /* DEC */ + uint32 gci_secilsr; /* DF0 */ + uint32 gci_secimsr; /* DF4 */ + uint32 gci_baudadj; /* DF8 */ + 
uint32 PAD; + uint32 gci_chipctrl; /* 0xE00 */ + uint32 gci_chipsts; /* 0xE04 */ + uint32 gci_gpioout; /* 0xE08 */ + uint32 gci_gpioout_read; /* 0xE0C */ + uint32 gci_mpwaketx; /* 0xE10 */ + uint32 gci_mpwakedetect; /* 0xE14 */ + uint32 gci_seciin_ctrl; /* 0xE18 */ + uint32 gci_seciout_ctrl; /* 0xE1C */ + uint32 gci_seciin_auxfifo_en; /* 0xE20 */ + uint32 gci_seciout_txen_txbr; /* 0xE24 */ + uint32 gci_seciin_rxbrstatus; /* 0xE28 */ + uint32 gci_seciin_rxerrstatus; /* 0xE2C */ + uint32 gci_seciin_fcstatus; /* 0xE30 */ + uint32 gci_seciout_txstatus; /* 0xE34 */ + uint32 gci_seciout_txbrstatus; /* 0xE38 */ +} chipcregs_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +#define CC_CHIPID 0 +#define CC_CAPABILITIES 4 +#define CC_CHIPST 0x2c +#define CC_EROMPTR 0xfc + +#define CC_OTPST 0x10 +#define CC_INTSTATUS 0x20 +#define CC_INTMASK 0x24 +#define CC_JTAGCMD 0x30 +#define CC_JTAGIR 0x34 +#define CC_JTAGDR 0x38 +#define CC_JTAGCTRL 0x3c +#define CC_GPIOPU 0x58 +#define CC_GPIOPD 0x5c +#define CC_GPIOIN 0x60 +#define CC_GPIOOUT 0x64 +#define CC_GPIOOUTEN 0x68 +#define CC_GPIOCTRL 0x6c +#define CC_GPIOPOL 0x70 +#define CC_GPIOINTM 0x74 +#define CC_GPIOEVENT 0x78 +#define CC_GPIOEVENTMASK 0x7c +#define CC_WATCHDOG 0x80 +#define CC_GPIOEVENTPOL 0x84 +#define CC_CLKC_N 0x90 +#define CC_CLKC_M0 0x94 +#define CC_CLKC_M1 0x98 +#define CC_CLKC_M2 0x9c +#define CC_CLKC_M3 0xa0 +#define CC_CLKDIV 0xa4 +#define CC_CAP_EXT 0xac +#define CC_SYS_CLK_CTL 0xc0 +#define CC_CLKDIV2 0xf0 +#define CC_CLK_CTL_ST SI_CLK_CTL_ST +#define PMU_CTL 0x600 +#define PMU_CAP 0x604 +#define PMU_ST 0x608 +#define PMU_RES_STATE 0x60c +#define PMU_RES_PENDING 0x610 +#define PMU_TIMER 0x614 +#define PMU_MIN_RES_MASK 0x618 +#define PMU_MAX_RES_MASK 0x61c +#define CC_CHIPCTL_ADDR 0x650 +#define CC_CHIPCTL_DATA 0x654 +#define PMU_REG_CONTROL_ADDR 0x658 +#define PMU_REG_CONTROL_DATA 0x65C +#define PMU_PLL_CONTROL_ADDR 0x660 +#define PMU_PLL_CONTROL_DATA 0x664 + +#define CC_SROM_CTRL 0x190 +#ifdef 
SROM16K_4364_ADDRSPACE +#define CC_SROM_OTP 0xa000 /* SROM/OTP address space */ +#else +#define CC_SROM_OTP 0x0800 +#endif // endif +#define CC_GCI_INDIRECT_ADDR_REG 0xC40 +#define CC_GCI_CHIP_CTRL_REG 0xE00 +#define CC_GCI_CC_OFFSET_2 2 +#define CC_GCI_CC_OFFSET_5 5 +#define CC_SWD_CTRL 0x380 +#define CC_SWD_REQACK 0x384 +#define CC_SWD_DATA 0x388 +#define GPIO_SEL_0 0x00001111 +#define GPIO_SEL_1 0x11110000 +#define GPIO_SEL_8 0x00001111 +#define GPIO_SEL_9 0x11110000 + +#define CHIPCTRLREG0 0x0 +#define CHIPCTRLREG1 0x1 +#define CHIPCTRLREG2 0x2 +#define CHIPCTRLREG3 0x3 +#define CHIPCTRLREG4 0x4 +#define CHIPCTRLREG5 0x5 +#define CHIPCTRLREG6 0x6 +#define REGCTRLREG4 0x4 +#define REGCTRLREG5 0x5 +#define REGCTRLREG6 0x6 +#define MINRESMASKREG 0x618 +#define MAXRESMASKREG 0x61c +#define CHIPCTRLADDR 0x650 +#define CHIPCTRLDATA 0x654 +#define RSRCTABLEADDR 0x620 +#define PMU_RES_DEP_MASK 0x624 +#define RSRCUPDWNTIME 0x628 +#define PMUREG_RESREQ_MASK 0x68c +#define PMUREG_RESREQ_TIMER 0x688 +#define PMUREG_RESREQ_MASK1 0x6f4 +#define PMUREG_RESREQ_TIMER1 0x6f0 +#define EXT_LPO_AVAIL 0x100 +#define LPO_SEL (1 << 0) +#define CC_EXT_LPO_PU 0x200000 +#define GC_EXT_LPO_PU 0x2 +#define CC_INT_LPO_PU 0x100000 +#define GC_INT_LPO_PU 0x1 +#define EXT_LPO_SEL 0x8 +#define INT_LPO_SEL 0x4 +#define ENABLE_FINE_CBUCK_CTRL (1 << 30) +#define REGCTRL5_PWM_AUTO_CTRL_MASK 0x007e0000 +#define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17 +#define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000 +#define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16 +#define CC_BP_IND_ACCESS_START_SHIFT 9 +#define CC_BP_IND_ACCESS_START_MASK (1 << CC_BP_IND_ACCESS_START_SHIFT) +#define CC_BP_IND_ACCESS_RDWR_SHIFT 8 +#define CC_BP_IND_ACCESS_RDWR_MASK (1 << CC_BP_IND_ACCESS_RDWR_SHIFT) +#define CC_BP_IND_ACCESS_ERROR_SHIFT 10 +#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT) + +#define LPO_SEL_TIMEOUT 1000 + +#define LPO_FINAL_SEL_SHIFT 18 + +#define LHL_LPO1_SEL 0 +#define LHL_LPO2_SEL 0x1 +#define LHL_32k_SEL 
0x2 +#define LHL_EXT_SEL 0x3 + +#define EXTLPO_BUF_PD 0x40 +#define LPO1_PD_EN 0x1 +#define LPO1_PD_SEL 0x6 +#define LPO1_PD_SEL_VAL 0x4 +#define LPO2_PD_EN 0x8 +#define LPO2_PD_SEL 0x30 +#define LPO2_PD_SEL_VAL 0x20 +#define OSC_32k_PD 0x80 + +#define LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL 0x3 + +#define LHL_LPO_AUTO 0x0 +#define LHL_LPO1_ENAB 0x1 +#define LHL_LPO2_ENAB 0x2 +#define LHL_OSC_32k_ENAB 0x3 +#define LHL_EXT_LPO_ENAB 0x4 +#define RADIO_LPO_ENAB 0x5 + +#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN 0x4 +#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR 0x8 +#define LHL_CLK_DET_CNT 0xF0 +#define LHL_CLK_DET_CNT_SHIFT 4 +#define LPO_SEL_SHIFT 9 + +#define LHL_MAIN_CTL_ADR_FINAL_CLK_SEL 0x3C0000 +#define LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL 0x600 + +#define CLK_DET_CNT_THRESH 8 + +#ifdef SR_DEBUG +#define SUBCORE_POWER_ON 0x0001 +#define PHY_POWER_ON 0x0010 +#define VDDM_POWER_ON 0x0100 +#define MEMLPLDO_POWER_ON 0x1000 +#define SUBCORE_POWER_ON_CHK 0x00040000 +#define PHY_POWER_ON_CHK 0x00080000 +#define VDDM_POWER_ON_CHK 0x00100000 +#define MEMLPLDO_POWER_ON_CHK 0x00200000 +#endif /* SR_DEBUG */ + +#ifdef CCNFLASH_SUPPORT +/* NAND flash support */ +#define CC_NAND_REVISION 0xC00 +#define CC_NAND_CMD_START 0xC04 +#define CC_NAND_CMD_ADDR 0xC0C +#define CC_NAND_SPARE_RD_0 0xC20 +#define CC_NAND_SPARE_RD_4 0xC24 +#define CC_NAND_SPARE_RD_8 0xC28 +#define CC_NAND_SPARE_RD_C 0xC2C +#define CC_NAND_CONFIG 0xC48 +#define CC_NAND_DEVID 0xC60 +#define CC_NAND_DEVID_EXT 0xC64 +#define CC_NAND_INTFC_STATUS 0xC6C +#endif /* CCNFLASH_SUPPORT */ + +/* chipid */ +#define CID_ID_MASK 0x0000ffff /**< Chip Id mask */ +#define CID_REV_MASK 0x000f0000 /**< Chip Revision mask */ +#define CID_REV_SHIFT 16 /**< Chip Revision shift */ +#define CID_PKG_MASK 0x00f00000 /**< Package Option mask */ +#define CID_PKG_SHIFT 20 /**< Package Option shift */ +#define CID_CC_MASK 0x0f000000 /**< CoreCount (corerev >= 4) */ +#define CID_CC_SHIFT 24 +#define CID_TYPE_MASK 0xf0000000 /**< Chip Type */ +#define 
CID_TYPE_SHIFT 28 + +/* capabilities */ +#define CC_CAP_UARTS_MASK 0x00000003 /**< Number of UARTs */ +#define CC_CAP_MIPSEB 0x00000004 /**< MIPS is in big-endian mode */ +#define CC_CAP_UCLKSEL 0x00000018 /**< UARTs clock select */ +#define CC_CAP_UINTCLK 0x00000008 /**< UARTs are driven by internal divided clock */ +#define CC_CAP_UARTGPIO 0x00000020 /**< UARTs own GPIOs 15:12 */ +#define CC_CAP_EXTBUS_MASK 0x000000c0 /**< External bus mask */ +#define CC_CAP_EXTBUS_NONE 0x00000000 /**< No ExtBus present */ +#define CC_CAP_EXTBUS_FULL 0x00000040 /**< ExtBus: PCMCIA, IDE & Prog */ +#define CC_CAP_EXTBUS_PROG 0x00000080 /**< ExtBus: ProgIf only */ +#define CC_CAP_FLASH_MASK 0x00000700 /**< Type of flash */ +#define CC_CAP_PLL_MASK 0x00038000 /**< Type of PLL */ +#define CC_CAP_PWR_CTL 0x00040000 /**< Power control */ +#define CC_CAP_OTPSIZE 0x00380000 /**< OTP Size (0 = none) */ +#define CC_CAP_OTPSIZE_SHIFT 19 /**< OTP Size shift */ +#define CC_CAP_OTPSIZE_BASE 5 /**< OTP Size base */ +#define CC_CAP_JTAGP 0x00400000 /**< JTAG Master Present */ +#define CC_CAP_ROM 0x00800000 /**< Internal boot rom active */ +#define CC_CAP_BKPLN64 0x08000000 /**< 64-bit backplane */ +#define CC_CAP_PMU 0x10000000 /**< PMU Present, rev >= 20 */ +#define CC_CAP_ECI 0x20000000 /**< ECI Present, rev >= 21 */ +#define CC_CAP_SROM 0x40000000 /**< Srom Present, rev >= 32 */ +#define CC_CAP_NFLASH 0x80000000 /**< Nand flash present, rev >= 35 */ + +#define CC_CAP2_SECI 0x00000001 /**< SECI Present, rev >= 36 */ +#define CC_CAP2_GSIO 0x00000002 /**< GSIO (spi/i2c) present, rev >= 37 */ + +/* capabilities extension */ +#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /**< SECI present */ +#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /**< GSIO present */ +#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /**< GCI present */ +#define CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /**< UART present */ +#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /**< AOB present */ +#define CC_CAP_EXT_SWD_PRESENT 0x00000400 /**< SWD 
present */ + +/* WL Channel Info to BT via GCI - bits 40 - 47 */ +#define GCI_WL_CHN_INFO_MASK (0xFF00) +/* WL indication of MCHAN enabled/disabled to BT in awdl mode- bit 36 */ +#define GCI_WL_MCHAN_BIT_MASK (0x0010) + +#ifdef WLC_SW_DIVERSITY +/* WL indication of SWDIV enabled/disabled to BT - bit 33 */ +#define GCI_WL_SWDIV_ANT_VALID_BIT_MASK (0x0002) +#define GCI_SWDIV_ANT_VALID_SHIFT 0x1 +#define GCI_SWDIV_ANT_VALID_DISABLE 0x0 +#endif // endif + +/* WL Strobe to BT */ +#define GCI_WL_STROBE_BIT_MASK (0x0020) +/* bits [51:48] - reserved for wlan TX pwr index */ +/* bits [55:52] btc mode indication */ +#define GCI_WL_BTC_MODE_SHIFT (20) +#define GCI_WL_BTC_MODE_MASK (0xF << GCI_WL_BTC_MODE_SHIFT) +#define GCI_WL_ANT_BIT_MASK (0x00c0) +#define GCI_WL_ANT_SHIFT_BITS (6) +/* PLL type */ +#define PLL_NONE 0x00000000 +#define PLL_TYPE1 0x00010000 /**< 48MHz base, 3 dividers */ +#define PLL_TYPE2 0x00020000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE3 0x00030000 /**< 25MHz, 2 dividers */ +#define PLL_TYPE4 0x00008000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE5 0x00018000 /**< 25MHz, 4 dividers */ +#define PLL_TYPE6 0x00028000 /**< 100/200 or 120/240 only */ +#define PLL_TYPE7 0x00038000 /**< 25MHz, 4 dividers */ + +/* ILP clock */ +#define ILP_CLOCK 32000 + +/* ALP clock on pre-PMU chips */ +#define ALP_CLOCK 20000000 + +#ifdef CFG_SIM +#define NS_ALP_CLOCK 84922 +#define NS_SLOW_ALP_CLOCK 84922 +#define NS_CPU_CLOCK 534500 +#define NS_SLOW_CPU_CLOCK 534500 +#define NS_SI_CLOCK 271750 +#define NS_SLOW_SI_CLOCK 271750 +#define NS_FAST_MEM_CLOCK 271750 +#define NS_MEM_CLOCK 271750 +#define NS_SLOW_MEM_CLOCK 271750 +#else +#define NS_ALP_CLOCK 125000000 +#define NS_SLOW_ALP_CLOCK 100000000 +#define NS_CPU_CLOCK 1000000000 +#define NS_SLOW_CPU_CLOCK 800000000 +#define NS_SI_CLOCK 250000000 +#define NS_SLOW_SI_CLOCK 200000000 +#define NS_FAST_MEM_CLOCK 800000000 +#define NS_MEM_CLOCK 533000000 +#define NS_SLOW_MEM_CLOCK 400000000 +#endif /* CFG_SIM */ + +#define 
ALP_CLOCK_53573 40000000 + +/* HT clock */ +#define HT_CLOCK 80000000 + +/* corecontrol */ +#define CC_UARTCLKO 0x00000001 /**< Drive UART with internal clock */ +#define CC_SE 0x00000002 /**< sync clk out enable (corerev >= 3) */ +#define CC_ASYNCGPIO 0x00000004 /**< 1=generate GPIO interrupt without backplane clock */ +#define CC_UARTCLKEN 0x00000008 /**< enable UART Clock (corerev > = 21 */ + +/* retention_ctl */ +#define RCTL_MEM_RET_SLEEP_LOG_SHIFT 29 +#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT) + +/* 4321 chipcontrol */ +#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */ + +/* Fields in the otpstatus register in rev >= 21 */ +#define OTPS_OL_MASK 0x000000ff +#define OTPS_OL_MFG 0x00000001 /**< manuf row is locked */ +#define OTPS_OL_OR1 0x00000002 /**< otp redundancy row 1 is locked */ +#define OTPS_OL_OR2 0x00000004 /**< otp redundancy row 2 is locked */ +#define OTPS_OL_GU 0x00000008 /**< general use region is locked */ +#define OTPS_GUP_MASK 0x00000f00 +#define OTPS_GUP_SHIFT 8 +#define OTPS_GUP_HW 0x00000100 /**< h/w subregion is programmed */ +#define OTPS_GUP_SW 0x00000200 /**< s/w subregion is programmed */ +#define OTPS_GUP_CI 0x00000400 /**< chipid/pkgopt subregion is programmed */ +#define OTPS_GUP_FUSE 0x00000800 /**< fuse subregion is programmed */ +#define OTPS_READY 0x00001000 +#define OTPS_RV(x) (1 << (16 + (x))) /**< redundancy entry valid */ +#define OTPS_RV_MASK 0x0fff0000 +#define OTPS_PROGOK 0x40000000 + +/* Fields in the otpcontrol register in rev >= 21 */ +#define OTPC_PROGSEL 0x00000001 +#define OTPC_PCOUNT_MASK 0x0000000e +#define OTPC_PCOUNT_SHIFT 1 +#define OTPC_VSEL_MASK 0x000000f0 +#define OTPC_VSEL_SHIFT 4 +#define OTPC_TMM_MASK 0x00000700 +#define OTPC_TMM_SHIFT 8 +#define OTPC_ODM 0x00000800 +#define OTPC_PROGEN 0x80000000 + +/* Fields in the 40nm otpcontrol register in rev >= 40 */ +#define OTPC_40NM_PROGSEL_SHIFT 0 +#define OTPC_40NM_PCOUNT_SHIFT 1 +#define OTPC_40NM_PCOUNT_WR 
0xA +#define OTPC_40NM_PCOUNT_V1X 0xB +#define OTPC_40NM_REGCSEL_SHIFT 5 +#define OTPC_40NM_REGCSEL_DEF 0x4 +#define OTPC_40NM_PROGIN_SHIFT 8 +#define OTPC_40NM_R2X_SHIFT 10 +#define OTPC_40NM_ODM_SHIFT 11 +#define OTPC_40NM_DF_SHIFT 15 +#define OTPC_40NM_VSEL_SHIFT 16 +#define OTPC_40NM_VSEL_WR 0xA +#define OTPC_40NM_VSEL_V1X 0xA +#define OTPC_40NM_VSEL_R1X 0x5 +#define OTPC_40NM_COFAIL_SHIFT 30 + +#define OTPC1_CPCSEL_SHIFT 0 +#define OTPC1_CPCSEL_DEF 6 +#define OTPC1_TM_SHIFT 8 +#define OTPC1_TM_WR 0x84 +#define OTPC1_TM_V1X 0x84 +#define OTPC1_TM_R1X 0x4 +#define OTPC1_CLK_EN_MASK 0x00020000 +#define OTPC1_CLK_DIV_MASK 0x00FC0000 + +/* Fields in otpprog in rev >= 21 and HND OTP */ +#define OTPP_COL_MASK 0x000000ff +#define OTPP_COL_SHIFT 0 +#define OTPP_ROW_MASK 0x0000ff00 +#define OTPP_ROW_MASK9 0x0001ff00 /* for ccrev >= 49 */ +#define OTPP_ROW_SHIFT 8 +#define OTPP_OC_MASK 0x0f000000 +#define OTPP_OC_SHIFT 24 +#define OTPP_READERR 0x10000000 +#define OTPP_VALUE_MASK 0x20000000 +#define OTPP_VALUE_SHIFT 29 +#define OTPP_START_BUSY 0x80000000 +#define OTPP_READ 0x40000000 /* HND OTP */ + +/* Fields in otplayout register */ +#define OTPL_HWRGN_OFF_MASK 0x00000FFF +#define OTPL_HWRGN_OFF_SHIFT 0 +#define OTPL_WRAP_REVID_MASK 0x00F80000 +#define OTPL_WRAP_REVID_SHIFT 19 +#define OTPL_WRAP_TYPE_MASK 0x00070000 +#define OTPL_WRAP_TYPE_SHIFT 16 +#define OTPL_WRAP_TYPE_65NM 0 +#define OTPL_WRAP_TYPE_40NM 1 +#define OTPL_WRAP_TYPE_28NM 2 +#define OTPL_ROW_SIZE_MASK 0x0000F000 +#define OTPL_ROW_SIZE_SHIFT 12 + +/* otplayout reg corerev >= 36 */ +#define OTP_CISFORMAT_NEW 0x80000000 + +/* Opcodes for OTPP_OC field */ +#define OTPPOC_READ 0 +#define OTPPOC_BIT_PROG 1 +#define OTPPOC_VERIFY 3 +#define OTPPOC_INIT 4 +#define OTPPOC_SET 5 +#define OTPPOC_RESET 6 +#define OTPPOC_OCST 7 +#define OTPPOC_ROW_LOCK 8 +#define OTPPOC_PRESCN_TEST 9 + +/* Opcodes for OTPP_OC field (40NM) */ +#define OTPPOC_READ_40NM 0 +#define OTPPOC_PROG_ENABLE_40NM 1 +#define 
OTPPOC_PROG_DISABLE_40NM 2 +#define OTPPOC_VERIFY_40NM 3 +#define OTPPOC_WORD_VERIFY_1_40NM 4 +#define OTPPOC_ROW_LOCK_40NM 5 +#define OTPPOC_STBY_40NM 6 +#define OTPPOC_WAKEUP_40NM 7 +#define OTPPOC_WORD_VERIFY_0_40NM 8 +#define OTPPOC_PRESCN_TEST_40NM 9 +#define OTPPOC_BIT_PROG_40NM 10 +#define OTPPOC_WORDPROG_40NM 11 +#define OTPPOC_BURNIN_40NM 12 +#define OTPPOC_AUTORELOAD_40NM 13 +#define OTPPOC_OVST_READ_40NM 14 +#define OTPPOC_OVST_PROG_40NM 15 + +/* Opcodes for OTPP_OC field (28NM) */ +#define OTPPOC_READ_28NM 0 +#define OTPPOC_READBURST_28NM 1 +#define OTPPOC_PROG_ENABLE_28NM 2 +#define OTPPOC_PROG_DISABLE_28NM 3 +#define OTPPOC_PRESCREEN_28NM 4 +#define OTPPOC_PRESCREEN_RP_28NM 5 +#define OTPPOC_FLUSH_28NM 6 +#define OTPPOC_NOP_28NM 7 +#define OTPPOC_PROG_ECC_28NM 8 +#define OTPPOC_PROG_ECC_READ_28NM 9 +#define OTPPOC_PROG_28NM 10 +#define OTPPOC_PROGRAM_RP_28NM 11 +#define OTPPOC_PROGRAM_OVST_28NM 12 +#define OTPPOC_RELOAD_28NM 13 +#define OTPPOC_ERASE_28NM 14 +#define OTPPOC_LOAD_RF_28NM 15 +#define OTPPOC_CTRL_WR_28NM 16 +#define OTPPOC_CTRL_RD_28NM 17 +#define OTPPOC_READ_HP_28NM 18 +#define OTPPOC_READ_OVST_28NM 19 +#define OTPPOC_READ_VERIFY0_28NM 20 +#define OTPPOC_READ_VERIFY1_28NM 21 +#define OTPPOC_READ_FORCE0_28NM 22 +#define OTPPOC_READ_FORCE1_28NM 23 +#define OTPPOC_BURNIN_28NM 24 +#define OTPPOC_PROGRAM_LOCK_28NM 25 +#define OTPPOC_PROGRAM_TESTCOL_28NM 26 +#define OTPPOC_READ_TESTCOL_28NM 27 +#define OTPPOC_READ_FOUT_28NM 28 +#define OTPPOC_SFT_RESET_28NM 29 + +#define OTPP_OC_MASK_28NM 0x0f800000 +#define OTPP_OC_SHIFT_28NM 23 +#define OTPC_PROGEN_28NM 0x8 +#define OTPC_DBLERRCLR 0x20 +#define OTPC_CLK_EN_MASK 0x00000040 +#define OTPC_CLK_DIV_MASK 0x00000F80 + +/* Fields in otplayoutextension */ +#define OTPLAYOUTEXT_FUSE_MASK 0x3FF + +/* Jtagm characteristics that appeared at a given corerev */ +#define JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */ +#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */ +#define JTAGM_CREV_RTI 28 
/**< Able to do return-to-idle */ + +/* jtagcmd */ +#define JCMD_START 0x80000000 +#define JCMD_BUSY 0x80000000 +#define JCMD_STATE_MASK 0x60000000 +#define JCMD_STATE_TLR 0x00000000 /**< Test-logic-reset */ +#define JCMD_STATE_PIR 0x20000000 /**< Pause IR */ +#define JCMD_STATE_PDR 0x40000000 /**< Pause DR */ +#define JCMD_STATE_RTI 0x60000000 /**< Run-test-idle */ +#define JCMD0_ACC_MASK 0x0000f000 +#define JCMD0_ACC_IRDR 0x00000000 +#define JCMD0_ACC_DR 0x00001000 +#define JCMD0_ACC_IR 0x00002000 +#define JCMD0_ACC_RESET 0x00003000 +#define JCMD0_ACC_IRPDR 0x00004000 +#define JCMD0_ACC_PDR 0x00005000 +#define JCMD0_IRW_MASK 0x00000f00 +#define JCMD_ACC_MASK 0x000f0000 /**< Changes for corerev 11 */ +#define JCMD_ACC_IRDR 0x00000000 +#define JCMD_ACC_DR 0x00010000 +#define JCMD_ACC_IR 0x00020000 +#define JCMD_ACC_RESET 0x00030000 +#define JCMD_ACC_IRPDR 0x00040000 +#define JCMD_ACC_PDR 0x00050000 +#define JCMD_ACC_PIR 0x00060000 +#define JCMD_ACC_IRDR_I 0x00070000 /**< rev 28: return to run-test-idle */ +#define JCMD_ACC_DR_I 0x00080000 /**< rev 28: return to run-test-idle */ +#define JCMD_IRW_MASK 0x00001f00 +#define JCMD_IRW_SHIFT 8 +#define JCMD_DRW_MASK 0x0000003f + +/* jtagctrl */ +#define JCTRL_FORCE_CLK 4 /**< Force clock */ +#define JCTRL_EXT_EN 2 /**< Enable external targets */ +#define JCTRL_EN 1 /**< Enable Jtag master */ +#define JCTRL_TAPSEL_BIT 0x00000008 /**< JtagMasterCtrl tap_sel bit */ + +/* swdmasterctrl */ +#define SWDCTRL_INT_EN 8 /**< Enable internal targets */ +#define SWDCTRL_FORCE_CLK 4 /**< Force clock */ +#define SWDCTRL_OVJTAG 2 /**< Enable shared SWD/JTAG pins */ +#define SWDCTRL_EN 1 /**< Enable Jtag master */ + +/* Fields in clkdiv */ +#define CLKD_SFLASH 0x1f000000 +#define CLKD_SFLASH_SHIFT 24 +#define CLKD_OTP 0x000f0000 +#define CLKD_OTP_SHIFT 16 +#define CLKD_JTAG 0x00000f00 +#define CLKD_JTAG_SHIFT 8 +#define CLKD_UART 0x000000ff + +#define CLKD2_SROM 0x00000007 +#define CLKD2_SROMDIV_32 0 +#define CLKD2_SROMDIV_64 1 +#define 
CLKD2_SROMDIV_96 2 +#define CLKD2_SROMDIV_128 3 +#define CLKD2_SROMDIV_192 4 +#define CLKD2_SROMDIV_256 5 +#define CLKD2_SROMDIV_384 6 +#define CLKD2_SROMDIV_512 7 +#define CLKD2_SWD 0xf8000000 +#define CLKD2_SWD_SHIFT 27 + +/* intstatus/intmask */ +#define CI_GPIO 0x00000001 /**< gpio intr */ +#define CI_EI 0x00000002 /**< extif intr (corerev >= 3) */ +#define CI_TEMP 0x00000004 /**< temp. ctrl intr (corerev >= 15) */ +#define CI_SIRQ 0x00000008 /**< serial IRQ intr (corerev >= 15) */ +#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */ +#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */ +#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */ +#define CI_WECI 0x00000080 /* eci wakeup intr (corerev >= 21) */ +#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */ + +/* slow_clk_ctl */ +#define SCC_SS_MASK 0x00000007 /**< slow clock source mask */ +#define SCC_SS_LPO 0x00000000 /**< source of slow clock is LPO */ +#define SCC_SS_XTAL 0x00000001 /**< source of slow clock is crystal */ +#define SCC_SS_PCI 0x00000002 /**< source of slow clock is PCI */ +#define SCC_LF 0x00000200 /**< LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SCC_LP 0x00000400 /**< LPOPowerDown, 1: LPO is disabled, + * 0: LPO is enabled + */ +#define SCC_FS 0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock, + * 0: power logic control + */ +#define SCC_IP 0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors + * PLL clock disable requests from core + */ +#define SCC_XC 0x00002000 /**< XtalControlEn, 1/0: power logic does/doesn't + * disable crystal when appropriate + */ +#define SCC_XP 0x00004000 /**< XtalPU (RO), 1/0: crystal running/disabled */ +#define SCC_CD_MASK 0xffff0000 /**< ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SCC_CD_SHIFT 16 + +/* system_clk_ctl */ +#define SYCC_IE 0x00000001 /**< ILPen: Enable Idle Low Power */ +#define SYCC_AE 0x00000002 /**< ALPen: Enable Active Low Power */ +#define SYCC_FP 0x00000004 /**< ForcePLLOn */ 
+#define SYCC_AR 0x00000008 /**< Force ALP (or HT if ALPen is not set */ +#define SYCC_HR 0x00000010 /**< Force HT */ +#define SYCC_CD_MASK 0xffff0000 /**< ClkDiv (ILP = 1/(4 * (divisor + 1)) */ +#define SYCC_CD_SHIFT 16 + +/* watchdogcounter */ +/* WL sub-system reset */ +#define WD_SSRESET_PCIE_F0_EN 0x10000000 +/* BT sub-system reset */ +#define WD_SSRESET_PCIE_F1_EN 0x20000000 +#define WD_SSRESET_PCIE_F2_EN 0x40000000 +/* Both WL and BT sub-system reset */ +#define WD_SSRESET_PCIE_ALL_FN_EN 0x80000000 +#define WD_COUNTER_MASK 0x0fffffff +#define WD_ENABLE_MASK \ + (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_F1_EN | \ + WD_SSRESET_PCIE_F2_EN | WD_SSRESET_PCIE_ALL_FN_EN) + +/* Indirect backplane access */ +#define BPIA_BYTEEN 0x0000000f +#define BPIA_SZ1 0x00000001 +#define BPIA_SZ2 0x00000003 +#define BPIA_SZ4 0x00000007 +#define BPIA_SZ8 0x0000000f +#define BPIA_WRITE 0x00000100 +#define BPIA_START 0x00000200 +#define BPIA_BUSY 0x00000200 +#define BPIA_ERROR 0x00000400 + +/* pcmcia/prog/flash_config */ +#define CF_EN 0x00000001 /**< enable */ +#define CF_EM_MASK 0x0000000e /**< mode */ +#define CF_EM_SHIFT 1 +#define CF_EM_FLASH 0 /**< flash/asynchronous mode */ +#define CF_EM_SYNC 2 /**< synchronous mode */ +#define CF_EM_PCMCIA 4 /**< pcmcia mode */ +#define CF_DS 0x00000010 /**< destsize: 0=8bit, 1=16bit */ +#define CF_BS 0x00000020 /**< byteswap */ +#define CF_CD_MASK 0x000000c0 /**< clock divider */ +#define CF_CD_SHIFT 6 +#define CF_CD_DIV2 0x00000000 /**< backplane/2 */ +#define CF_CD_DIV3 0x00000040 /**< backplane/3 */ +#define CF_CD_DIV4 0x00000080 /**< backplane/4 */ +#define CF_CE 0x00000100 /**< clock enable */ +#define CF_SB 0x00000200 /**< size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define PM_W0_MASK 0x0000003f /**< waitcount0 */ +#define PM_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PM_W1_SHIFT 8 +#define PM_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PM_W2_SHIFT 16 +#define PM_W3_MASK 0x1f000000 /**< waitcount3 */ +#define 
PM_W3_SHIFT 24 + +/* pcmcia_attrwait */ +#define PA_W0_MASK 0x0000003f /**< waitcount0 */ +#define PA_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PA_W1_SHIFT 8 +#define PA_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PA_W2_SHIFT 16 +#define PA_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PA_W3_SHIFT 24 + +/* pcmcia_iowait */ +#define PI_W0_MASK 0x0000003f /**< waitcount0 */ +#define PI_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PI_W1_SHIFT 8 +#define PI_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PI_W2_SHIFT 16 +#define PI_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PI_W3_SHIFT 24 + +/* prog_waitcount */ +#define PW_W0_MASK 0x0000001f /**< waitcount0 */ +#define PW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PW_W1_SHIFT 8 +#define PW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PW_W2_SHIFT 16 +#define PW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PW_W3_SHIFT 24 + +#define PW_W0 0x0000000c +#define PW_W1 0x00000a00 +#define PW_W2 0x00020000 +#define PW_W3 0x01000000 + +/* flash_waitcount */ +#define FW_W0_MASK 0x0000003f /**< waitcount0 */ +#define FW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define FW_W1_SHIFT 8 +#define FW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define FW_W2_SHIFT 16 +#define FW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define FW_W3_SHIFT 24 + +/* When Srom support present, fields in sromcontrol */ +#define SRC_START 0x80000000 +#define SRC_BUSY 0x80000000 +#define SRC_OPCODE 0x60000000 +#define SRC_OP_READ 0x00000000 +#define SRC_OP_WRITE 0x20000000 +#define SRC_OP_WRDIS 0x40000000 +#define SRC_OP_WREN 0x60000000 +#define SRC_OTPSEL 0x00000010 +#define SRC_OTPPRESENT 0x00000020 +#define SRC_LOCK 0x00000008 +#define SRC_SIZE_MASK 0x00000006 +#define SRC_SIZE_1K 0x00000000 +#define SRC_SIZE_4K 0x00000002 +#define SRC_SIZE_16K 0x00000004 +#define SRC_SIZE_SHIFT 1 +#define SRC_PRESENT 0x00000001 + +/* Fields in pmucontrol */ +#define PCTL_ILP_DIV_MASK 0xffff0000 +#define PCTL_ILP_DIV_SHIFT 16 +#define PCTL_LQ_REQ_EN 0x00008000 
+#define PCTL_PLL_PLLCTL_UPD 0x00000400 /**< rev 2 */ +#define PCTL_NOILP_ON_WAIT 0x00000200 /**< rev 1 */ +#define PCTL_HT_REQ_EN 0x00000100 +#define PCTL_ALP_REQ_EN 0x00000080 +#define PCTL_XTALFREQ_MASK 0x0000007c +#define PCTL_XTALFREQ_SHIFT 2 +#define PCTL_ILP_DIV_EN 0x00000002 +#define PCTL_LPO_SEL 0x00000001 + +/* Fields in pmucontrol_ext */ +#define PCTL_EXT_USE_LHL_TIMER 0x00000010 +#define PCTL_EXT_FASTLPO_ENAB 0x00000080 +#define PCTL_EXT_FASTLPO_SWENAB 0x00000200 +#define PCTL_EXT_FASTSEQ_ENAB 0x00001000 +#define PCTL_EXT_FASTLPO_PCIE_SWENAB 0x00004000 /**< rev33 for FLL1M */ + +#define DEFAULT_43012_MIN_RES_MASK 0x0f8bfe77 + +/* Retention Control */ +#define PMU_RCTL_CLK_DIV_SHIFT 0 +#define PMU_RCTL_CHAIN_LEN_SHIFT 12 +#define PMU_RCTL_MACPHY_DISABLE_SHIFT 26 +#define PMU_RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define PMU_RCTL_LOGIC_DISABLE_SHIFT 27 +#define PMU_RCTL_LOGIC_DISABLE_MASK (1 << 27) +#define PMU_RCTL_MEMSLP_LOG_SHIFT 28 +#define PMU_RCTL_MEMSLP_LOG_MASK (1 << 28) +#define PMU_RCTL_MEMRETSLP_LOG_SHIFT 29 +#define PMU_RCTL_MEMRETSLP_LOG_MASK (1 << 29) + +/* Retention Group Control */ +#define PMU_RCTLGRP_CHAIN_LEN_SHIFT 0 +#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT 14 +#define PMU_RCTLGRP_RMODE_ENABLE_MASK (1 << 14) +#define PMU_RCTLGRP_DFT_ENABLE_SHIFT 15 +#define PMU_RCTLGRP_DFT_ENABLE_MASK (1 << 15) +#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT 16 +#define PMU_RCTLGRP_NSRST_DISABLE_MASK (1 << 16) + +/* Fields in clkstretch */ +#define CSTRETCH_HT 0xffff0000 +#define CSTRETCH_ALP 0x0000ffff +#define CSTRETCH_REDUCE_8 0x00080008 + +/* gpiotimerval */ +#define GPIO_ONTIME_SHIFT 16 + +/* clockcontrol_n */ +#define CN_N1_MASK 0x3f /**< n1 control */ +#define CN_N2_MASK 0x3f00 /**< n2 control */ +#define CN_N2_SHIFT 8 +#define CN_PLLC_MASK 0xf0000 /**< pll control */ +#define CN_PLLC_SHIFT 16 + +/* clockcontrol_sb/pci/uart */ +#define CC_M1_MASK 0x3f /**< m1 control */ +#define CC_M2_MASK 0x3f00 /**< m2 control */ +#define CC_M2_SHIFT 8 +#define 
CC_M3_MASK 0x3f0000 /**< m3 control */ +#define CC_M3_SHIFT 16 +#define CC_MC_MASK 0x1f000000 /**< mux control */ +#define CC_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define CC_F6_2 0x02 /**< A factor of 2 in */ +#define CC_F6_3 0x03 /**< 6-bit fields like */ +#define CC_F6_4 0x05 /**< N1, M1 or M3 */ +#define CC_F6_5 0x09 +#define CC_F6_6 0x11 +#define CC_F6_7 0x21 + +#define CC_F5_BIAS 5 /**< 5-bit fields get this added */ + +#define CC_MC_BYPASS 0x08 +#define CC_MC_M1 0x04 +#define CC_MC_M1M2 0x02 +#define CC_MC_M1M2M3 0x01 +#define CC_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define CC_T2_BIAS 2 /**< n1, n2, m1 & m3 bias */ +#define CC_T2M2_BIAS 3 /**< m2 bias */ + +#define CC_T2MC_M1BYP 1 +#define CC_T2MC_M2BYP 2 +#define CC_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define CC_T6_MMASK 1 /**< bits of interest in m */ +#define CC_T6_M0 120000000 /**< sb clock for m = 0 */ +#define CC_T6_M1 100000000 /**< sb clock for m = 1 */ +#define SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define CC_CLOCK_BASE1 24000000 /**< Half the clock freq */ +#define CC_CLOCK_BASE2 12500000 /**< Alternate crystal on some PLLs */ + +/* Clock control values for 200MHz in 5350 */ +#define CLKC_5350_N 0x0311 +#define CLKC_5350_M 0x04020009 + +/* Flash types in the chipcommon capabilities register */ +#define FLASH_NONE 0x000 /**< No flash */ +#define SFLASH_ST 0x100 /**< ST serial flash */ +#define SFLASH_AT 0x200 /**< Atmel serial flash */ +#define NFLASH 0x300 +#define PFLASH 0x700 /**< Parallel flash */ +#define QSPIFLASH_ST 0x800 +#define QSPIFLASH_AT 0x900 + +/* Bits in the ExtBus config registers */ +#define CC_CFG_EN 0x0001 /**< Enable */ +#define CC_CFG_EM_MASK 0x000e /**< Extif Mode */ +#define CC_CFG_EM_ASYNC 0x0000 /**< Async/Parallel flash */ +#define CC_CFG_EM_SYNC 0x0002 /**< Synchronous */ +#define CC_CFG_EM_PCMCIA 0x0004 /**< PCMCIA */ +#define CC_CFG_EM_IDE 0x0006 /**< IDE */ +#define CC_CFG_DS 
0x0010 /**< Data size, 0=8bit, 1=16bit */ +#define CC_CFG_CD_MASK 0x00e0 /**< Sync: Clock divisor, rev >= 20 */ +#define CC_CFG_CE 0x0100 /**< Sync: Clock enable, rev >= 20 */ +#define CC_CFG_SB 0x0200 /**< Sync: Size/Bytestrobe, rev >= 20 */ +#define CC_CFG_IS 0x0400 /**< Extif Sync Clk Select, rev >= 20 */ + +/* ExtBus address space */ +#define CC_EB_BASE 0x1a000000 /**< Chipc ExtBus base address */ +#define CC_EB_PCMCIA_MEM 0x1a000000 /**< PCMCIA 0 memory base address */ +#define CC_EB_PCMCIA_IO 0x1a200000 /**< PCMCIA 0 I/O base address */ +#define CC_EB_PCMCIA_CFG 0x1a400000 /**< PCMCIA 0 config base address */ +#define CC_EB_IDE 0x1a800000 /**< IDE memory base */ +#define CC_EB_PCMCIA1_MEM 0x1a800000 /**< PCMCIA 1 memory base address */ +#define CC_EB_PCMCIA1_IO 0x1aa00000 /**< PCMCIA 1 I/O base address */ +#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */ +#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */ + +/* Start/busy bit in flashcontrol */ +#define SFLASH_OPCODE 0x000000ff +#define SFLASH_ACTION 0x00000700 +#define SFLASH_CS_ACTIVE 0x00001000 /**< Chip Select Active, rev >= 20 */ +#define SFLASH_START 0x80000000 +#define SFLASH_BUSY SFLASH_START + +/* flashcontrol action codes */ +#define SFLASH_ACT_OPONLY 0x0000 /**< Issue opcode only */ +#define SFLASH_ACT_OP1D 0x0100 /**< opcode + 1 data byte */ +#define SFLASH_ACT_OP3A 0x0200 /**< opcode + 3 addr bytes */ +#define SFLASH_ACT_OP3A1D 0x0300 /**< opcode + 3 addr & 1 data bytes */ +#define SFLASH_ACT_OP3A4D 0x0400 /**< opcode + 3 addr & 4 data bytes */ +#define SFLASH_ACT_OP3A4X4D 0x0500 /**< opcode + 3 addr, 4 don't care & 4 data bytes */ +#define SFLASH_ACT_OP3A1X4D 0x0700 /**< opcode + 3 addr, 1 don't care & 4 data bytes */ + +/* flashcontrol action+opcodes for ST flashes */ +#define SFLASH_ST_WREN 0x0006 /**< Write Enable */ +#define SFLASH_ST_WRDIS 0x0004 /**< Write Disable */ +#define SFLASH_ST_RDSR 0x0105 /**< Read Status Register */ +#define 
SFLASH_ST_WRSR 0x0101 /**< Write Status Register */ +#define SFLASH_ST_READ 0x0303 /**< Read Data Bytes */ +#define SFLASH_ST_PP 0x0302 /**< Page Program */ +#define SFLASH_ST_SE 0x02d8 /**< Sector Erase */ +#define SFLASH_ST_BE 0x00c7 /**< Bulk Erase */ +#define SFLASH_ST_DP 0x00b9 /**< Deep Power-down */ +#define SFLASH_ST_RES 0x03ab /**< Read Electronic Signature */ +#define SFLASH_ST_CSA 0x1000 /**< Keep chip select asserted */ +#define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */ + +#define SFLASH_ST_READ4B 0x6313 /* Read Data Bytes in 4Byte address */ +#define SFLASH_ST_PP4B 0x6312 /* Page Program in 4Byte address */ +#define SFLASH_ST_SE4B 0x62dc /* Sector Erase in 4Byte address */ +#define SFLASH_ST_SSE4B 0x6221 /* Sub-sector Erase */ + +#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacture ID */ +#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacture ID */ + +/* Status register bits for ST flashes */ +#define SFLASH_ST_WIP 0x01 /**< Write In Progress */ +#define SFLASH_ST_WEL 0x02 /**< Write Enable Latch */ +#define SFLASH_ST_BP_MASK 0x1c /**< Block Protect */ +#define SFLASH_ST_BP_SHIFT 2 +#define SFLASH_ST_SRWD 0x80 /**< Status Register Write Disable */ + +/* flashcontrol action+opcodes for Atmel flashes */ +#define SFLASH_AT_READ 0x07e8 +#define SFLASH_AT_PAGE_READ 0x07d2 +#define SFLASH_AT_BUF1_READ +#define SFLASH_AT_BUF2_READ +#define SFLASH_AT_STATUS 0x01d7 +#define SFLASH_AT_BUF1_WRITE 0x0384 +#define SFLASH_AT_BUF2_WRITE 0x0387 +#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283 +#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286 +#define SFLASH_AT_BUF1_PROGRAM 0x0288 +#define SFLASH_AT_BUF2_PROGRAM 0x0289 +#define SFLASH_AT_PAGE_ERASE 0x0281 +#define SFLASH_AT_BLOCK_ERASE 0x0250 +#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define SFLASH_AT_BUF1_LOAD 0x0253 +#define SFLASH_AT_BUF2_LOAD 0x0255 +#define SFLASH_AT_BUF1_COMPARE 0x0260 +#define SFLASH_AT_BUF2_COMPARE 0x0261 +#define SFLASH_AT_BUF1_REPROGRAM 0x0258 
+#define SFLASH_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SFLASH_AT_READY 0x80 +#define SFLASH_AT_MISMATCH 0x40 +#define SFLASH_AT_ID_MASK 0x38 +#define SFLASH_AT_ID_SHIFT 3 + +/* SPI register bits, corerev >= 37 */ +#define GSIO_START 0x80000000 +#define GSIO_BUSY GSIO_START + +/* GCI UART Function sel related */ +#define MUXENAB_GCI_UART_MASK (0x00000f00) +#define MUXENAB_GCI_UART_SHIFT 8 +#define MUXENAB_GCI_UART_FNSEL_MASK (0x00003000) +#define MUXENAB_GCI_UART_FNSEL_SHIFT 12 + +/* + * These are the UART port assignments, expressed as offsets from the base + * register. These assignments should hold for any serial port based on + * a 8250, 16450, or 16550(A). + */ + +#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */ +#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */ +#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */ +#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */ +#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */ +#define UART_IIR 2 /**< In: Interrupt Identity Register */ +#define UART_FCR 2 /**< Out: FIFO Control Register */ +#define UART_LCR 3 /**< Out: Line Control Register */ +#define UART_MCR 4 /**< Out: Modem Control Register */ +#define UART_LSR 5 /**< In: Line Status Register */ +#define UART_MSR 6 /**< In: Modem Status Register */ +#define UART_SCR 7 /**< I/O: Scratch Register */ +#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */ +#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */ +#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */ +#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */ +#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */ +#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */ +#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */ +#define UART_LSR_BREAK 0x10 /**< Break interrupt */ +#define UART_LSR_FRAMING 0x08 /**< Framing error */ +#define UART_LSR_PARITY 0x04 /**< Parity error */ +#define 
UART_LSR_OVERRUN 0x02 /**< Overrun error */ +#define UART_LSR_RXRDY 0x01 /**< Receiver ready */ +#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */ + +/* Interrupt Identity Register (IIR) bits */ +#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */ +#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */ +#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */ +#define UART_IIR_NOINT 0x1 /**< No interrupt pending */ +#define UART_IIR_THRE 0x2 /**< THR empty */ +#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */ +#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */ +#define UART_IIR_CHAR_TIME 0xc /**< Character time */ + +/* Interrupt Enable Register (IER) bits */ +#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */ +#define UART_IER_EDSSI 8 /**< enable modem status interrupt */ +#define UART_IER_ELSI 4 /**< enable receiver line status interrupt */ +#define UART_IER_ETBEI 2 /**< enable transmitter holding register empty interrupt */ +#define UART_IER_ERBFI 1 /**< enable data available interrupt */ + +/* pmustatus */ +#define PST_SLOW_WR_PENDING 0x0400 +#define PST_EXTLPOAVAIL 0x0100 +#define PST_WDRESET 0x0080 +#define PST_INTPEND 0x0040 +#define PST_SBCLKST 0x0030 +#define PST_SBCLKST_ILP 0x0010 +#define PST_SBCLKST_ALP 0x0020 +#define PST_SBCLKST_HT 0x0030 +#define PST_ALPAVAIL 0x0008 +#define PST_HTAVAIL 0x0004 +#define PST_RESINIT 0x0003 +#define PST_ILPFASTLPO 0x00010000 + +/* pmucapabilities */ +#define PCAP_REV_MASK 0x000000ff +#define PCAP_RC_MASK 0x00001f00 +#define PCAP_RC_SHIFT 8 +#define PCAP_TC_MASK 0x0001e000 +#define PCAP_TC_SHIFT 13 +#define PCAP_PC_MASK 0x001e0000 +#define PCAP_PC_SHIFT 17 +#define PCAP_VC_MASK 0x01e00000 +#define PCAP_VC_SHIFT 21 +#define PCAP_CC_MASK 0x1e000000 +#define PCAP_CC_SHIFT 25 +#define PCAP5_PC_MASK 0x003e0000 /**< PMU corerev >= 5 */ +#define PCAP5_PC_SHIFT 17 +#define PCAP5_VC_MASK 0x07c00000 +#define 
PCAP5_VC_SHIFT 22 +#define PCAP5_CC_MASK 0xf8000000 +#define PCAP5_CC_SHIFT 27 + +/* pmucapabilities ext */ +#define PCAP_EXT_ST_NUM_SHIFT (8) /* stat timer number */ +#define PCAP_EXT_ST_NUM_MASK (0xf << PCAP_EXT_ST_NUM_SHIFT) +#define PCAP_EXT_ST_SRC_NUM_SHIFT (12) /* stat timer source number */ +#define PCAP_EXT_ST_SRC_NUM_MASK (0xf << PCAP_EXT_ST_SRC_NUM_SHIFT) + +/* pmustattimer ctrl */ +#define PMU_ST_SRC_SHIFT (0) /* stat timer source number */ +#define PMU_ST_SRC_MASK (0xff << PMU_ST_SRC_SHIFT) +#define PMU_ST_CNT_MODE_SHIFT (10) /* stat timer count mode */ +#define PMU_ST_CNT_MODE_MASK (0x3 << PMU_ST_CNT_MODE_SHIFT) +#define PMU_ST_EN_SHIFT (8) /* stat timer enable */ +#define PMU_ST_EN_MASK (0x1 << PMU_ST_EN_SHIFT) +#define PMU_ST_ENAB 1 +#define PMU_ST_DISAB 0 +#define PMU_ST_INT_EN_SHIFT (9) /* stat timer enable */ +#define PMU_ST_INT_EN_MASK (0x1 << PMU_ST_INT_EN_SHIFT) +#define PMU_ST_INT_ENAB 1 +#define PMU_ST_INT_DISAB 0 + +/* CoreCapabilitiesExtension */ +#define PCAP_EXT_USE_MUXED_ILP_CLK_MASK 0x04000000 + +/* PMU Resource Request Timer registers */ +/* This is based on PmuRev0 */ +#define PRRT_TIME_MASK 0x03ff +#define PRRT_INTEN 0x0400 +/* ReqActive 25 + * The hardware sets this field to 1 when the timer expires. + * Software writes this field to 1 to make immediate resource requests. 
+ */ +#define PRRT_REQ_ACTIVE 0x0800 /* To check h/w status */ +#define PRRT_IMMEDIATE_RES_REQ 0x0800 /* macro for sw immediate res req */ +#define PRRT_ALP_REQ 0x1000 +#define PRRT_HT_REQ 0x2000 +#define PRRT_HQ_REQ 0x4000 + +/* PMU Int Control register bits */ +#define PMU_INTC_ALP_REQ 0x1 +#define PMU_INTC_HT_REQ 0x2 +#define PMU_INTC_HQ_REQ 0x4 + +/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */ +#define RSRC_INTR_MASK_TIMER_INT_0 1 +#define PMU_INTR_MASK_EXTWAKE_REQ_ACTIVE_0 (1 << 20) + +/* bit 16 of the PMU interrupt vector - Stats Timer Interrupt */ +#define PMU_INT_STAT_TIMER_INT_SHIFT 16 +#define PMU_INT_STAT_TIMER_INT_MASK (1 << PMU_INT_STAT_TIMER_INT_SHIFT) + +/* PMU resource bit position */ +#define PMURES_BIT(bit) (1 << (bit)) + +/* PMU resource number limit */ +#define PMURES_MAX_RESNUM 30 + +/* PMU chip control0 register */ +#define PMU_CHIPCTL0 0 + +#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0) +#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0) +#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0xF << 6) +#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6) +#define PMU_CC0_4369_XTAL_RES_BYPASS_START_VAL (0 << 12) +#define PMU_CC0_4369_XTAL_RES_BYPASS_START_MASK (0x7 << 12) +#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_VAL (0x1 << 15) +#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15) + +/* clock req types */ +#define PMU_CC1_CLKREQ_TYPE_SHIFT 19 +#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT) + +#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0 +#define CLKREQ_TYPE_CONFIG_PUSHPULL 1 + +/* Power Control */ +#define PWRCTL_ENAB_MEM_CLK_GATE_SHIFT 5 +#define PWRCTL_AUTO_MEM_STBYRET 28 + +/* PMU chip control1 register */ +#define PMU_CHIPCTL1 1 +#define PMU_CC1_RXC_DLL_BYPASS 0x00010000 +#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN 0x00000010 + +#define PMU_CC1_IF_TYPE_MASK 0x00000030 +#define PMU_CC1_IF_TYPE_RMII 0x00000000 +#define PMU_CC1_IF_TYPE_MII 
0x00000010 +#define PMU_CC1_IF_TYPE_RGMII 0x00000020 + +#define PMU_CC1_SW_TYPE_MASK 0x000000c0 +#define PMU_CC1_SW_TYPE_EPHY 0x00000000 +#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040 +#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 +#define PMU_CC1_SW_TYPE_RGMII 0x000000c0 + +#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080 +#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000 + +#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY_MASK 0x00003F00u +#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY 0x00000400u + +/* PMU chip control2 register */ +#define PMU_CC2_RFLDO3P3_PU_FORCE_ON (1 << 15) +#define PMU_CC2_RFLDO3P3_PU_CLEAR 0x00000000 + +#define PMU_CC2_WL2CDIG_I_PMU_SLEEP (1 << 16) +#define PMU_CHIPCTL2 2 +#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18) +#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19) +#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20) +#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21) +#define PMU_CC2_MASK_WL_DEV_WAKE (1 << 22) +#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1 << 25) +#define PMU_CC2_GCI2_WAKE (1 << 31) + +#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3 << 26) +#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3 << 26) +#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0 << 28) +#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3 << 28) + +/* PMU chip control3 register */ +#define PMU_CHIPCTL3 3 +#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19 +#define PMU_CC3_ENABLE_RF_SHIFT 22 +#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23 + +#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_VAL (0x3F << 0) +#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_MASK (0x3F << 0) +#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_VAL (0x3F << 15) +#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_MASK (0x3F << 15) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_VAL (0x3F << 6) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_MASK (0x3F << 6) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_VAL (0x3F << 21) +#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_MASK (0x3F << 21) 
+#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_VAL (0x2 << 12) +#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_MASK (0x7 << 12) +#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_VAL (0x6 << 27) +#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_MASK (0x7 << 27) + +/* PMU chip control4 register */ +#define PMU_CHIPCTL4 4 + +/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */ +#define PMU_CC4_IF_TYPE_MASK 0x00003000 +#define PMU_CC4_IF_TYPE_RMII 0x00000000 +#define PMU_CC4_IF_TYPE_MII 0x00001000 +#define PMU_CC4_IF_TYPE_RGMII 0x00002000 + +#define PMU_CC4_SW_TYPE_MASK 0x0000c000 +#define PMU_CC4_SW_TYPE_EPHY 0x00000000 +#define PMU_CC4_SW_TYPE_EPHYMII 0x00004000 +#define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000 +#define PMU_CC4_SW_TYPE_RGMII 0x0000c000 +#define PMU_CC4_DISABLE_LQ_AVAIL (1<<27) + +#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON (1u << 15u) +#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u) +#define PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u) +#define PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u) + +#define PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON (1u << 21u) +#define PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON (1u << 22u) +#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u) +#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u) + +/* PMU chip control5 register */ +#define PMU_CHIPCTL5 5 + +#define PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON (1u << 9u) +#define PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON (1u << 10u) +#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u) +#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u) + +/* PMU chip control6 register */ +#define PMU_CHIPCTL6 6 +#define PMU_CC6_ENABLE_CLKREQ_WAKEUP (1 << 4) +#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP (1 << 6) +#define PMU_CC6_ENABLE_PCIE_RETENTION (1 << 12) +#define PMU_CC6_ENABLE_PMU_EXT_PERST (1 << 13) +#define PMU_CC6_ENABLE_PMU_WAKEUP_PERST (1 << 14) + +/* PMU chip control7 register */ +#define PMU_CHIPCTL7 7 +#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25) 
+#define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27) +/* 53537 series have gmca1 gmac_if_type in cc7 [7:6](defalut 0b01) */ +#define PMU_CC7_IF_TYPE_MASK 0x000000c0 +#define PMU_CC7_IF_TYPE_RMII 0x00000000 +#define PMU_CC7_IF_TYPE_MII 0x00000040 +#define PMU_CC7_IF_TYPE_RGMII 0x00000080 + +#define PMU_CHIPCTL8 8 +#define PMU_CHIPCTL9 9 + +#define PMU_CHIPCTL10 10 +#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_SHIFT 0 +#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_MASK 0x000000ff +#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_SHIFT 8 +#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_MASK 0x0000ff00 +#define PMU_CC10_PCIE_PWRSW_UP_DLY_SHIFT 16 +#define PMU_CC10_PCIE_PWRSW_UP_DLY_MASK 0x000f0000 +#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_SHIFT 20 +#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_MASK 0x00f00000 +#define PMU_CC10_FORCE_PCIE_ON (1 << 24) +#define PMU_CC10_FORCE_PCIE_SW_ON (1 << 25) +#define PMU_CC10_FORCE_PCIE_RETNT_ON (1 << 26) + +#define PMU_CC10_PCIE_PWRSW_RESET_CNT_4US 1 +#define PMU_CC10_PCIE_PWRSW_RESET_CNT_8US 2 + +#define PMU_CC10_PCIE_PWRSW_UP_DLY_0US 0 + +#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_4US 1 + +#define PMU_CHIPCTL11 11 +#define PMU_CHIPCTL12 12 + +/* PMU chip control13 register */ +#define PMU_CHIPCTL13 13 + +#define PMU_CC13_SUBCORE_CBUCK2VDDB_OFF (1u << 0u) +#define PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF (1u << 1u) +#define PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF (1u << 2u) +#define PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF (1u << 3u) + +#define PMU_CC13_MAIN_CBUCK2VDDB_OFF (1u << 4u) +#define PMU_CC13_MAIN_CBUCK2VDDRET_OFF (1u << 5u) +#define PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF (1u << 6u) +#define PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF (1u << 7u) + +#define PMU_CC13_AUX_CBUCK2VDDB_OFF (1u << 8u) +#define PMU_CC13_AUX_MEMLPLDO2VDDB_OFF (1u << 10u) +#define PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF (1u << 11u) +#define PMU_CC13_AUX_CBUCK2VDDRET_OFF (1u << 12u) + +#define PMU_CHIPCTL14 14 +#define PMU_CHIPCTL15 15 +#define PMU_CHIPCTL16 16 +#define PMU_CC16_CLK4M_DIS (1 << 4) +#define 
PMU_CC16_FF_ZERO_ADJ (4 << 5) + +/* PMU chip control14 register */ +#define PMU_CC14_MAIN_VDDB2VDDRET_UP_DLY_MASK (0xF) +#define PMU_CC14_MAIN_VDDB2VDD_UP_DLY_MASK (0xF << 4) +#define PMU_CC14_AUX_VDDB2VDDRET_UP_DLY_MASK (0xF << 8) +#define PMU_CC14_AUX_VDDB2VDD_UP_DLY_MASK (0xF << 12) +#define PMU_CC14_PCIE_VDDB2VDDRET_UP_DLY_MASK (0xF << 16) +#define PMU_CC14_PCIE_VDDB2VDD_UP_DLY_MASK (0xF << 20) + +/* PMU corerev and chip specific PLL controls. + * PMU_PLL_XX where is PMU corerev and is an arbitrary number + * to differentiate different PLLs controlled by the same PMU rev. + */ +/* pllcontrol registers */ +/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */ +#define PMU0_PLL0_PLLCTL0 0 +#define PMU0_PLL0_PC0_PDIV_MASK 1 +#define PMU0_PLL0_PC0_PDIV_FREQ 25000 +#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 +#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 +#define PMU0_PLL0_PC0_DIV_ARM_BASE 8 + +/* PC0_DIV_ARM for PLLOUT_ARM */ +#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 +#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1 +#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 +#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */ +#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 +#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 +#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 +#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 + +/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */ +#define PMU0_PLL0_PLLCTL1 1 +#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 +#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28 +#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 +#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 +#define PMU0_PLL0_PC1_STOP_MOD 0x00000040 + +/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */ +#define PMU0_PLL0_PLLCTL2 2 +#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf +#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4 + +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU1_PLL0_PLLCTL0 0 +#define PMU1_PLL0_PC0_P1DIV_MASK 
0x00f00000 +#define PMU1_PLL0_PC0_P1DIV_SHIFT 20 +#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000 +#define PMU1_PLL0_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU1_PLL0_PLLCTL1 1 +#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff +#define PMU1_PLL0_PC1_M1DIV_SHIFT 0 +#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC1_M2DIV_SHIFT 8 +#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000 +#define PMU1_PLL0_PC1_M3DIV_SHIFT 16 +#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000 +#define PMU1_PLL0_PC1_M4DIV_SHIFT 24 +#define PMU1_PLL0_PC1_M4DIV_BY_9 9 +#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12 +#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24 +#define PMU1_PLL0_PC1_M4DIV_BY_60 0x3C +#define PMU1_PLL0_PC1_M2_M4DIV_MASK 0xff00ff00 +#define PMU1_PLL0_PC1_HOLD_LOAD_CH 0x28 +#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 +#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) +#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU1_PLL0_PLLCTL2 2 +#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff +#define PMU1_PLL0_PC2_M5DIV_SHIFT 0 +#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc +#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M5DIV_BY_31 0x1f +#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_M5DIV_BY_42 0x2a +#define PMU1_PLL0_PC2_M5DIV_BY_60 0x3c +#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC2_M6DIV_SHIFT 8 +#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 +#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1 +#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /**< recommended for 4319 */ +#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU1_PLL0_PLLCTL3 3 +#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU1_PLL0_PLLCTL4 
4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU1_PLL0_PLLCTL5 5 +#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 +#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 +#define PMU1_PLL0_PC5_ASSERT_CH_MASK 0x3f000000 +#define PMU1_PLL0_PC5_ASSERT_CH_SHIFT 24 +#define PMU1_PLL0_PC5_DEASSERT_CH_MASK 0xff000000 + +#define PMU1_PLL0_PLLCTL6 6 +#define PMU1_PLL0_PLLCTL7 7 +#define PMU1_PLL0_PLLCTL8 8 + +#define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1) +#define PMU_PLL4350_OPENLOOP_MASK (1 << 7) + +#define PMU1_PLL0_PLLCTL9 9 + +#define PMU1_PLL0_PLLCTL10 10 + +/* PMU rev 2 control words */ +#define PMU2_PHY_PLL_PLLCTL 4 +#define PMU2_SI_PLL_PLLCTL 10 + +/* PMU rev 2 */ +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU2_PLL_PLLCTL0 0 +#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000 +#define PMU2_PLL_PC0_P1DIV_SHIFT 20 +#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000 +#define PMU2_PLL_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU2_PLL_PLLCTL1 1 +#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff +#define PMU2_PLL_PC1_M1DIV_SHIFT 0 +#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC1_M2DIV_SHIFT 8 +#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000 +#define PMU2_PLL_PC1_M3DIV_SHIFT 16 +#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000 +#define PMU2_PLL_PC1_M4DIV_SHIFT 24 + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU2_PLL_PLLCTL2 2 +#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff +#define PMU2_PLL_PC2_M5DIV_SHIFT 0 +#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC2_M6DIV_SHIFT 8 +#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17 +#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU2_PLL_PLLCTL3 3 +#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU2_PLL_PLLCTL4 4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU2_PLL_PLLCTL5 5 +#define 
PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28 + +/* PMU rev 5 (& 6) */ +#define PMU5_PLL_P1P2_OFF 0 +#define PMU5_PLL_P1_MASK 0x0f000000 +#define PMU5_PLL_P1_SHIFT 24 +#define PMU5_PLL_P2_MASK 0x00f00000 +#define PMU5_PLL_P2_SHIFT 20 +#define PMU5_PLL_M14_OFF 1 +#define PMU5_PLL_MDIV_MASK 0x000000ff +#define PMU5_PLL_MDIV_WIDTH 8 +#define PMU5_PLL_NM5_OFF 2 +#define PMU5_PLL_NDIV_MASK 0xfff00000 +#define PMU5_PLL_NDIV_SHIFT 20 +#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000 +#define PMU5_PLL_NDIV_MODE_SHIFT 17 +#define PMU5_PLL_FMAB_OFF 3 +#define PMU5_PLL_MRAT_MASK 0xf0000000 +#define PMU5_PLL_MRAT_SHIFT 28 +#define PMU5_PLL_ABRAT_MASK 0x08000000 +#define PMU5_PLL_ABRAT_SHIFT 27 +#define PMU5_PLL_FDIV_MASK 0x07ffffff +#define PMU5_PLL_PLLCTL_OFF 4 +#define PMU5_PLL_PCHI_OFF 5 +#define PMU5_PLL_PCHI_MASK 0x0000003f + +/* pmu XtalFreqRatio */ +#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF +#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000 +#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31 + +/* Divider allocation in 4716/47162/5356/5357 */ +#define PMU5_MAINPLL_CPU 1 +#define PMU5_MAINPLL_MEM 2 +#define PMU5_MAINPLL_SI 3 + +#define PMU7_PLL_PLLCTL7 7 +#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000 +#define PMU7_PLL_CTL7_M4DIV_SHIFT 24 +#define PMU7_PLL_CTL7_M4DIV_BY_6 6 +#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc +#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL8 8 +#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff +#define PMU7_PLL_CTL8_M5DIV_SHIFT 0 +#define 
PMU7_PLL_CTL8_M5DIV_BY_8 8 +#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18 +#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00 +#define PMU7_PLL_CTL8_M6DIV_SHIFT 8 +#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL11 11 +#define PMU7_PLL_PLLCTL11_MASK 0xffffff00 +#define PMU7_PLL_PLLCTL11_VAL 0x22222200 + +/* PMU rev 15 */ +#define PMU15_PLL_PLLCTL0 0 +#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + +#define PMU15_PLL_PLLCTL1 1 +#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060 +#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17 +#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000 +#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28 +#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000 +#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30 + +#define PMU15_PLL_PLLCTL2 2 +#define PMU15_PLL_PC2_CTEN_MASK 0x00000001 +#define PMU15_PLL_PC2_CTEN_SHIFT 0 + +#define PMU15_PLL_PLLCTL3 3 +#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001 +#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0 +#define 
PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000 +#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3 + +#define PMU15_PLL_PLLCTL4 4 +#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007 +#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0 +#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038 +#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3 +#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0 +#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6 +#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00 +#define PMU15_PLL_PC4_DBGMODE_SHIFT 9 +#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000 +#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12 +#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000 +#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13 +#define PMU15_PLL_PC4_DINPOL_MASK 0x00100000 +#define PMU15_PLL_PC4_DINPOL_SHIFT 20 +#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000 +#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21 +#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000 +#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22 +#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000 +#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23 +#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000 +#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24 +#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000 +#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25 +#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000 +#define PMU15_PLL_PC4_TEST_EN_SHIFT 26 + 
+#define PMU15_PLL_PLLCTL5 5 +#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF +#define PMU15_PLL_PC5_FREQTGT_SHIFT 0 +#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000 +#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20 +#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000 +#define PMU15_PLL_PC5_PRESCALE_SHIFT 27 + +#define PMU15_PLL_PLLCTL6 6 +#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF +#define PMU15_PLL_PC6_FREQTGT_SHIFT 0 +#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000 +#define PMU15_PLL_PC6_DCOCTLSP_SHIFT 20 +#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000 +#define PMU15_PLL_PC6_PRESCALE_SHIFT 27 + +#define PMU15_FREQTGT_480_DEFAULT 0x19AB1 +#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5 +#define PMU15_ARM_96MHZ 96000000 /**< 96 Mhz */ +#define PMU15_ARM_98MHZ 98400000 /**< 98.4 Mhz */ +#define PMU15_ARM_97MHZ 97000000 /**< 97 Mhz */ + +#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070 +#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4 + +#define PMU17_PLLCTL2_NDIV_MODE_INT 0 +#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1 +#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2 +#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3 + +#define PMU17_PLLCTL0_BBPLL_PWRDWN 0 +#define PMU17_PLLCTL0_BBPLL_DRST 3 +#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8 + +/* PLL usage in 4716/47162 */ +#define PMU4716_MAINPLL_PLL0 12 + +/* PLL usage in 4335 */ +#define PMU4335_PLL0_PC2_P1DIV_MASK 0x000f0000 +#define PMU4335_PLL0_PC2_P1DIV_SHIFT 16 +#define PMU4335_PLL0_PC2_NDIV_INT_MASK 0xff800000 +#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT 23 +#define PMU4335_PLL0_PC1_MDIV2_MASK 0x0000ff00 +#define PMU4335_PLL0_PC1_MDIV2_SHIFT 8 + +/* PLL usage in 4347 */ +#define PMU4347_PLL0_PC2_P1DIV_MASK 0x000f0000 +#define PMU4347_PLL0_PC2_P1DIV_SHIFT 16 +#define PMU4347_PLL0_PC2_NDIV_INT_MASK 0x3ff00000 +#define PMU4347_PLL0_PC2_NDIV_INT_SHIFT 20 +#define PMU4347_PLL0_PC3_NDIV_FRAC_MASK 0x000fffff +#define PMU4347_PLL0_PC3_NDIV_FRAC_SHIFT 0 +#define PMU4347_PLL1_PC5_P1DIV_MASK 0xc0000000 +#define PMU4347_PLL1_PC5_P1DIV_SHIFT 30 +#define 
PMU4347_PLL1_PC6_P1DIV_MASK 0x00000003 +#define PMU4347_PLL1_PC6_P1DIV_SHIFT 0 +#define PMU4347_PLL1_PC6_NDIV_INT_MASK 0x00000ffc +#define PMU4347_PLL1_PC6_NDIV_INT_SHIFT 2 +#define PMU4347_PLL1_PC6_NDIV_FRAC_MASK 0xfffff000 +#define PMU4347_PLL1_PC6_NDIV_FRAC_SHIFT 12 + +/* Even though the masks are the same as 4347, separate macros are +created for 4369 +*/ +/* PLL usage in 4369 */ +#define PMU4369_PLL0_PC2_PDIV_MASK 0x000f0000 +#define PMU4369_PLL0_PC2_PDIV_SHIFT 16 +#define PMU4369_PLL0_PC2_NDIV_INT_MASK 0x3ff00000 +#define PMU4369_PLL0_PC2_NDIV_INT_SHIFT 20 +#define PMU4369_PLL0_PC3_NDIV_FRAC_MASK 0x000fffff +#define PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT 0 +#define PMU4369_PLL1_PC5_P1DIV_MASK 0xc0000000 +#define PMU4369_PLL1_PC5_P1DIV_SHIFT 30 +#define PMU4369_PLL1_PC6_P1DIV_MASK 0x00000003 +#define PMU4369_PLL1_PC6_P1DIV_SHIFT 0 +#define PMU4369_PLL1_PC6_NDIV_INT_MASK 0x00000ffc +#define PMU4369_PLL1_PC6_NDIV_INT_SHIFT 2 +#define PMU4369_PLL1_PC6_NDIV_FRAC_MASK 0xfffff000 +#define PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT 12 + +/* 5357 Chip specific ChipControl register bits */ +#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */ +#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */ +#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */ +/* 43217 Chip specific ChipControl register bits */ +#define CCTRL43217_EXTPA_C0 (1<<13) /* core0 extPA in ChipControl 1, bit 13 */ +#define CCTRL43217_EXTPA_C1 (1<<8) /* core1 extPA in ChipControl 1, bit 8 */ + +/* 43236 resources */ +#define RES43236_REGULATOR 0 +#define RES43236_ILP_REQUEST 1 +#define RES43236_XTAL_PU 2 +#define RES43236_ALP_AVAIL 3 +#define RES43236_SI_PLL_ON 4 +#define RES43236_HT_SI_AVAIL 5 + +/* 43236 chip-specific ChipControl register bits */ +#define CCTRL43236_BT_COEXIST (1<<0) /**< 0 disable */ +#define CCTRL43236_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */ +#define CCTRL43236_EXT_LNA (1<<2) /**< 0 disable */ +#define CCTRL43236_ANT_MUX_2o3 (1<<3) 
/**< 2o3 mux, chipcontrol bit 3 */ +#define CCTRL43236_GSIO (1<<4) /**< 0 disable */ + +/* 43236 Chip specific ChipStatus register bits */ +#define CST43236_SFLASH_MASK 0x00000040 +#define CST43236_OTP_SEL_MASK 0x00000080 +#define CST43236_OTP_SEL_SHIFT 7 +#define CST43236_HSIC_MASK 0x00000100 /**< USB/HSIC */ +#define CST43236_BP_CLK 0x00000200 /**< 120/96Mbps */ +#define CST43236_BOOT_MASK 0x00001800 +#define CST43236_BOOT_SHIFT 11 +#define CST43236_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */ +#define CST43236_BOOT_FROM_ROM 1 /**< boot from ROM */ +#define CST43236_BOOT_FROM_FLASH 2 /**< boot from FLASH */ +#define CST43236_BOOT_FROM_INVALID 3 + +#define PMU1_PLL0_CHIPCTL0 0 +#define PMU1_PLL0_CHIPCTL1 1 +#define PMU1_PLL0_CHIPCTL2 2 + +#define SOCDEVRAM_BP_ADDR 0x1E000000 +#define SOCDEVRAM_ARM_ADDR 0x00800000 + +#define PMU_VREG0_I_SR_CNTL_EN_SHIFT 0 +#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2 +#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3 +#define PMU_VREG0_CBUCKFSW_ADJ_SHIFT 7 +#define PMU_VREG0_CBUCKFSW_ADJ_MASK 0x1F +#define PMU_VREG0_RAMP_SEL_SHIFT 13 +#define PMU_VREG0_RAMP_SEL_MASK 0x7 +#define PMU_VREG0_VFB_RSEL_SHIFT 17 +#define PMU_VREG0_VFB_RSEL_MASK 3 + +#define PMU_VREG4_ADDR 4 + +#define PMU_VREG4_CLDO_PWM_SHIFT 4 +#define PMU_VREG4_CLDO_PWM_MASK 0x7 + +#define PMU_VREG4_LPLDO1_SHIFT 15 +#define PMU_VREG4_LPLDO1_MASK 0x7 +#define PMU_VREG4_LPLDO1_1p20V 0 +#define PMU_VREG4_LPLDO1_1p15V 1 +#define PMU_VREG4_LPLDO1_1p10V 2 +#define PMU_VREG4_LPLDO1_1p25V 3 +#define PMU_VREG4_LPLDO1_1p05V 4 +#define PMU_VREG4_LPLDO1_1p00V 5 +#define PMU_VREG4_LPLDO1_0p95V 6 +#define PMU_VREG4_LPLDO1_0p90V 7 + +/* 4350/4345 VREG4 settings */ +#define PMU4350_VREG4_LPLDO1_1p10V 0 +#define PMU4350_VREG4_LPLDO1_1p15V 1 +#define PMU4350_VREG4_LPLDO1_1p21V 2 +#define PMU4350_VREG4_LPLDO1_1p24V 3 +#define PMU4350_VREG4_LPLDO1_0p90V 4 +#define PMU4350_VREG4_LPLDO1_0p96V 5 +#define PMU4350_VREG4_LPLDO1_1p01V 6 +#define PMU4350_VREG4_LPLDO1_1p04V 7 + +#define 
PMU_VREG4_LPLDO2_LVM_SHIFT 18 +#define PMU_VREG4_LPLDO2_LVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_HVM_SHIFT 21 +#define PMU_VREG4_LPLDO2_HVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_LVM_HVM_MASK 0x3f +#define PMU_VREG4_LPLDO2_1p00V 0 +#define PMU_VREG4_LPLDO2_1p15V 1 +#define PMU_VREG4_LPLDO2_1p20V 2 +#define PMU_VREG4_LPLDO2_1p10V 3 +#define PMU_VREG4_LPLDO2_0p90V 4 /**< 4 - 7 is 0.90V */ + +#define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27 +#define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1 + +#define PMU_VREG5_ADDR 5 +#define PMU_VREG5_HSICAVDD_PD_SHIFT 6 +#define PMU_VREG5_HSICAVDD_PD_MASK 0x1 +#define PMU_VREG5_HSICDVDD_PD_SHIFT 11 +#define PMU_VREG5_HSICDVDD_PD_MASK 0x1 + +/* 43228 chipstatus reg bits */ +#define CST43228_OTP_PRESENT 0x2 + +/* 4360 Chip specific ChipControl register bits */ +#define CCTRL4360_I2C_MODE (1 << 0) +#define CCTRL4360_UART_MODE (1 << 1) +#define CCTRL4360_SECI_MODE (1 << 2) +#define CCTRL4360_BTSWCTRL_MODE (1 << 3) +#define CCTRL4360_DISCRETE_FEMCTRL_MODE (1 << 4) +#define CCTRL4360_DIGITAL_PACTRL_MODE (1 << 5) +#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT (1 << 6) +#define CCTRL4360_EXTRA_GPIO_MODE (1 << 7) +#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8) +#define CCTRL4360_BT_LGCY_MODE (1 << 9) +#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21) +#define CCTRL4360_SECI_ON_GPIO01 (1 << 24) + +/* 4360 Chip specific Regulator Control register bits */ +#define RCTRL4360_RFLDO_PWR_DOWN (1 << 1) + +/* 4360 PMU resources and chip status bits */ +#define RES4360_REGULATOR 0 +#define RES4360_ILP_AVAIL 1 +#define RES4360_ILP_REQ 2 +#define RES4360_XTAL_LDO_PU 3 +#define RES4360_XTAL_PU 4 +#define RES4360_ALP_AVAIL 5 +#define RES4360_BBPLLPWRSW_PU 6 +#define RES4360_HT_AVAIL 7 +#define RES4360_OTP_PU 8 +#define RES4360_AVB_PLL_PWRSW_PU 9 +#define RES4360_PCIE_TL_CLK_AVAIL 10 + +#define CST4360_XTAL_40MZ 0x00000001 +#define CST4360_SFLASH 0x00000002 +#define CST4360_SPROM_PRESENT 0x00000004 +#define CST4360_SFLASH_TYPE 0x00000004 +#define CST4360_OTP_ENABLED 0x00000008 
+#define CST4360_REMAP_ROM 0x00000010 +#define CST4360_RSRC_INIT_MODE_MASK 0x00000060 +#define CST4360_RSRC_INIT_MODE_SHIFT 5 +#define CST4360_ILP_DIVEN 0x00000080 +#define CST4360_MODE_USB 0x00000100 +#define CST4360_SPROM_SIZE_MASK 0x00000600 +#define CST4360_SPROM_SIZE_SHIFT 9 +#define CST4360_BBPLL_LOCK 0x00000800 +#define CST4360_AVBBPLL_LOCK 0x00001000 +#define CST4360_USBBBPLL_LOCK 0x00002000 +#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \ + CST4360_RSRC_INIT_MODE_SHIFT) + +#define CCTRL_4360_UART_SEL 0x2 + +#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \ + CST4360_RSRC_INIT_MODE_SHIFT) + +#define PMU4360_CC1_GPIO7_OVRD (1<<23) /* GPIO7 override */ + +/* 43602 PMU resources based on pmu_params.xls version v0.95 */ +#define RES43602_LPLDO_PU 0 +#define RES43602_REGULATOR 1 +#define RES43602_PMU_SLEEP 2 +#define RES43602_RSVD_3 3 +#define RES43602_XTALLDO_PU 4 +#define RES43602_SERDES_PU 5 +#define RES43602_BBPLL_PWRSW_PU 6 +#define RES43602_SR_CLK_START 7 +#define RES43602_SR_PHY_PWRSW 8 +#define RES43602_SR_SUBCORE_PWRSW 9 +#define RES43602_XTAL_PU 10 +#define RES43602_PERST_OVR 11 +#define RES43602_SR_CLK_STABLE 12 +#define RES43602_SR_SAVE_RESTORE 13 +#define RES43602_SR_SLEEP 14 +#define RES43602_LQ_START 15 +#define RES43602_LQ_AVAIL 16 +#define RES43602_WL_CORE_RDY 17 +#define RES43602_ILP_REQ 18 +#define RES43602_ALP_AVAIL 19 +#define RES43602_RADIO_PU 20 +#define RES43602_RFLDO_PU 21 +#define RES43602_HT_START 22 +#define RES43602_HT_AVAIL 23 +#define RES43602_MACPHY_CLKAVAIL 24 +#define RES43602_PARLDO_PU 25 +#define RES43602_RSVD_26 26 + +/* 43602 chip status bits */ +#define CST43602_SPROM_PRESENT (1<<1) +#define CST43602_SPROM_SIZE (1<<10) /* 0 = 16K, 1 = 4K */ +#define CST43602_BBPLL_LOCK (1<<11) +#define CST43602_RF_LDO_OUT_OK (1<<15) /* RF LDO output OK */ + +#define PMU43602_CC1_GPIO12_OVRD (1<<28) /* GPIO12 override */ + +#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1) /* creates 
gated_pcie_wake, pmu_wakeup logic */ +#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN (1<<2) /* creates gated_pcie_wake, pmu_wakeup logic */ +#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3) +#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5) /* enable pmu_wakeup to request for ALP_AVAIL */ +#define PMU43602_CC2_PERST_L_EXTEND_EN (1<<9) /* extend perst_l until rsc PERST_OVR comes up */ +#define PMU43602_CC2_FORCE_EXT_LPO (1<<19) /* 1=ext LPO clock is the final LPO clock */ +#define PMU43602_CC2_XTAL32_SEL (1<<30) /* 0=ext_clock, 1=xtal */ + +#define CC_SR1_43602_SR_ASM_ADDR (0x0) + +/* PLL CTL register values for open loop, used during S/R operation */ +#define PMU43602_PLL_CTL6_VAL 0x68000528 +#define PMU43602_PLL_CTL7_VAL 0x6 + +#define PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29) + +/* 4365 PMU resources */ +#define RES4365_REGULATOR_PU 0 +#define RES4365_XTALLDO_PU 1 +#define RES4365_XTAL_PU 2 +#define RES4365_CPU_PLLLDO_PU 3 +#define RES4365_CPU_PLL_PU 4 +#define RES4365_WL_CORE_RDY 5 +#define RES4365_ILP_REQ 6 +#define RES4365_ALP_AVAIL 7 +#define RES4365_HT_AVAIL 8 +#define RES4365_BB_PLLLDO_PU 9 +#define RES4365_BB_PLL_PU 10 +#define RES4365_MINIMU_PU 11 +#define RES4365_RADIO_PU 12 +#define RES4365_MACPHY_CLK_AVAIL 13 + +/* 43684 PMU resources */ +#define RES43684_REGULATOR_PU 0 +#define RES43684_PCIE_LDO_BG_PU 1 +#define RES43684_XTAL_LDO_PU 2 +#define RES43684_XTAL_PU 3 +#define RES43684_CPU_PLL_LDO_PU 4 +#define RES43684_CPU_PLL_PU 5 +#define RES43684_WL_CORE_RDY 6 +#define RES43684_ILP_REQ 7 +#define RES43684_ALP_AVAIL 8 +#define RES43684_HT_AVAIL 9 +#define RES43684_BB_PLL_LDO_PU 10 +#define RES43684_BB_PLL_PU 11 +#define RES43684_MINI_PMU_PU 12 +#define RES43684_RADIO_PU 13 +#define RES43684_MACPHY_CLK_AVAIL 14 +#define RES43684_PCIE_LDO_PU 15 + +/* 7271 PMU resources */ +#define RES7271_REGULATOR_PU 0 +#define RES7271_WL_CORE_RDY 1 +#define RES7271_ILP_REQ 2 +#define RES7271_ALP_AVAIL 3 +#define RES7271_HT_AVAIL 4 +#define RES7271_BB_PLL_PU 5 +#define 
RES7271_MINIPMU_PU 6 +#define RES7271_RADIO_PU 7 +#define RES7271_MACPHY_CLK_AVAIL 8 + +/* 4349 related */ +#define RES4349_LPLDO_PU 0 +#define RES4349_BG_PU 1 +#define RES4349_PMU_SLEEP 2 +#define RES4349_PALDO3P3_PU 3 +#define RES4349_CBUCK_LPOM_PU 4 +#define RES4349_CBUCK_PFM_PU 5 +#define RES4349_COLD_START_WAIT 6 +#define RES4349_RSVD_7 7 +#define RES4349_LNLDO_PU 8 +#define RES4349_XTALLDO_PU 9 +#define RES4349_LDO3P3_PU 10 +#define RES4349_OTP_PU 11 +#define RES4349_XTAL_PU 12 +#define RES4349_SR_CLK_START 13 +#define RES4349_LQ_AVAIL 14 +#define RES4349_LQ_START 15 +#define RES4349_PERST_OVR 16 +#define RES4349_WL_CORE_RDY 17 +#define RES4349_ILP_REQ 18 +#define RES4349_ALP_AVAIL 19 +#define RES4349_MINI_PMU 20 +#define RES4349_RADIO_PU 21 +#define RES4349_SR_CLK_STABLE 22 +#define RES4349_SR_SAVE_RESTORE 23 +#define RES4349_SR_PHY_PWRSW 24 +#define RES4349_SR_VDDM_PWRSW 25 +#define RES4349_SR_SUBCORE_PWRSW 26 +#define RES4349_SR_SLEEP 27 +#define RES4349_HT_START 28 +#define RES4349_HT_AVAIL 29 +#define RES4349_MACPHY_CLKAVAIL 30 + +/* 4373 PMU resources */ +#define RES4373_LPLDO_PU 0 +#define RES4373_BG_PU 1 +#define RES4373_PMU_SLEEP 2 +#define RES4373_PALDO3P3_PU 3 +#define RES4373_CBUCK_LPOM_PU 4 +#define RES4373_CBUCK_PFM_PU 5 +#define RES4373_COLD_START_WAIT 6 +#define RES4373_RSVD_7 7 +#define RES4373_LNLDO_PU 8 +#define RES4373_XTALLDO_PU 9 +#define RES4373_LDO3P3_PU 10 +#define RES4373_OTP_PU 11 +#define RES4373_XTAL_PU 12 +#define RES4373_SR_CLK_START 13 +#define RES4373_LQ_AVAIL 14 +#define RES4373_LQ_START 15 +#define RES4373_PERST_OVR 16 +#define RES4373_WL_CORE_RDY 17 +#define RES4373_ILP_REQ 18 +#define RES4373_ALP_AVAIL 19 +#define RES4373_MINI_PMU 20 +#define RES4373_RADIO_PU 21 +#define RES4373_SR_CLK_STABLE 22 +#define RES4373_SR_SAVE_RESTORE 23 +#define RES4373_SR_PHY_PWRSW 24 +#define RES4373_SR_VDDM_PWRSW 25 +#define RES4373_SR_SUBCORE_PWRSW 26 +#define RES4373_SR_SLEEP 27 +#define RES4373_HT_START 28 +#define RES4373_HT_AVAIL 29 
+#define RES4373_MACPHY_CLKAVAIL 30 +/* SR Control0 bits */ +#define CC_SR0_4349_SR_ENG_EN_MASK 0x1 +#define CC_SR0_4349_SR_ENG_EN_SHIFT 0 +#define CC_SR0_4349_SR_ENG_CLK_EN (1 << 1) +#define CC_SR0_4349_SR_RSRC_TRIGGER (0xC << 2) +#define CC_SR0_4349_SR_WD_MEM_MIN_DIV (0x3 << 6) +#define CC_SR0_4349_SR_MEM_STBY_ALLOW_MSK (1 << 16) +#define CC_SR0_4349_SR_MEM_STBY_ALLOW_SHIFT 16 +#define CC_SR0_4349_SR_ENABLE_ILP (1 << 17) +#define CC_SR0_4349_SR_ENABLE_ALP (1 << 18) +#define CC_SR0_4349_SR_ENABLE_HT (1 << 19) +#define CC_SR0_4349_SR_ALLOW_PIC (3 << 20) +#define CC_SR0_4349_SR_PMU_MEM_DISABLE (1 << 30) +/* SR Control0 bits */ +#define CC_SR0_4349_SR_ENG_EN_MASK 0x1 +#define CC_SR0_4349_SR_ENG_EN_SHIFT 0 +#define CC_SR0_4349_SR_ENG_CLK_EN (1 << 1) +#define CC_SR0_4349_SR_RSRC_TRIGGER (0xC << 2) +#define CC_SR0_4349_SR_WD_MEM_MIN_DIV (0x3 << 6) +#define CC_SR0_4349_SR_MEM_STBY_ALLOW (1 << 16) +#define CC_SR0_4349_SR_ENABLE_ILP (1 << 17) +#define CC_SR0_4349_SR_ENABLE_ALP (1 << 18) +#define CC_SR0_4349_SR_ENABLE_HT (1 << 19) +#define CC_SR0_4349_SR_ALLOW_PIC (3 << 20) +#define CC_SR0_4349_SR_PMU_MEM_DISABLE (1 << 30) + +/* SR binary offset is at 8K */ +#define CC_SR1_4349_SR_ASM_ADDR (0x10) +#define CST4349_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4349_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4349_SPROM_PRESENT 0x00000010 + +/* 4373 related */ +#define CST4373_CHIPMODE_USB20D(cs) (((cs) & (1 << 8)) != 0) /* USB */ +#define CST4373_CHIPMODE_SDIOD(cs) (((cs) & (1 << 7)) != 0) /* SDIO */ +#define CST4373_CHIPMODE_PCIE(cs) (((cs) & (1 << 6)) != 0) /* PCIE */ +#define CST4373_SFLASH_PRESENT 0x00000010 + +#define VREG4_4349_MEMLPLDO_PWRUP_MASK (1 << 31) +#define VREG4_4349_MEMLPLDO_PWRUP_SHIFT (31) +#define VREG4_4349_LPLDO1_OUTPUT_VOLT_ADJ_MASK (0x7 << 15) +#define VREG4_4349_LPLDO1_OUTPUT_VOLT_ADJ_SHIFT (15) +#define CC2_4349_PHY_PWRSE_RST_CNT_MASK (0xF << 0) +#define CC2_4349_PHY_PWRSE_RST_CNT_SHIFT (0) +#define 
CC2_4349_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC2_4349_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4349_MEMLPLDO_PWRSW_EN_MASK (1 << 21) +#define CC2_4349_MEMLPLDO_PWRSW_EN_SHIFT (21) +#define CC2_4349_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4349_SDIO_AOS_WAKEUP_SHIFT (24) +#define CC2_4349_PMUWAKE_EN_MASK (1 << 31) +#define CC2_4349_PMUWAKE_EN_SHIFT (31) + +#define CC5_4349_MAC_PHY_CLK_8_DIV (1 << 27) + +#define CC6_4349_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4349_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_SHIFT (6) +#define CC6_4349_PMU_EN_EXT_PERST_MASK (1 << 13) +#define CC6_4349_PMU_EN_L2_DEASSERT_MASK (1 << 14) +#define CC6_4349_PMU_EN_L2_DEASSERT_SHIF (14) +#define CC6_4349_PMU_ENABLE_L2REFCLKPAD_PWRDWN (1 << 15) +#define CC6_4349_PMU_EN_MDIO_MASK (1 << 16) +#define CC6_4349_PMU_EN_ASSERT_L2_MASK (1 << 25) + +/* 4349 GCI function sel values */ +/* + * Reference + * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/ToplevelArchitecture4349B0#Function_Sel + */ +#define CC4349_FNSEL_HWDEF (0) +#define CC4349_FNSEL_SAMEASPIN (1) +#define CC4349_FNSEL_GPIO (2) +#define CC4349_FNSEL_FAST_UART (3) +#define CC4349_FNSEL_GCI0 (4) +#define CC4349_FNSEL_GCI1 (5) +#define CC4349_FNSEL_DGB_UART (6) +#define CC4349_FNSEL_I2C (7) +#define CC4349_FNSEL_SPROM (8) +#define CC4349_FNSEL_MISC0 (9) +#define CC4349_FNSEL_MISC1 (10) +#define CC4349_FNSEL_MISC2 (11) +#define CC4349_FNSEL_IND (12) +#define CC4349_FNSEL_PDN (13) +#define CC4349_FNSEL_PUP (14) +#define CC4349_FNSEL_TRISTATE (15) + +/* 4364 related */ +#define RES4364_LPLDO_PU 0 +#define RES4364_BG_PU 1 +#define RES4364_MEMLPLDO_PU 2 +#define RES4364_PALDO3P3_PU 3 +#define RES4364_CBUCK_1P2 4 +#define RES4364_CBUCK_1V8 5 +#define RES4364_COLD_START_WAIT 6 +#define RES4364_SR_3x3_VDDM_PWRSW 7 +#define RES4364_3x3_MACPHY_CLKAVAIL 8 +#define RES4364_XTALLDO_PU 9 +#define RES4364_LDO3P3_PU 10 +#define RES4364_OTP_PU 11 +#define 
RES4364_XTAL_PU 12 +#define RES4364_SR_CLK_START 13 +#define RES4364_3x3_RADIO_PU 14 +#define RES4364_RF_LDO 15 +#define RES4364_PERST_OVR 16 +#define RES4364_WL_CORE_RDY 17 +#define RES4364_ILP_REQ 18 +#define RES4364_ALP_AVAIL 19 +#define RES4364_1x1_MINI_PMU 20 +#define RES4364_1x1_RADIO_PU 21 +#define RES4364_SR_CLK_STABLE 22 +#define RES4364_SR_SAVE_RESTORE 23 +#define RES4364_SR_PHY_PWRSW 24 +#define RES4364_SR_VDDM_PWRSW 25 +#define RES4364_SR_SUBCORE_PWRSW 26 +#define RES4364_SR_SLEEP 27 +#define RES4364_HT_START 28 +#define RES4364_HT_AVAIL 29 +#define RES4364_MACPHY_CLKAVAIL 30 + +/* 4349 GPIO */ +#define CC4349_PIN_GPIO_00 (0) +#define CC4349_PIN_GPIO_01 (1) +#define CC4349_PIN_GPIO_02 (2) +#define CC4349_PIN_GPIO_03 (3) +#define CC4349_PIN_GPIO_04 (4) +#define CC4349_PIN_GPIO_05 (5) +#define CC4349_PIN_GPIO_06 (6) +#define CC4349_PIN_GPIO_07 (7) +#define CC4349_PIN_GPIO_08 (8) +#define CC4349_PIN_GPIO_09 (9) +#define CC4349_PIN_GPIO_10 (10) +#define CC4349_PIN_GPIO_11 (11) +#define CC4349_PIN_GPIO_12 (12) +#define CC4349_PIN_GPIO_13 (13) +#define CC4349_PIN_GPIO_14 (14) +#define CC4349_PIN_GPIO_15 (15) +#define CC4349_PIN_GPIO_16 (16) +#define CC4349_PIN_GPIO_17 (17) +#define CC4349_PIN_GPIO_18 (18) +#define CC4349_PIN_GPIO_19 (19) + +/* Mask used to decide whether HOSTWAKE MUX to be performed or not */ +#define MUXENAB4349_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */ +#define MUXENAB4349_HOSTWAKE_SHIFT 4 +#define MUXENAB4349_GETIX(val, name) \ + ((((val) & MUXENAB4349_ ## name ## _MASK) >> MUXENAB4349_ ## name ## _SHIFT) - 1) + +#define CR4_4364_RAM_BASE (0x160000) + +/* SR binary offset is at 8K */ +#define CC_SR1_4364_SR_CORE0_ASM_ADDR (0x10) +#define CC_SR1_4364_SR_CORE1_ASM_ADDR (0x10) + +#define CC_SR0_4364_SR_ENG_EN_MASK 0x1 +#define CC_SR0_4364_SR_ENG_EN_SHIFT 0 +#define CC_SR0_4364_SR_ENG_CLK_EN (1 << 1) +#define CC_SR0_4364_SR_RSRC_TRIGGER (0xC << 2) +#define CC_SR0_4364_SR_WD_MEM_MIN_DIV (0x3 << 6) +#define 
CC_SR0_4364_SR_MEM_STBY_ALLOW_MSK (1 << 16) +#define CC_SR0_4364_SR_MEM_STBY_ALLOW_SHIFT 16 +#define CC_SR0_4364_SR_ENABLE_ILP (1 << 17) +#define CC_SR0_4364_SR_ENABLE_ALP (1 << 18) +#define CC_SR0_4364_SR_ENABLE_HT (1 << 19) +#define CC_SR0_4364_SR_INVERT_CLK (1 << 11) +#define CC_SR0_4364_SR_ALLOW_PIC (3 << 20) +#define CC_SR0_4364_SR_PMU_MEM_DISABLE (1 << 30) + +#define PMU_4364_CC1_ENABLE_BBPLL_PWR_DWN (0x1 << 4) +#define PMU_4364_CC1_BBPLL_ARESET_LQ_TIME (0x1 << 8) +#define PMU_4364_CC1_BBPLL_ARESET_HT_UPTIME (0x1 << 10) +#define PMU_4364_CC1_BBPLL_DRESET_LQ_UPTIME (0x1 << 12) +#define PMU_4364_CC1_BBPLL_DRESET_HT_UPTIME (0x4 << 16) +#define PMU_4364_CC1_SUBCORE_PWRSW_UP_DELAY (0x8 << 20) +#define PMU_4364_CC1_SUBCORE_PWRSW_RESET_CNT (0x4 << 24) + +#define PMU_4364_CC2_PHY_PWRSW_RESET_CNT (0x2 << 0) +#define PMU_4364_CC2_PHY_PWRSW_RESET_MASK (0x7) +#define PMU_4364_CC2_SEL_CHIPC_IF_FOR_SR (1 << 21) + +#define PMU_4364_CC3_MEMLPLDO3x3_PWRSW_FORCE_MASK (1 << 23) +#define PMU_4364_CC3_MEMLPLDO1x1_PWRSW_FORCE_MASK (1 << 24) +#define PMU_4364_CC3_CBUCK1P2_PU_SR_VDDM_REQ_ON (1 << 25) +#define PMU_4364_CC3_MEMLPLDO3x3_PWRSW_FORCE_OFF (0) +#define PMU_4364_CC3_MEMLPLDO1x1_PWRSW_FORCE_OFF (0) + +#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2_MASK (1 << 26) +#define PMU_4364_CC5_ENABLE_ARMCR4_DEBUG_CLK_MASK (1 << 4) +#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2 (1 << 26) +#define PMU_4364_CC5_ENABLE_ARMCR4_DEBUG_CLK_OFF (0) + +#define PMU_4364_CC6_MDI_RESET_MASK (1 << 16) +#define PMU_4364_CC6_USE_CLK_REQ_MASK (1 << 18) +#define PMU_4364_CC6_HIGHER_CLK_REQ_ALP_MASK (1 << 20) +#define PMU_4364_CC6_HT_AVAIL_REQ_ALP_AVAIL_MASK (1 << 21) +#define PMU_4364_CC6_PHY_CLK_REQUESTS_ALP_AVAIL_MASK (1 << 22) +#define PMU_4364_CC6_MDI_RESET (1 << 16) +#define PMU_4364_CC6_USE_CLK_REQ (1 << 18) + +#define PMU_4364_CC6_HIGHER_CLK_REQ_ALP (1 << 20) +#define PMU_4364_CC6_HT_AVAIL_REQ_ALP_AVAIL (1 << 21) +#define PMU_4364_CC6_PHY_CLK_REQUESTS_ALP_AVAIL (1 << 22) + +#define 
PMU_4364_VREG0_DISABLE_BT_PULL_DOWN (1 << 2) +#define PMU_4364_VREG1_DISABLE_WL_PULL_DOWN (1 << 2) + +/* Indices of PMU voltage regulator registers */ +#define PMU_VREG_0 (0u) +#define PMU_VREG_1 (1u) +#define PMU_VREG_2 (2u) +#define PMU_VREG_3 (3u) +#define PMU_VREG_4 (4u) +#define PMU_VREG_5 (5u) +#define PMU_VREG_6 (6u) +#define PMU_VREG_7 (7u) +#define PMU_VREG_8 (8u) +#define PMU_VREG_9 (9u) +#define PMU_VREG_10 (10u) +#define PMU_VREG_11 (11u) +#define PMU_VREG_12 (12u) +#define PMU_VREG_13 (13u) +#define PMU_VREG_14 (14u) +#define PMU_VREG_15 (15u) +#define PMU_VREG_16 (16u) + +/* 43012 Chipcommon ChipStatus bits */ +#define CST43012_FLL_LOCK (1 << 13) +/* 43012 resources - End */ + +/* 43012 related Cbuck modes */ +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0 0x00001c03 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0 0x00492490 +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1 0x00001c03 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1 0x00490410 + +/* 43012 related dynamic cbuck mode mask */ +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFC07 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFFFF + +/* 4369 related VREG masks */ +#define PMU_4369_VREG_5_MISCLDO_POWER_UP_MASK (1u << 11u) +#define PMU_4369_VREG_5_MISCLDO_POWER_UP_SHIFT 11u +#define PMU_4369_VREG_5_LPLDO_POWER_UP_MASK (1u << 27u) +#define PMU_4369_VREG_5_LPLDO_POWER_UP_SHIFT 27u +#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(31, 28) +#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT 28u + +#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK (1u << 3u) +#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT 3u + +#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK (1u << 27u) +#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_SHIFT 27u +#define PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK (1u << 28u) +#define PMU_4369_VREG_7_WL_PMU_LP_MODE_SHIFT 28u +#define PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK (1u << 29u) +#define PMU_4369_VREG_7_WL_PMU_LV_MODE_SHIFT 29u + +#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK 
BCM_MASK32(4, 0) +#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT 0u + +#define PMU_4369_VREG13_RSRC_EN_ASR_MASK4_MASK BCM_MASK32(10, 9) +#define PMU_4369_VREG13_RSRC_EN_ASR_MASK4_SHIFT 9u + +#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK (1u << 23u) +#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT 23u + +#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_MASK BCM_MASK32(2, 0) +#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_SHIFT 0u +#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_MASK BCM_MASK32(17, 15) +#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_SHIFT 15u +#define PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK BCM_MASK32(20, 18) +#define PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT 18u + +/* 4364 related VREG masks */ +#define PMU_4364_VREG3_DISABLE_WPT_REG_ON_PULL_DOWN (1 << 11) + +#define PMU_4364_VREG4_MEMLPLDO_PU_ON (1 << 31) +#define PMU_4364_VREG4_LPLPDO_ADJ (3 << 16) +#define PMU_4364_VREG4_LPLPDO_ADJ_MASK (3 << 16) +#define PMU_4364_VREG5_MAC_CLK_1x1_AUTO (0x1 << 18) +#define PMU_4364_VREG5_SR_AUTO (0x1 << 20) +#define PMU_4364_VREG5_BT_PWM_MASK (0x1 << 21) +#define PMU_4364_VREG5_BT_AUTO (0x1 << 22) +#define PMU_4364_VREG5_WL2CLB_DVFS_EN_MASK (0x1 << 23) +#define PMU_4364_VREG5_BT_PWMK (0) +#define PMU_4364_VREG5_WL2CLB_DVFS_EN (0) + +#define PMU_4364_VREG6_BBPLL_AUTO (0x1 << 17) +#define PMU_4364_VREG6_MINI_PMU_PWM (0x1 << 18) +#define PMU_4364_VREG6_LNLDO_AUTO (0x1 << 21) +#define PMU_4364_VREG6_PCIE_PWRDN_0_AUTO (0x1 << 23) +#define PMU_4364_VREG6_PCIE_PWRDN_1_AUTO (0x1 << 25) +#define PMU_4364_VREG6_MAC_CLK_3x3_PWM (0x1 << 27) +#define PMU_4364_VREG6_ENABLE_FINE_CTRL (0x1 << 30) + +#define PMU_4364_PLL0_DISABLE_CHANNEL6 (0x1 << 18) + +#define CC_GCI1_REG (0x1) +#define CC_GCI1_4364_IND_STATE_FOR_GPIO9_11 (0x0ccccccc) +#define CC2_4364_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4364_SDIO_AOS_WAKEUP_SHIFT (24) + +#define CC6_4364_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4364_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4364_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define 
CC6_4364_PMU_WAKEUP_ALPAVAIL_SHIFT (6) + +#define CST4364_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4364_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4364_SPROM_PRESENT 0x00000010 + +#define PMU_4364_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF +#define PMU_4364_MACCORE_1_RES_REQ_MASK 0x7FFB3647 + +#define PMU_4364_RSDB_MODE (0) +#define PMU_4364_1x1_MODE (1) +#define PMU_4364_3x3_MODE (2) + +#define PMU_4364_MAX_MASK_1x1 (0x7FFF3E47) +#define PMU_4364_MAX_MASK_RSDB (0x7FFFFFFF) +#define PMU_4364_MAX_MASK_3x3 (0x3FCFFFFF) + +#define PMU_4364_SAVE_RESTORE_UPDNTIME_1x1 (0xC000C) +#define PMU_4364_SAVE_RESTORE_UPDNTIME_3x3 (0xF000F) + +#define FORCE_CLK_ON 1 +#define FORCE_CLK_OFF 0 + +#define PMU1_PLL0_SWITCH_MACCLOCK_120MHZ (0) +#define PMU1_PLL0_SWITCH_MACCLOCK_160MHZ (1) +#define TSF_CLK_FRAC_L_4364_120MHZ 0x8889 +#define TSF_CLK_FRAC_H_4364_120MHZ 0x8 +#define TSF_CLK_FRAC_L_4364_160MHZ 0x6666 +#define TSF_CLK_FRAC_H_4364_160MHZ 0x6 +#define PMU1_PLL0_PC1_M2DIV_VALUE_120MHZ 8 +#define PMU1_PLL0_PC1_M2DIV_VALUE_160MHZ 6 + +/* 4347/4369 Related */ + +/* + * PMU VREG Definitions: + * http://confluence.broadcom.com/display/WLAN/BCM4347+PMU+Vreg+Control+Register + * http://confluence.broadcom.com/display/WLAN/BCM4369+PMU+Vreg+Control+Register + */ +/* PMU VREG4 */ +#define PMU_28NM_VREG4_WL_LDO_CNTL_EN (0x1 << 10) + +/* PMU VREG6 */ +#define PMU_28NM_VREG6_BTLDO3P3_PU (0x1 << 12) + +/* PMU resources */ +#define RES4347_MEMLPLDO_PU 0 +#define RES4347_AAON 1 +#define RES4347_PMU_SLEEP 2 +#define RES4347_RESERVED_3 3 +#define RES4347_LDO3P3_PU 4 +#define RES4347_FAST_LPO_AVAIL 5 +#define RES4347_XTAL_PU 6 +#define RES4347_XTAL_STABLE 7 +#define RES4347_PWRSW_DIG 8 +#define RES4347_SR_DIG 9 +#define RES4347_SLEEP_DIG 10 +#define RES4347_PWRSW_AUX 11 +#define RES4347_SR_AUX 12 +#define RES4347_SLEEP_AUX 13 +#define RES4347_PWRSW_MAIN 14 +#define RES4347_SR_MAIN 15 +#define RES4347_SLEEP_MAIN 16 +#define RES4347_CORE_RDY_DIG 17 +#define 
RES4347_CORE_RDY_AUX 18 +#define RES4347_ALP_AVAIL 19 +#define RES4347_RADIO_AUX_PU 20 +#define RES4347_MINIPMU_AUX_PU 21 +#define RES4347_CORE_RDY_MAIN 22 +#define RES4347_RADIO_MAIN_PU 23 +#define RES4347_MINIPMU_MAIN_PU 24 +#define RES4347_PCIE_EP_PU 25 +#define RES4347_COLD_START_WAIT 26 +#define RES4347_ARMHTAVAIL 27 +#define RES4347_HT_AVAIL 28 +#define RES4347_MACPHY_AUX_CLK_AVAIL 29 +#define RES4347_MACPHY_MAIN_CLK_AVAIL 30 +#define RES4347_RESERVED_31 31 + +/* 4369 PMU Resources */ +#define RES4369_DUMMY 0 +#define RES4369_ABUCK 1 +#define RES4369_PMU_SLEEP 2 +#define RES4369_MISCLDO 3 +#define RES4369_LDO3P3 4 +#define RES4369_FAST_LPO_AVAIL 5 +#define RES4369_XTAL_PU 6 +#define RES4369_XTAL_STABLE 7 +#define RES4369_PWRSW_DIG 8 +#define RES4369_SR_DIG 9 +#define RES4369_SLEEP_DIG 10 +#define RES4369_PWRSW_AUX 11 +#define RES4369_SR_AUX 12 +#define RES4369_SLEEP_AUX 13 +#define RES4369_PWRSW_MAIN 14 +#define RES4369_SR_MAIN 15 +#define RES4369_SLEEP_MAIN 16 +#define RES4369_DIG_CORE_RDY 17 +#define RES4369_CORE_RDY_AUX 18 +#define RES4369_ALP_AVAIL 19 +#define RES4369_RADIO_AUX_PU 20 +#define RES4369_MINIPMU_AUX_PU 21 +#define RES4369_CORE_RDY_MAIN 22 +#define RES4369_RADIO_MAIN_PU 23 +#define RES4369_MINIPMU_MAIN_PU 24 +#define RES4369_PCIE_EP_PU 25 +#define RES4369_COLD_START_WAIT 26 +#define RES4369_ARMHTAVAIL 27 +#define RES4369_HT_AVAIL 28 +#define RES4369_MACPHY_AUX_CLK_AVAIL 29 +#define RES4369_MACPHY_MAIN_CLK_AVAIL 30 + +/* chip status */ +#define CST4347_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4347_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4347_JTAG_STRAP_ENABLED(cs) (((cs) & (1 << 20)) != 0) /* JTAG strap st */ +#define CST4347_SPROM_PRESENT 0x00000010 + +/* GCI chip status */ +#define GCI_CS_4347_FLL1MHZ_LOCK_MASK (1 << 1) + +/* GCI chip control registers */ +#define GCI_CC7_AAON_BYPASS_PWRSW_SEL 13 +#define GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON 14 + +/* PMU chip control registers */ +#define 
CC2_4347_VASIP_MEMLPLDO_VDDB_OFF_MASK (1 << 11) +#define CC2_4347_VASIP_MEMLPLDO_VDDB_OFF_SHIFT 11 +#define CC2_4347_MAIN_MEMLPLDO_VDDB_OFF_MASK (1 << 12) +#define CC2_4347_MAIN_MEMLPLDO_VDDB_OFF_SHIFT 12 +#define CC2_4347_AUX_MEMLPLDO_VDDB_OFF_MASK (1 << 13) +#define CC2_4347_AUX_MEMLPLDO_VDDB_OFF_SHIFT 13 +#define CC2_4347_VASIP_VDDRET_ON_MASK (1 << 14) +#define CC2_4347_VASIP_VDDRET_ON_SHIFT 14 +#define CC2_4347_MAIN_VDDRET_ON_MASK (1 << 15) +#define CC2_4347_MAIN_VDDRET_ON_SHIFT 15 +#define CC2_4347_AUX_VDDRET_ON_MASK (1 << 16) +#define CC2_4347_AUX_VDDRET_ON_SHIFT 16 +#define CC2_4347_GCI2WAKE_MASK (1 << 31) +#define CC2_4347_GCI2WAKE_SHIFT 31 + +#define CC2_4347_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4347_SDIO_AOS_WAKEUP_SHIFT 24 + +#define CC4_4347_LHL_TIMER_SELECT (1 << 0) + +#define CC6_4347_PWROK_WDT_EN_IN_MASK (1 << 6) +#define CC6_4347_PWROK_WDT_EN_IN_SHIFT 6 + +#define CC6_4347_SDIO_AOS_CHIP_WAKEUP_MASK (1 << 24) +#define CC6_4347_SDIO_AOS_CHIP_WAKEUP_SHIFT 24 + +#define PCIE_GPIO1_GPIO_PIN CC_GCI_GPIO_0 +#define PCIE_PERST_GPIO_PIN CC_GCI_GPIO_1 +#define PCIE_CLKREQ_GPIO_PIN CC_GCI_GPIO_2 + +#define VREG5_4347_MEMLPLDO_ADJ_MASK 0xF0000000 +#define VREG5_4347_MEMLPLDO_ADJ_SHIFT 28 +#define VREG5_4347_LPLDO_ADJ_MASK 0x00F00000 +#define VREG5_4347_LPLDO_ADJ_SHIFT 20 + +/* lpldo/memlpldo voltage */ +#define PMU_VREG5_LPLDO_VOLT_0_88 0xf /* 0.88v */ +#define PMU_VREG5_LPLDO_VOLT_0_86 0xe /* 0.86v */ +#define PMU_VREG5_LPLDO_VOLT_0_84 0xd /* 0.84v */ +#define PMU_VREG5_LPLDO_VOLT_0_82 0xc /* 0.82v */ +#define PMU_VREG5_LPLDO_VOLT_0_80 0xb /* 0.80v */ +#define PMU_VREG5_LPLDO_VOLT_0_78 0xa /* 0.78v */ +#define PMU_VREG5_LPLDO_VOLT_0_76 0x9 /* 0.76v */ +#define PMU_VREG5_LPLDO_VOLT_0_74 0x8 /* 0.74v */ +#define PMU_VREG5_LPLDO_VOLT_0_72 0x7 /* 0.72v */ +#define PMU_VREG5_LPLDO_VOLT_1_10 0x6 /* 1.10v */ +#define PMU_VREG5_LPLDO_VOLT_1_00 0x5 /* 1.00v */ +#define PMU_VREG5_LPLDO_VOLT_0_98 0x4 /* 0.98v */ +#define PMU_VREG5_LPLDO_VOLT_0_96 0x3 /* 0.96v */ 
+#define PMU_VREG5_LPLDO_VOLT_0_94 0x2 /* 0.94v */ +#define PMU_VREG5_LPLDO_VOLT_0_92 0x1 /* 0.92v */ +#define PMU_VREG5_LPLDO_VOLT_0_90 0x0 /* 0.90v */ + +/* Save/Restore engine */ + +#define BM_ADDR_TO_SR_ADDR(bmaddr) ((bmaddr) >> 9) + +/* Txfifo is 512KB for main core and 128KB for aux core + * We use first 12kB (0x3000) in BMC buffer for template in main core and + * 6.5kB (0x1A00) in aux core, followed by ASM code + */ +#define SR_ASM_ADDR_MAIN_4347 (0x18) +#define SR_ASM_ADDR_AUX_4347 (0xd) +#define SR_ASM_ADDR_DIG_4347 (0x0) + +#define SR_ASM_ADDR_MAIN_4369 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_AUX_4369 BM_ADDR_TO_SR_ADDR(0xC00) +#define SR_ASM_ADDR_DIG_4369 (0x0) + +/* 512 bytes block */ +#define SR_ASM_ADDR_BLK_SIZE_SHIFT 9 + +/* SR Control0 bits */ +#define SR0_SR_ENG_EN_MASK 0x1 +#define SR0_SR_ENG_EN_SHIFT 0 +#define SR0_SR_ENG_CLK_EN (1 << 1) +#define SR0_RSRC_TRIGGER (0xC << 2) +#define SR0_WD_MEM_MIN_DIV (0x3 << 6) +#define SR0_INVERT_SR_CLK (1 << 11) +#define SR0_MEM_STBY_ALLOW (1 << 16) +#define SR0_ENABLE_SR_ILP (1 << 17) +#define SR0_ENABLE_SR_ALP (1 << 18) +#define SR0_ENABLE_SR_HT (1 << 19) +#define SR0_ALLOW_PIC (3 << 20) +#define SR0_ENB_PMU_MEM_DISABLE (1 << 30) + +/* SR Control0 bits for 4369 */ +#define SR0_4369_SR_ENG_EN_MASK 0x1 +#define SR0_4369_SR_ENG_EN_SHIFT 0 +#define SR0_4369_SR_ENG_CLK_EN (1 << 1) +#define SR0_4369_RSRC_TRIGGER (0xC << 2) +#define SR0_4369_WD_MEM_MIN_DIV (0x2 << 6) +#define SR0_4369_INVERT_SR_CLK (1 << 11) +#define SR0_4369_MEM_STBY_ALLOW (1 << 16) +#define SR0_4369_ENABLE_SR_ILP (1 << 17) +#define SR0_4369_ENABLE_SR_ALP (1 << 18) +#define SR0_4369_ENABLE_SR_HT (1 << 19) +#define SR0_4369_ALLOW_PIC (3 << 20) +#define SR0_4369_ENB_PMU_MEM_DISABLE (1 << 30) +/* =========== LHL regs =========== */ +/* 4369 LHL register settings */ +#define LHL4369_UP_CNT 0 +#define LHL4369_DN_CNT 2 +#define LHL4369_PWRSW_EN_DWN_CNT (LHL4369_DN_CNT + 2) +#define LHL4369_ISO_EN_DWN_CNT (LHL4369_PWRSW_EN_DWN_CNT + 3) +#define 
LHL4369_SLB_EN_DWN_CNT (LHL4369_ISO_EN_DWN_CNT + 1) +#define LHL4369_ASR_CLK4M_DIS_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_LPPFM_MODE_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_MODE_SEL_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_MANUAL_MODE_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_ADJ_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_OVERI_DIS_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_ASR_TRIM_ADJ_DWN_CNT (LHL4369_DN_CNT) +#define LHL4369_VDDC_SW_DIS_DWN_CNT (LHL4369_SLB_EN_DWN_CNT + 1) +#define LHL4369_VMUX_ASR_SEL_DWN_CNT (LHL4369_VDDC_SW_DIS_DWN_CNT + 1) +#define LHL4369_CSR_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_MODE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_OVERI_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_HPBG_CHOP_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_SRBG_REF_SEL_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_PFM_PWR_SLICE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_TRIM_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_CSR_VOLTAGE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1) +#define LHL4369_HPBG_PU_EN_DWN_CNT (LHL4369_CSR_MODE_DWN_CNT + 1) + +#define LHL4369_HPBG_PU_EN_UP_CNT (LHL4369_UP_CNT + 1) +#define LHL4369_CSR_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_MODE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_OVERI_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_HPBG_CHOP_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_SRBG_REF_SEL_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_PFM_PWR_SLICE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_TRIM_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_CSR_VOLTAGE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1) +#define LHL4369_VMUX_ASR_SEL_UP_CNT (LHL4369_CSR_MODE_UP_CNT + 1) +#define LHL4369_VDDC_SW_DIS_UP_CNT (LHL4369_VMUX_ASR_SEL_UP_CNT + 1) +#define LHL4369_SLB_EN_UP_CNT 
(LHL4369_VDDC_SW_DIS_UP_CNT + 8) +#define LHL4369_ISO_EN_UP_CNT (LHL4369_SLB_EN_UP_CNT + 1) +#define LHL4369_PWRSW_EN_UP_CNT (LHL4369_ISO_EN_UP_CNT + 3) +#define LHL4369_ASR_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_CLK4M_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_LPPFM_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_MODE_SEL_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_MANUAL_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_OVERI_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) +#define LHL4369_ASR_TRIM_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1) + +/* MacResourceReqTimer0/1 */ +#define MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT 24 +#define MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT 26 +#define MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT 27 +#define MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT 28 +#define MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT 29 + +/* for pmu rev32 and higher */ +#define PMU32_MAC_MAIN_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \ + (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT)) + +#define PMU32_MAC_AUX_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \ + (1 << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \ + (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT)) + +/* 4369 related: 4369 parameters + * http://www.sj.broadcom.com/projects/BCM4369/gallery_backend.RC6.0/design/backplane/pmu_params.xls + */ +#define RES4369_DUMMY 0 +#define RES4369_ABUCK 1 +#define RES4369_PMU_SLEEP 2 +#define RES4369_MISCLDO_PU 3 +#define RES4369_LDO3P3_PU 4 +#define RES4369_FAST_LPO_AVAIL 5 +#define RES4369_XTAL_PU 6 +#define RES4369_XTAL_STABLE 7 +#define RES4369_PWRSW_DIG 8 +#define RES4369_SR_DIG 9 +#define RES4369_SLEEP_DIG 10 +#define RES4369_PWRSW_AUX 11 +#define RES4369_SR_AUX 12 
+#define RES4369_SLEEP_AUX 13 +#define RES4369_PWRSW_MAIN 14 +#define RES4369_SR_MAIN 15 +#define RES4369_SLEEP_MAIN 16 +#define RES4369_DIG_CORE_RDY 17 +#define RES4369_CORE_RDY_AUX 18 +#define RES4369_ALP_AVAIL 19 +#define RES4369_RADIO_AUX_PU 20 +#define RES4369_MINIPMU_AUX_PU 21 +#define RES4369_CORE_RDY_MAIN 22 +#define RES4369_RADIO_MAIN_PU 23 +#define RES4369_MINIPMU_MAIN_PU 24 +#define RES4369_PCIE_EP_PU 25 +#define RES4369_COLD_START_WAIT 26 +#define RES4369_ARMHTAVAIL 27 +#define RES4369_HT_AVAIL 28 +#define RES4369_MACPHY_AUX_CLK_AVAIL 29 +#define RES4369_MACPHY_MAIN_CLK_AVAIL 30 +#define RES4369_RESERVED_31 31 + +#define CST4369_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4369_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4369_SPROM_PRESENT 0x00000010 + +#define PMU_4369_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF +#define PMU_4369_MACCORE_1_RES_REQ_MASK 0x7FFB3647 + +/* 4367 related */ +#define RES4367_ABUCK 0 +#define RES4367_CBUCK 1 +#define RES4367_MISCLDO_PU 2 +#define RES4367_VBOOST 3 +#define RES4367_LDO3P3_PU 4 +#define RES4367_LAST_LPO_AVAIL 5 +#define RES4367_XTAL_PU 6 +#define RES4367_XTAL_STABLE 7 +#define RES4367_PWRSW_DIG 8 +#define RES4367_SR_DIG 9 +#define RES4367_SPARE10 10 +#define RES4367_PWRSW_AUX 11 +#define RES4367_SR_AUX 12 +#define RES4367_SPARE2 13 +#define RES4367_PWRSW_MAIN 14 +#define RES4367_SR_MAIN 15 +#define RES4367_ARMPLL_PWRUP 16 +#define RES4367_DIG_CORE_RDY 17 +#define RES4367_CORE_RDY_AUX 18 +#define RES4367_ALP_AVAIL 19 +#define RES4367_RADIO_AUX_PU 20 +#define RES4367_MINIPMU_AUX_PU 21 +#define RES4367_CORE_RDY_MAIN 22 +#define RES4367_RADIO_MAIN_PU 23 +#define RES4367_MINIPMU_MAIN_PU 24 +#define RES4367_PCIE_RET 25 +#define RES4367_COLD_START_WAIT 26 +#define RES4367_ARMPLL_HTAVAIL 27 +#define RES4367_HT_AVAIL 28 +#define RES4367_MACPHY_AUX_CLK_AVAIL 29 +#define RES4367_MACPHY_MAIN_CLK_AVAIL 30 +#define RES4367_RESERVED_31 31 + +#define CST4367_SPROM_PRESENT (1 << 17) + +/* 43430 
PMU resources based on pmu_params.xls */ +#define RES43430_LPLDO_PU 0 +#define RES43430_BG_PU 1 +#define RES43430_PMU_SLEEP 2 +#define RES43430_RSVD_3 3 +#define RES43430_CBUCK_LPOM_PU 4 +#define RES43430_CBUCK_PFM_PU 5 +#define RES43430_COLD_START_WAIT 6 +#define RES43430_RSVD_7 7 +#define RES43430_LNLDO_PU 8 +#define RES43430_RSVD_9 9 +#define RES43430_LDO3P3_PU 10 +#define RES43430_OTP_PU 11 +#define RES43430_XTAL_PU 12 +#define RES43430_SR_CLK_START 13 +#define RES43430_LQ_AVAIL 14 +#define RES43430_LQ_START 15 +#define RES43430_RSVD_16 16 +#define RES43430_WL_CORE_RDY 17 +#define RES43430_ILP_REQ 18 +#define RES43430_ALP_AVAIL 19 +#define RES43430_MINI_PMU 20 +#define RES43430_RADIO_PU 21 +#define RES43430_SR_CLK_STABLE 22 +#define RES43430_SR_SAVE_RESTORE 23 +#define RES43430_SR_PHY_PWRSW 24 +#define RES43430_SR_VDDM_PWRSW 25 +#define RES43430_SR_SUBCORE_PWRSW 26 +#define RES43430_SR_SLEEP 27 +#define RES43430_HT_START 28 +#define RES43430_HT_AVAIL 29 +#define RES43430_MACPHY_CLK_AVAIL 30 + +/* 43430 chip status bits */ +#define CST43430_SDIO_MODE 0x00000001 +#define CST43430_GSPI_MODE 0x00000002 +#define CST43430_RSRC_INIT_MODE_0 0x00000080 +#define CST43430_RSRC_INIT_MODE_1 0x00000100 +#define CST43430_SEL0_SDIO 0x00000200 +#define CST43430_SEL1_SDIO 0x00000400 +#define CST43430_SEL2_SDIO 0x00000800 +#define CST43430_BBPLL_LOCKED 0x00001000 +#define CST43430_DBG_INST_DETECT 0x00004000 +#define CST43430_CLB2WL_BT_READY 0x00020000 +#define CST43430_JTAG_MODE 0x00100000 +#define CST43430_HOST_IFACE 0x00400000 +#define CST43430_TRIM_EN 0x00800000 +#define CST43430_DIN_PACKAGE_OPTION 0x10000000 + +#define PMU43430_PLL0_PC2_P1DIV_MASK 0x0000000f +#define PMU43430_PLL0_PC2_P1DIV_SHIFT 0 +#define PMU43430_PLL0_PC2_NDIV_INT_MASK 0x0000ff80 +#define PMU43430_PLL0_PC2_NDIV_INT_SHIFT 7 +#define PMU43430_PLL0_PC4_MDIV2_MASK 0x0000ff00 +#define PMU43430_PLL0_PC4_MDIV2_SHIFT 8 + +/* 43430 chip SR definitions */ +#define SRAM_43430_SR_ASM_ADDR 0x7f800 +#define 
CC_SR1_43430_SR_ASM_ADDR ((SRAM_43430_SR_ASM_ADDR - 0x60000) >> 8) + +/* 43430 PMU Chip Control bits */ +#define CC2_43430_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_43430_SDIO_AOS_WAKEUP_SHIFT (24) + +#define PMU_MACCORE_0_RES_REQ_TIMER 0x1d000000 +#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F + +#define PMU43012_MAC_RES_REQ_TIMER 0x1D000000 +#define PMU43012_MAC_RES_REQ_MASK 0x3FBBF7FF + +#define PMU_MACCORE_1_RES_REQ_TIMER 0x1d000000 +#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F + +/* defines to detect active host interface in use */ +#define CHIP_HOSTIF_PCIEMODE 0x1 +#define CHIP_HOSTIF_USBMODE 0x2 +#define CHIP_HOSTIF_SDIOMODE 0x4 +#define CHIP_HOSTIF_PCIE(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE) +#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE) +#define CHIP_HOSTIF_SDIO(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE) + +/* 4335 resources */ +#define RES4335_LPLDO_PO 0 +#define RES4335_PMU_BG_PU 1 +#define RES4335_PMU_SLEEP 2 +#define RES4335_RSVD_3 3 +#define RES4335_CBUCK_LPOM_PU 4 +#define RES4335_CBUCK_PFM_PU 5 +#define RES4335_RSVD_6 6 +#define RES4335_RSVD_7 7 +#define RES4335_LNLDO_PU 8 +#define RES4335_XTALLDO_PU 9 +#define RES4335_LDO3P3_PU 10 +#define RES4335_OTP_PU 11 +#define RES4335_XTAL_PU 12 +#define RES4335_SR_CLK_START 13 +#define RES4335_LQ_AVAIL 14 +#define RES4335_LQ_START 15 +#define RES4335_RSVD_16 16 +#define RES4335_WL_CORE_RDY 17 +#define RES4335_ILP_REQ 18 +#define RES4335_ALP_AVAIL 19 +#define RES4335_MINI_PMU 20 +#define RES4335_RADIO_PU 21 +#define RES4335_SR_CLK_STABLE 22 +#define RES4335_SR_SAVE_RESTORE 23 +#define RES4335_SR_PHY_PWRSW 24 +#define RES4335_SR_VDDM_PWRSW 25 +#define RES4335_SR_SUBCORE_PWRSW 26 +#define RES4335_SR_SLEEP 27 +#define RES4335_HT_START 28 +#define RES4335_HT_AVAIL 29 +#define RES4335_MACPHY_CLKAVAIL 30 + +/* 4335 Chip specific ChipStatus register bits */ +#define CST4335_SPROM_MASK 0x00000020 +#define CST4335_SFLASH_MASK 0x00000040 +#define 
CST4335_RES_INIT_MODE_SHIFT 7 +#define CST4335_RES_INIT_MODE_MASK 0x00000180 +#define CST4335_CHIPMODE_MASK 0xF +#define CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */ +#define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */ +#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /**< HSIC || USBDA */ +#define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */ + +/* 4335 Chip specific ChipControl1 register bits */ +#define CCTRL1_4335_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ +#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ + +/* 4335 Chip specific ChipControl2 register bits */ +#define CCTRL2_4335_AOSBLOCK (1 << 30) +#define CCTRL2_4335_PMUWAKE (1 << 31) +#define PATCHTBL_SIZE (0x800) +#define CR4_4335_RAM_BASE (0x180000) +#define CR4_4345_LT_C0_RAM_BASE (0x1b0000) +#define CR4_4345_GE_C0_RAM_BASE (0x198000) +#define CR4_4349_RAM_BASE (0x180000) +#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000) +#define CR4_4350_RAM_BASE (0x180000) +#define CR4_4360_RAM_BASE (0x0) +#define CR4_43602_RAM_BASE (0x180000) +#define CA7_4365_RAM_BASE (0x200000) + +#define CR4_4347_RAM_BASE (0x170000) +#define CR4_4362_RAM_BASE (0x170000) +#define CR4_4369_RAM_BASE (0x170000) +#define CR4_4377_RAM_BASE (0x170000) +#define CR4_43751_RAM_BASE (0x170000) +#define CA7_4367_RAM_BASE (0x200000) +#define CR4_4378_RAM_BASE (0x352000) + +/* 4335 chip OTP present & OTP select bits. */ +#define SPROM4335_OTP_SELECT 0x00000010 +#define SPROM4335_OTP_PRESENT 0x00000020 + +/* 4335 GCI specific bits. */ +#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT (1 << 24) +#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE 25 +#define CC4335_GCI_FUNC_SEL_PAD_SDIO 0x00707770 + +/* SFLASH clkdev specific bits. */ +#define CC4335_SFLASH_CLKDIV_MASK 0x1F000000 +#define CC4335_SFLASH_CLKDIV_SHIFT 25 + +/* 4335 OTP bits for SFLASH. 
*/ +#define CC4335_SROM_OTP_SFLASH 40 +#define CC4335_SROM_OTP_SFLASH_PRESENT 0x1 +#define CC4335_SROM_OTP_SFLASH_TYPE 0x2 +#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK 0x003C +#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT 2 + +/* 4335 chip OTP present & OTP select bits. */ +#define SPROM4335_OTP_SELECT 0x00000010 +#define SPROM4335_OTP_PRESENT 0x00000020 + +/* 4335 GCI specific bits. */ +#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT (1 << 24) +#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE 25 +#define CC4335_GCI_FUNC_SEL_PAD_SDIO 0x00707770 + +/* SFLASH clkdev specific bits. */ +#define CC4335_SFLASH_CLKDIV_MASK 0x1F000000 +#define CC4335_SFLASH_CLKDIV_SHIFT 25 + +/* 4335 OTP bits for SFLASH. */ +#define CC4335_SROM_OTP_SFLASH 40 +#define CC4335_SROM_OTP_SFLASH_PRESENT 0x1 +#define CC4335_SROM_OTP_SFLASH_TYPE 0x2 +#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK 0x003C +#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT 2 + +/* 4335 resources--END */ + +/* 43012 PMU resources based on pmu_params.xls - Start */ +#define RES43012_MEMLPLDO_PU 0 +#define RES43012_PMU_SLEEP 1 +#define RES43012_FAST_LPO 2 +#define RES43012_BTLPO_3P3 3 +#define RES43012_SR_POK 4 +#define RES43012_DUMMY_PWRSW 5 +#define RES43012_DUMMY_LDO3P3 6 +#define RES43012_DUMMY_BT_LDO3P3 7 +#define RES43012_DUMMY_RADIO 8 +#define RES43012_VDDB_VDDRET 9 +#define RES43012_HV_LDO3P3 10 +#define RES43012_OTP_PU 11 +#define RES43012_XTAL_PU 12 +#define RES43012_SR_CLK_START 13 +#define RES43012_XTAL_STABLE 14 +#define RES43012_FCBS 15 +#define RES43012_CBUCK_MODE 16 +#define RES43012_CORE_READY 17 +#define RES43012_ILP_REQ 18 +#define RES43012_ALP_AVAIL 19 +#define RES43012_RADIOLDO_1P8 20 +#define RES43012_MINI_PMU 21 +#define RES43012_UNUSED 22 +#define RES43012_SR_SAVE_RESTORE 23 +#define RES43012_PHY_PWRSW 24 +#define RES43012_VDDB_CLDO 25 +#define RES43012_SUBCORE_PWRSW 26 +#define RES43012_SR_SLEEP 27 +#define RES43012_HT_START 28 +#define RES43012_HT_AVAIL 29 +#define RES43012_MACPHY_CLK_AVAIL 30 +#define 
CST43012_SPROM_PRESENT 0x00000010 + +/* SR Control0 bits */ +#define SR0_43012_SR_ENG_EN_MASK 0x1 +#define SR0_43012_SR_ENG_EN_SHIFT 0 +#define SR0_43012_SR_ENG_CLK_EN (1 << 1) +#define SR0_43012_SR_RSRC_TRIGGER (0xC << 2) +#define SR0_43012_SR_WD_MEM_MIN_DIV (0x3 << 6) +#define SR0_43012_SR_MEM_STBY_ALLOW_MSK (1 << 16) +#define SR0_43012_SR_MEM_STBY_ALLOW_SHIFT 16 +#define SR0_43012_SR_ENABLE_ILP (1 << 17) +#define SR0_43012_SR_ENABLE_ALP (1 << 18) +#define SR0_43012_SR_ENABLE_HT (1 << 19) +#define SR0_43012_SR_ALLOW_PIC (3 << 20) +#define SR0_43012_SR_PMU_MEM_DISABLE (1 << 30) +#define CC_43012_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC_43012_VDDM_PWRSW_EN_SHIFT (20) +#define CC_43012_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC_43012_SDIO_AOS_WAKEUP_SHIFT (24) + +/* 43012 - offset at 5K */ +#define SR1_43012_SR_INIT_ADDR_MASK 0x3ff +#define SR1_43012_SR_ASM_ADDR 0xA + +/* PLL usage in 43012 */ +#define PMU43012_PLL0_PC0_NDIV_INT_MASK 0x0000003f +#define PMU43012_PLL0_PC0_NDIV_INT_SHIFT 0 +#define PMU43012_PLL0_PC0_NDIV_FRAC_MASK 0xfffffc00 +#define PMU43012_PLL0_PC0_NDIV_FRAC_SHIFT 10 +#define PMU43012_PLL0_PC3_PDIV_MASK 0x00003c00 +#define PMU43012_PLL0_PC3_PDIV_SHIFT 10 +#define PMU43012_PLL_NDIV_FRAC_BITS 20 +#define PMU43012_PLL_P_DIV_SCALE_BITS 10 + +#define CCTL_43012_ARM_OFFCOUNT_MASK 0x00000003 +#define CCTL_43012_ARM_OFFCOUNT_SHIFT 0 +#define CCTL_43012_ARM_ONCOUNT_MASK 0x0000000c +#define CCTL_43012_ARM_ONCOUNT_SHIFT 2 + +/* PMU Rev >= 30 */ +#define PMU30_ALPCLK_ONEMHZ_ENAB 0x80000000 + +#define BCM7271_PMU30_ALPCLK_ONEMHZ_ENAB 0x00010000 + +/* 43012 PMU Chip Control Registers */ +#define PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON 0x00000010 +#define PMUCCTL02_43012_PHY_PWRSW_FORCE_ON 0x00000040 +#define PMUCCTL02_43012_LHL_TIMER_SELECT 0x00000800 +#define PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON 0x00008000 +#define PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB 0x00010000 +#define PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF (1 << 12) + +#define 
PMUCCTL04_43012_BBPLL_ENABLE_PWRDN 0x00100000 +#define PMUCCTL04_43012_BBPLL_ENABLE_PWROFF 0x00200000 +#define PMUCCTL04_43012_FORCE_BBPLL_ARESET 0x00400000 +#define PMUCCTL04_43012_FORCE_BBPLL_DRESET 0x00800000 +#define PMUCCTL04_43012_FORCE_BBPLL_PWRDN 0x01000000 +#define PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH 0x02000000 +#define PMUCCTL04_43012_FORCE_BBPLL_PWROFF 0x04000000 +#define PMUCCTL04_43012_DISABLE_LQ_AVAIL 0x08000000 +#define PMUCCTL04_43012_DISABLE_HT_AVAIL 0x10000000 +#define PMUCCTL04_43012_USE_LOCK 0x20000000 +#define PMUCCTL04_43012_OPEN_LOOP_ENABLE 0x40000000 +#define PMUCCTL04_43012_FORCE_OPEN_LOOP 0x80000000 +#define PMUCCTL05_43012_DISABLE_SPM_CLK (1 << 8) +#define PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN (1 << 14) +#define PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB (1 << 31) +#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_MASK 0x00000FC0 +#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_SHIFT 6 +#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_MASK 0x00FC0000 +#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_SHIFT 18 +#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x07000000 +#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 24 +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x0003F000 +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 12 +#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_MASK 0x00000038 +#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_SHIFT 3 + +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_MASK 0x00000FC0 +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_SHIFT 6 +/* during normal operation normal value is reduced for optimized power */ +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_VAL 0x1F + +#define PMUCCTL13_43012_FCBS_UP_TRIG_EN 0x00000400 + +#define PMUCCTL14_43012_ARMCM3_RESET_INITVAL 0x00000001 +#define PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL 0x00000020 +#define PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL 0x00000080 +#define 
PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL 0x00000200 +#define PMUCCTL14_43012_SDIOD_RESET_INIVAL 0x00000400 +#define PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL 0x00001000 +#define PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL 0x00004000 +#define PMUCCTL14_43012_M2MDMA_RESET_INITVAL 0x00008000 +#define PMUCCTL14_43012_DISABLE_LQ_AVAIL 0x08000000 + +#define VREG6_43012_MEMLPLDO_ADJ_MASK 0x0000F000 +#define VREG6_43012_MEMLPLDO_ADJ_SHIFT 12 + +#define VREG6_43012_LPLDO_ADJ_MASK 0x000000F0 +#define VREG6_43012_LPLDO_ADJ_SHIFT 4 + +#define VREG7_43012_PWRSW_1P8_PU_MASK 0x00400000 +#define VREG7_43012_PWRSW_1P8_PU_SHIFT 22 + +/* 4347 PMU Chip Control Registers */ +#define PMUCCTL03_4347_XTAL_CORESIZE_PMOS_NORMAL_MASK 0x001F8000 +#define PMUCCTL03_4347_XTAL_CORESIZE_PMOS_NORMAL_SHIFT 15 +#define PMUCCTL03_4347_XTAL_CORESIZE_PMOS_NORMAL_VAL 0x3F + +#define PMUCCTL03_4347_XTAL_CORESIZE_NMOS_NORMAL_MASK 0x07E00000 +#define PMUCCTL03_4347_XTAL_CORESIZE_NMOS_NORMAL_SHIFT 21 +#define PMUCCTL03_4347_XTAL_CORESIZE_NMOS_NORMAL_VAL 0x3F + +#define PMUCCTL03_4347_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x38000000 +#define PMUCCTL03_4347_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 27 +#define PMUCCTL03_4347_XTAL_SEL_BIAS_RES_NORMAL_VAL 0x0 + +#define PMUCCTL00_4347_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x00000FC0 +#define PMUCCTL00_4347_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 6 +#define PMUCCTL00_4347_XTAL_CORESIZE_BIAS_ADJ_NORMAL_VAL 0x5 + +#define PMUCCTL00_4347_XTAL_RES_BYPASS_NORMAL_MASK 0x00038000 +#define PMUCCTL00_4347_XTAL_RES_BYPASS_NORMAL_SHIFT 15 +#define PMUCCTL00_4347_XTAL_RES_BYPASS_NORMAL_VAL 0x7 + +/* 4345 Chip specific ChipStatus register bits */ +#define CST4345_SPROM_MASK 0x00000020 +#define CST4345_SFLASH_MASK 0x00000040 +#define CST4345_RES_INIT_MODE_SHIFT 7 +#define CST4345_RES_INIT_MODE_MASK 0x00000180 +#define CST4345_CHIPMODE_MASK 0x4000F +#define CST4345_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */ +#define CST4345_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */ +#define 
CST4345_CHIPMODE_HSIC(cs) (((cs) & (1 << 2)) != 0) /* HSIC */ +#define CST4345_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */ +#define CST4345_CHIPMODE_USB20D(cs) (((cs) & (1 << 18)) != 0) /* USBDA */ + +/* 4350 Chipcommon ChipStatus bits */ +#define CST4350_SDIO_MODE 0x00000001 +#define CST4350_HSIC20D_MODE 0x00000002 +#define CST4350_BP_ON_HSIC_CLK 0x00000004 +#define CST4350_PCIE_MODE 0x00000008 +#define CST4350_USB20D_MODE 0x00000010 +#define CST4350_USB30D_MODE 0x00000020 +#define CST4350_SPROM_PRESENT 0x00000040 +#define CST4350_RSRC_INIT_MODE_0 0x00000080 +#define CST4350_RSRC_INIT_MODE_1 0x00000100 +#define CST4350_SEL0_SDIO 0x00000200 +#define CST4350_SEL1_SDIO 0x00000400 +#define CST4350_SDIO_PAD_MODE 0x00000800 +#define CST4350_BBPLL_LOCKED 0x00001000 +#define CST4350_USBPLL_LOCKED 0x00002000 +#define CST4350_LINE_STATE 0x0000C000 +#define CST4350_SERDES_PIPE_PLLLOCK 0x00010000 +#define CST4350_BT_READY 0x00020000 +#define CST4350_SFLASH_PRESENT 0x00040000 +#define CST4350_CPULESS_ENABLE 0x00080000 +#define CST4350_STRAP_HOST_IFC_1 0x00100000 +#define CST4350_STRAP_HOST_IFC_2 0x00200000 +#define CST4350_STRAP_HOST_IFC_3 0x00400000 +#define CST4350_RAW_SPROM_PRESENT 0x00800000 +#define CST4350_APP_CLK_SWITCH_SEL_RDBACK 0x01000000 +#define CST4350_RAW_RSRC_INIT_MODE_0 0x02000000 +#define CST4350_SDIO_PAD_VDDIO 0x04000000 +#define CST4350_GSPI_MODE 0x08000000 +#define CST4350_PACKAGE_OPTION 0xF0000000 +#define CST4350_PACKAGE_SHIFT 28 + +/* package option for 4350 */ +#define CST4350_PACKAGE_WLCSP 0x0 +#define CST4350_PACKAGE_PCIE 0x1 +#define CST4350_PACKAGE_WLBGA 0x2 +#define CST4350_PACKAGE_DBG 0x3 +#define CST4350_PACKAGE_USB 0x4 +#define CST4350_PACKAGE_USB_HSIC 0x4 + +#define CST4350_PKG_MODE(cs) ((cs & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT) + +#define CST4350_PKG_WLCSP(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP)) +#define CST4350_PKG_PCIE(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE)) +#define CST4350_PKG_WLBGA(cs) 
(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA)) +#define CST4350_PKG_USB(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB)) +#define CST4350_PKG_USB_HSIC(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC)) + +/* 4350C0 USB PACKAGE using raw_sprom_present to indicate 40mHz xtal */ +#define CST4350_PKG_USB_40M(cs) (cs & CST4350_RAW_SPROM_PRESENT) + +#define CST4350_CHIPMODE_SDIOD(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD)) +#define CST4350_CHIPMODE_USB20D(cs) ((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D)) +#define CST4350_CHIPMODE_HSIC20D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D)) +#define CST4350_CHIPMODE_HSIC30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D)) +#define CST4350_CHIPMODE_USB30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D)) +#define CST4350_CHIPMODE_USB30D_WL(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL)) +#define CST4350_CHIPMODE_PCIE(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE)) + +/* strap_host_ifc strap value */ +#define CST4350_HOST_IFC_MASK 0x00700000 +#define CST4350_HOST_IFC_SHIFT 20 + +/* host_ifc raw mode */ +#define CST4350_IFC_MODE_SDIOD 0x0 +#define CST4350_IFC_MODE_HSIC20D 0x1 +#define CST4350_IFC_MODE_HSIC30D 0x2 +#define CST4350_IFC_MODE_PCIE 0x3 +#define CST4350_IFC_MODE_USB20D 0x4 +#define CST4350_IFC_MODE_USB30D 0x5 +#define CST4350_IFC_MODE_USB30D_WL 0x6 +#define CST4350_IFC_MODE_USB30D_BT 0x7 + +#define CST4350_IFC_MODE(cs) ((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT) + +/* 4350 PMU resources */ +#define RES4350_LPLDO_PU 0 +#define RES4350_PMU_BG_PU 1 +#define RES4350_PMU_SLEEP 2 +#define RES4350_RSVD_3 3 +#define RES4350_CBUCK_LPOM_PU 4 +#define RES4350_CBUCK_PFM_PU 5 +#define RES4350_COLD_START_WAIT 6 +#define RES4350_RSVD_7 7 +#define RES4350_LNLDO_PU 8 +#define RES4350_XTALLDO_PU 9 +#define RES4350_LDO3P3_PU 10 +#define RES4350_OTP_PU 11 +#define RES4350_XTAL_PU 12 +#define RES4350_SR_CLK_START 13 +#define RES4350_LQ_AVAIL 14 +#define 
RES4350_LQ_START 15 +#define RES4350_PERST_OVR 16 +#define RES4350_WL_CORE_RDY 17 +#define RES4350_ILP_REQ 18 +#define RES4350_ALP_AVAIL 19 +#define RES4350_MINI_PMU 20 +#define RES4350_RADIO_PU 21 +#define RES4350_SR_CLK_STABLE 22 +#define RES4350_SR_SAVE_RESTORE 23 +#define RES4350_SR_PHY_PWRSW 24 +#define RES4350_SR_VDDM_PWRSW 25 +#define RES4350_SR_SUBCORE_PWRSW 26 +#define RES4350_SR_SLEEP 27 +#define RES4350_HT_START 28 +#define RES4350_HT_AVAIL 29 +#define RES4350_MACPHY_CLKAVAIL 30 + +#define MUXENAB4350_UART_MASK (0x0000000f) +#define MUXENAB4350_UART_SHIFT 0 +#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for host_wake */ +#define MUXENAB4350_HOSTWAKE_SHIFT 4 +#define MUXENAB4349_UART_MASK (0xf) + +#define CC4350_GPIO_COUNT 16 + +/* 4350 GCI function sel values */ +#define CC4350_FNSEL_HWDEF (0) +#define CC4350_FNSEL_SAMEASPIN (1) +#define CC4350_FNSEL_UART (2) +#define CC4350_FNSEL_SFLASH (3) +#define CC4350_FNSEL_SPROM (4) +#define CC4350_FNSEL_I2C (5) +#define CC4350_FNSEL_MISC0 (6) +#define CC4350_FNSEL_GCI (7) +#define CC4350_FNSEL_MISC1 (8) +#define CC4350_FNSEL_MISC2 (9) +#define CC4350_FNSEL_PWDOG (10) +#define CC4350_FNSEL_IND (12) +#define CC4350_FNSEL_PDN (13) +#define CC4350_FNSEL_PUP (14) +#define CC4350_FNSEL_TRISTATE (15) +#define CC4350C_FNSEL_UART (3) + +/* 4350 GPIO */ +#define CC4350_PIN_GPIO_00 (0) +#define CC4350_PIN_GPIO_01 (1) +#define CC4350_PIN_GPIO_02 (2) +#define CC4350_PIN_GPIO_03 (3) +#define CC4350_PIN_GPIO_04 (4) +#define CC4350_PIN_GPIO_05 (5) +#define CC4350_PIN_GPIO_06 (6) +#define CC4350_PIN_GPIO_07 (7) +#define CC4350_PIN_GPIO_08 (8) +#define CC4350_PIN_GPIO_09 (9) +#define CC4350_PIN_GPIO_10 (10) +#define CC4350_PIN_GPIO_11 (11) +#define CC4350_PIN_GPIO_12 (12) +#define CC4350_PIN_GPIO_13 (13) +#define CC4350_PIN_GPIO_14 (14) +#define CC4350_PIN_GPIO_15 (15) + +#define CC4350_RSVD_16_SHIFT 16 + +#define CC2_4350_PHY_PWRSW_UPTIME_MASK (0xf << 0) +#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT (0) +#define 
CC2_4350_VDDM_PWRSW_UPDELAY_MASK (0xf << 4) +#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT (4) +#define CC2_4350_VDDM_PWRSW_UPTIME_MASK (0xf << 8) +#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT (8) +#define CC2_4350_SBC_PWRSW_DNDELAY_MASK (0x3 << 12) +#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT (12) +#define CC2_4350_PHY_PWRSW_DNDELAY_MASK (0x3 << 14) +#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT (14) +#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK (0x3 << 16) +#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT (16) +#define CC2_4350_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC2_4350_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK (1 << 21) +#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT (21) +#define CC2_4350_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT (24) + +/* Applies to 4335/4350/4345 */ +#define CC3_SR_CLK_SR_MEM_MASK (1 << 0) +#define CC3_SR_CLK_SR_MEM_SHIFT (0) +#define CC3_SR_BIT1_TBD_MASK (1 << 1) +#define CC3_SR_BIT1_TBD_SHIFT (1) +#define CC3_SR_ENGINE_ENABLE_MASK (1 << 2) +#define CC3_SR_ENGINE_ENABLE_SHIFT (2) +#define CC3_SR_BIT3_TBD_MASK (1 << 3) +#define CC3_SR_BIT3_TBD_SHIFT (3) +#define CC3_SR_MINDIV_FAST_CLK_MASK (0xF << 4) +#define CC3_SR_MINDIV_FAST_CLK_SHIFT (4) +#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_MASK (1 << 8) +#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_SHIFT (8) +#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_MASK (1 << 9) +#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_SHIFT (9) +#define CC3_SR_R23_SR_RISE_EDGE_TRIG_MASK (1 << 10) +#define CC3_SR_R23_SR_RISE_EDGE_TRIG_SHIFT (10) +#define CC3_SR_R23_SR_FALL_EDGE_TRIG_MASK (1 << 11) +#define CC3_SR_R23_SR_FALL_EDGE_TRIG_SHIFT (11) +#define CC3_SR_NUM_CLK_HIGH_MASK (0x7 << 12) +#define CC3_SR_NUM_CLK_HIGH_SHIFT (12) +#define CC3_SR_BIT15_TBD_MASK (1 << 15) +#define CC3_SR_BIT15_TBD_SHIFT (15) +#define CC3_SR_PHY_FUNC_PIC_MASK (1 << 16) +#define CC3_SR_PHY_FUNC_PIC_SHIFT (16) +#define CC3_SR_BIT17_19_TBD_MASK (0x7 << 17) +#define CC3_SR_BIT17_19_TBD_SHIFT (17) +#define CC3_SR_CHIP_TRIGGER_1_MASK (1 << 20) 
+#define CC3_SR_CHIP_TRIGGER_1_SHIFT (20) +#define CC3_SR_CHIP_TRIGGER_2_MASK (1 << 21) +#define CC3_SR_CHIP_TRIGGER_2_SHIFT (21) +#define CC3_SR_CHIP_TRIGGER_3_MASK (1 << 22) +#define CC3_SR_CHIP_TRIGGER_3_SHIFT (22) +#define CC3_SR_CHIP_TRIGGER_4_MASK (1 << 23) +#define CC3_SR_CHIP_TRIGGER_4_SHIFT (23) +#define CC3_SR_ALLOW_SBC_FUNC_PIC_MASK (1 << 24) +#define CC3_SR_ALLOW_SBC_FUNC_PIC_SHIFT (24) +#define CC3_SR_BIT25_26_TBD_MASK (0x3 << 25) +#define CC3_SR_BIT25_26_TBD_SHIFT (25) +#define CC3_SR_ALLOW_SBC_STBY_MASK (1 << 27) +#define CC3_SR_ALLOW_SBC_STBY_SHIFT (27) +#define CC3_SR_GPIO_MUX_MASK (0xF << 28) +#define CC3_SR_GPIO_MUX_SHIFT (28) + +/* Applies to 4335/4350/4345 */ +#define CC4_SR_INIT_ADDR_MASK (0x3FF0000) +#define CC4_4350_SR_ASM_ADDR (0x30) +#define CC4_4350_C0_SR_ASM_ADDR (0x0) +#define CC4_4335_SR_ASM_ADDR (0x48) +#define CC4_4345_SR_ASM_ADDR (0x48) +#define CC4_SR_INIT_ADDR_SHIFT (16) + +#define CC4_4350_EN_SR_CLK_ALP_MASK (1 << 30) +#define CC4_4350_EN_SR_CLK_ALP_SHIFT (30) +#define CC4_4350_EN_SR_CLK_HT_MASK (1 << 31) +#define CC4_4350_EN_SR_CLK_HT_SHIFT (31) + +#define VREG4_4350_MEMLPDO_PU_MASK (1 << 31) +#define VREG4_4350_MEMLPDO_PU_SHIFT 31 + +#define VREG6_4350_SR_EXT_CLKDIR_MASK (1 << 20) +#define VREG6_4350_SR_EXT_CLKDIR_SHIFT 20 +#define VREG6_4350_SR_EXT_CLKDIV_MASK (0x3 << 21) +#define VREG6_4350_SR_EXT_CLKDIV_SHIFT 21 +#define VREG6_4350_SR_EXT_CLKEN_MASK (1 << 23) +#define VREG6_4350_SR_EXT_CLKEN_SHIFT 23 + +#define CC5_4350_PMU_EN_ASSERT_MASK (1 << 13) +#define CC5_4350_PMU_EN_ASSERT_SHIFT (13) + +#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT (6) +#define CC6_4350_PMU_EN_EXT_PERST_MASK (1 << 17) +#define CC6_4350_PMU_EN_EXT_PERST_SHIFT (17) +#define CC6_4350_PMU_EN_WAKEUP_MASK (1 << 18) +#define CC6_4350_PMU_EN_WAKEUP_SHIFT (18) + +#define CC7_4350_PMU_EN_ASSERT_L2_MASK (1 << 26) 
+#define CC7_4350_PMU_EN_ASSERT_L2_SHIFT (26) +#define CC7_4350_PMU_EN_MDIO_MASK (1 << 27) +#define CC7_4350_PMU_EN_MDIO_SHIFT (27) + +#define CC6_4345_PMU_EN_PERST_DEASSERT_MASK (1 << 13) +#define CC6_4345_PMU_EN_PERST_DEASSERT_SHIF (13) +#define CC6_4345_PMU_EN_L2_DEASSERT_MASK (1 << 14) +#define CC6_4345_PMU_EN_L2_DEASSERT_SHIF (14) +#define CC6_4345_PMU_EN_ASSERT_L2_MASK (1 << 15) +#define CC6_4345_PMU_EN_ASSERT_L2_SHIFT (15) +#define CC6_4345_PMU_EN_MDIO_MASK (1 << 24) +#define CC6_4345_PMU_EN_MDIO_SHIFT (24) + +/* 4347 GCI function sel values */ +#define CC4347_FNSEL_HWDEF (0) +#define CC4347_FNSEL_SAMEASPIN (1) +#define CC4347_FNSEL_GPIO0 (2) +#define CC4347_FNSEL_FUART (3) +#define CC4347_FNSEL_GCI0 (4) +#define CC4347_FNSEL_GCI1 (5) +#define CC4347_FNSEL_DBG_UART (6) +#define CC4347_FNSEL_SPI (7) +#define CC4347_FNSEL_SPROM (8) +#define CC4347_FNSEL_MISC0 (9) +#define CC4347_FNSEL_MISC1 (10) +#define CC4347_FNSEL_MISC2 (11) +#define CC4347_FNSEL_IND (12) +#define CC4347_FNSEL_PDN (13) +#define CC4347_FNSEL_PUP (14) +#define CC4347_FNSEL_TRISTATE (15) + +/* 4347 GPIO */ +#define CC4347_PIN_GPIO_02 (2) +#define CC4347_PIN_GPIO_03 (3) +#define CC4347_PIN_GPIO_04 (4) +#define CC4347_PIN_GPIO_05 (5) +#define CC4347_PIN_GPIO_06 (6) +#define CC4347_PIN_GPIO_07 (7) +#define CC4347_PIN_GPIO_08 (8) +#define CC4347_PIN_GPIO_09 (9) +#define CC4347_PIN_GPIO_10 (10) +#define CC4347_PIN_GPIO_11 (11) +#define CC4347_PIN_GPIO_12 (12) +#define CC4347_PIN_GPIO_13 (13) +/* GCI chipcontrol register indices */ +#define CC_GCI_CHIPCTRL_00 (0) +#define CC_GCI_CHIPCTRL_01 (1) +#define CC_GCI_CHIPCTRL_02 (2) +#define CC_GCI_CHIPCTRL_03 (3) +#define CC_GCI_CHIPCTRL_04 (4) +#define CC_GCI_CHIPCTRL_05 (5) +#define CC_GCI_CHIPCTRL_06 (6) +#define CC_GCI_CHIPCTRL_07 (7) +#define CC_GCI_CHIPCTRL_08 (8) +#define CC_GCI_CHIPCTRL_09 (9) +#define CC_GCI_CHIPCTRL_10 (10) +#define CC_GCI_CHIPCTRL_10 (10) +#define CC_GCI_CHIPCTRL_11 (11) +#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12) + +#define 
CC_GCI_04_SDIO_DRVSTR_SHIFT 15 +#define CC_GCI_04_SDIO_DRVSTR_MASK (0x0f << CC_GCI_04_SDIO_DRVSTR_SHIFT) /* 0x00078000 */ +#define CC_GCI_04_SDIO_DRVSTR_OVERRIDE_BIT (1 << 18) +#define CC_GCI_04_SDIO_DRVSTR_DEFAULT_MA 14 +#define CC_GCI_04_SDIO_DRVSTR_MIN_MA 2 +#define CC_GCI_04_SDIO_DRVSTR_MAX_MA 16 + +#define CC_GCI_06_JTAG_SEL_SHIFT 4 +#define CC_GCI_06_JTAG_SEL_MASK (1 << 4) + +#define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00) >> 8) + +#define CC_GCI_03_LPFLAGS_SFLASH_MASK (0xFFFFFF << 8) +#define CC_GCI_03_LPFLAGS_SFLASH_VAL (0xCCCCCC << 8) +#define GPIO_CTRL_REG_DISABLE_INTERRUPT (3 << 9) +#define GPIO_CTRL_REG_COUNT 40 + +/* GCI chipstatus register indices */ +#define GCI_CHIPSTATUS_00 (0) +#define GCI_CHIPSTATUS_01 (1) +#define GCI_CHIPSTATUS_02 (2) +#define GCI_CHIPSTATUS_03 (3) +#define GCI_CHIPSTATUS_04 (4) +#define GCI_CHIPSTATUS_05 (5) +#define GCI_CHIPSTATUS_06 (6) +#define GCI_CHIPSTATUS_07 (7) +#define GCI_CHIPSTATUS_08 (8) +#define GCI_CHIPSTATUS_09 (9) +#define GCI_CHIPSTATUS_10 (10) +#define GCI_CHIPSTATUS_11 (11) +#define GCI_CHIPSTATUS_12 (12) +#define GCI_CHIPSTATUS_13 (13) + +/* 43012 GCI chipstatus registers */ +#define GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK (1 << 3) + +/* 4345 PMU resources */ +#define RES4345_LPLDO_PU 0 +#define RES4345_PMU_BG_PU 1 +#define RES4345_PMU_SLEEP 2 +#define RES4345_HSICLDO_PU 3 +#define RES4345_CBUCK_LPOM_PU 4 +#define RES4345_CBUCK_PFM_PU 5 +#define RES4345_COLD_START_WAIT 6 +#define RES4345_RSVD_7 7 +#define RES4345_LNLDO_PU 8 +#define RES4345_XTALLDO_PU 9 +#define RES4345_LDO3P3_PU 10 +#define RES4345_OTP_PU 11 +#define RES4345_XTAL_PU 12 +#define RES4345_SR_CLK_START 13 +#define RES4345_LQ_AVAIL 14 +#define RES4345_LQ_START 15 +#define RES4345_PERST_OVR 16 +#define RES4345_WL_CORE_RDY 17 +#define RES4345_ILP_REQ 18 +#define RES4345_ALP_AVAIL 19 +#define RES4345_MINI_PMU 20 +#define RES4345_RADIO_PU 21 +#define RES4345_SR_CLK_STABLE 22 +#define RES4345_SR_SAVE_RESTORE 23 +#define RES4345_SR_PHY_PWRSW 24
+#define RES4345_SR_VDDM_PWRSW 25 +#define RES4345_SR_SUBCORE_PWRSW 26 +#define RES4345_SR_SLEEP 27 +#define RES4345_HT_START 28 +#define RES4345_HT_AVAIL 29 +#define RES4345_MACPHY_CLK_AVAIL 30 + +/* 43012 pins + * note: only the values set as default/used are added here. + */ +#define CC43012_PIN_GPIO_00 (0) +#define CC43012_PIN_GPIO_01 (1) +#define CC43012_PIN_GPIO_02 (2) +#define CC43012_PIN_GPIO_03 (3) +#define CC43012_PIN_GPIO_04 (4) +#define CC43012_PIN_GPIO_05 (5) +#define CC43012_PIN_GPIO_06 (6) +#define CC43012_PIN_GPIO_07 (7) +#define CC43012_PIN_GPIO_08 (8) +#define CC43012_PIN_GPIO_09 (9) +#define CC43012_PIN_GPIO_10 (10) +#define CC43012_PIN_GPIO_11 (11) +#define CC43012_PIN_GPIO_12 (12) +#define CC43012_PIN_GPIO_13 (13) +#define CC43012_PIN_GPIO_14 (14) +#define CC43012_PIN_GPIO_15 (15) + +/* 43012 GCI function sel values */ +#define CC43012_FNSEL_HWDEF (0) +#define CC43012_FNSEL_SAMEASPIN (1) +#define CC43012_FNSEL_GPIO0 (2) +#define CC43012_FNSEL_GPIO1 (3) +#define CC43012_FNSEL_GCI0 (4) +#define CC43012_FNSEL_GCI1 (5) +#define CC43012_FNSEL_DBG_UART (6) +#define CC43012_FNSEL_I2C (7) +#define CC43012_FNSEL_BT_SFLASH (8) +#define CC43012_FNSEL_MISC0 (9) +#define CC43012_FNSEL_MISC1 (10) +#define CC43012_FNSEL_MISC2 (11) +#define CC43012_FNSEL_IND (12) +#define CC43012_FNSEL_PDN (13) +#define CC43012_FNSEL_PUP (14) +#define CC43012_FNSEL_TRI (15) + +/* 4335 pins +* note: only the values set as default/used are added here. 
+*/ +#define CC4335_PIN_GPIO_00 (0) +#define CC4335_PIN_GPIO_01 (1) +#define CC4335_PIN_GPIO_02 (2) +#define CC4335_PIN_GPIO_03 (3) +#define CC4335_PIN_GPIO_04 (4) +#define CC4335_PIN_GPIO_05 (5) +#define CC4335_PIN_GPIO_06 (6) +#define CC4335_PIN_GPIO_07 (7) +#define CC4335_PIN_GPIO_08 (8) +#define CC4335_PIN_GPIO_09 (9) +#define CC4335_PIN_GPIO_10 (10) +#define CC4335_PIN_GPIO_11 (11) +#define CC4335_PIN_GPIO_12 (12) +#define CC4335_PIN_GPIO_13 (13) +#define CC4335_PIN_GPIO_14 (14) +#define CC4335_PIN_GPIO_15 (15) +#define CC4335_PIN_SDIO_CLK (16) +#define CC4335_PIN_SDIO_CMD (17) +#define CC4335_PIN_SDIO_DATA0 (18) +#define CC4335_PIN_SDIO_DATA1 (19) +#define CC4335_PIN_SDIO_DATA2 (20) +#define CC4335_PIN_SDIO_DATA3 (21) +#define CC4335_PIN_RF_SW_CTRL_6 (22) +#define CC4335_PIN_RF_SW_CTRL_7 (23) +#define CC4335_PIN_RF_SW_CTRL_8 (24) +#define CC4335_PIN_RF_SW_CTRL_9 (25) +/* Last GPIO Pad */ +#define CC4335_PIN_GPIO_LAST (31) + +/* 4335 GCI function sel values +*/ +#define CC4335_FNSEL_HWDEF (0) +#define CC4335_FNSEL_SAMEASPIN (1) +#define CC4335_FNSEL_GPIO0 (2) +#define CC4335_FNSEL_GPIO1 (3) +#define CC4335_FNSEL_GCI0 (4) +#define CC4335_FNSEL_GCI1 (5) +#define CC4335_FNSEL_UART (6) +#define CC4335_FNSEL_SFLASH (7) +#define CC4335_FNSEL_SPROM (8) +#define CC4335_FNSEL_MISC0 (9) +#define CC4335_FNSEL_MISC1 (10) +#define CC4335_FNSEL_MISC2 (11) +#define CC4335_FNSEL_IND (12) +#define CC4335_FNSEL_PDN (13) +#define CC4335_FNSEL_PUP (14) +#define CC4335_FNSEL_TRI (15) + +/* GCI Core Control Reg */ +#define GCI_CORECTRL_SR_MASK (1 << 0) /**< SECI block Reset */ +#define GCI_CORECTRL_RSL_MASK (1 << 1) /**< ResetSECILogic */ +#define GCI_CORECTRL_ES_MASK (1 << 2) /**< EnableSECI */ +#define GCI_CORECTRL_FSL_MASK (1 << 3) /**< Force SECI Out Low */ +#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */ +#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */ +#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */ +#define GCI_CORECTRL_FORCEREGCLK_MASK (1 
<< 18) /* ForceRegClk */ + +/* 4345 pins +* note: only the values set as default/used are added here. +*/ +#define CC4345_PIN_GPIO_00 (0) +#define CC4345_PIN_GPIO_01 (1) +#define CC4345_PIN_GPIO_02 (2) +#define CC4345_PIN_GPIO_03 (3) +#define CC4345_PIN_GPIO_04 (4) +#define CC4345_PIN_GPIO_05 (5) +#define CC4345_PIN_GPIO_06 (6) +#define CC4345_PIN_GPIO_07 (7) +#define CC4345_PIN_GPIO_08 (8) +#define CC4345_PIN_GPIO_09 (9) +#define CC4345_PIN_GPIO_10 (10) +#define CC4345_PIN_GPIO_11 (11) +#define CC4345_PIN_GPIO_12 (12) +#define CC4345_PIN_GPIO_13 (13) +#define CC4345_PIN_GPIO_14 (14) +#define CC4345_PIN_GPIO_15 (15) +#define CC4345_PIN_GPIO_16 (16) +#define CC4345_PIN_SDIO_CLK (17) +#define CC4345_PIN_SDIO_CMD (18) +#define CC4345_PIN_SDIO_DATA0 (19) +#define CC4345_PIN_SDIO_DATA1 (20) +#define CC4345_PIN_SDIO_DATA2 (21) +#define CC4345_PIN_SDIO_DATA3 (22) +#define CC4345_PIN_RF_SW_CTRL_0 (23) +#define CC4345_PIN_RF_SW_CTRL_1 (24) +#define CC4345_PIN_RF_SW_CTRL_2 (25) +#define CC4345_PIN_RF_SW_CTRL_3 (26) +#define CC4345_PIN_RF_SW_CTRL_4 (27) +#define CC4345_PIN_RF_SW_CTRL_5 (28) +#define CC4345_PIN_RF_SW_CTRL_6 (29) +#define CC4345_PIN_RF_SW_CTRL_7 (30) +#define CC4345_PIN_RF_SW_CTRL_8 (31) +#define CC4345_PIN_RF_SW_CTRL_9 (32) + +/* 4345 GCI function sel values +*/ +#define CC4345_FNSEL_HWDEF (0) +#define CC4345_FNSEL_SAMEASPIN (1) +#define CC4345_FNSEL_GPIO0 (2) +#define CC4345_FNSEL_GPIO1 (3) +#define CC4345_FNSEL_GCI0 (4) +#define CC4345_FNSEL_GCI1 (5) +#define CC4345_FNSEL_UART (6) +#define CC4345_FNSEL_SFLASH (7) +#define CC4345_FNSEL_SPROM (8) +#define CC4345_FNSEL_MISC0 (9) +#define CC4345_FNSEL_MISC1 (10) +#define CC4345_FNSEL_MISC2 (11) +#define CC4345_FNSEL_IND (12) +#define CC4345_FNSEL_PDN (13) +#define CC4345_FNSEL_PUP (14) +#define CC4345_FNSEL_TRI (15) + +#define MUXENAB4345_UART_MASK (0x0000000f) +#define MUXENAB4345_UART_SHIFT 0 +#define MUXENAB4345_HOSTWAKE_MASK (0x000000f0) +#define MUXENAB4345_HOSTWAKE_SHIFT 4 + +/* 4349 Group (4349, 4355, 
4359) GCI AVS function sel values */ +#define CC4349_GRP_GCI_AVS_CTRL_MASK (0xffe00000) +#define CC4349_GRP_GCI_AVS_CTRL_SHIFT (21) +#define CC4349_GRP_GCI_AVS_CTRL_ENAB (1 << 5) + +/* 4345 GCI AVS function sel values */ +#define CC4345_GCI_AVS_CTRL_MASK (0xfc) +#define CC4345_GCI_AVS_CTRL_SHIFT (2) +#define CC4345_GCI_AVS_CTRL_ENAB (1 << 5) + +/* 43430 Pin */ +#define CC43430_PIN_GPIO_00 (0) +#define CC43430_PIN_GPIO_01 (1) +#define CC43430_PIN_GPIO_02 (2) +#define CC43430_PIN_GPIO_07 (7) +#define CC43430_PIN_GPIO_08 (8) +#define CC43430_PIN_GPIO_09 (9) +#define CC43430_PIN_GPIO_10 (10) + +#define CC43430_FNSEL_SDIO_INT (2) +#define CC43430_FNSEL_6_FAST_UART (6) +#define CC43430_FNSEL_10_FAST_UART (10) + +#define MUXENAB43430_UART_MASK (0x0000000f) +#define MUXENAB43430_UART_SHIFT 0 +#define MUXENAB43430_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */ +#define MUXENAB43430_HOSTWAKE_SHIFT 4 + +#define CC43430_FNSEL_SAMEASPIN (1) +#define CC43430_RFSWCTRL_EN_MASK (0x7f8) +#define CC43430_RFSWCTRL_EN_SHIFT (3) + +/* GCI GPIO for function sel GCI-0/GCI-1 */ +#define CC_GCI_GPIO_0 (0) +#define CC_GCI_GPIO_1 (1) +#define CC_GCI_GPIO_2 (2) +#define CC_GCI_GPIO_3 (3) +#define CC_GCI_GPIO_4 (4) +#define CC_GCI_GPIO_5 (5) +#define CC_GCI_GPIO_6 (6) +#define CC_GCI_GPIO_7 (7) +#define CC_GCI_GPIO_8 (8) +#define CC_GCI_GPIO_9 (9) +#define CC_GCI_GPIO_10 (10) +#define CC_GCI_GPIO_11 (11) +#define CC_GCI_GPIO_12 (12) +#define CC_GCI_GPIO_13 (13) +#define CC_GCI_GPIO_14 (14) +#define CC_GCI_GPIO_15 (15) + +/* indicates Invalid GPIO, e.g. 
when PAD GPIO doesn't map to GCI GPIO */ +#define CC_GCI_GPIO_INVALID 0xFF + +/* find the 4 bit mask given the bit position */ +#define GCIMASK(pos) (((uint32)0xF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL(val, pos) ((((uint32)val) << pos) & GCIMASK(pos)) +/* Extract nibble from a given position */ +#define GCIGETNBL(val, pos) ((val >> pos) & 0xF) + +/* find the 8 bit mask given the bit position */ +#define GCIMASK_8B(pos) (((uint32)0xFF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL_8B(val, pos) ((((uint32)val) << pos) & GCIMASK_8B(pos)) +/* Extract byte from a given position */ +#define GCIGETNBL_8B(val, pos) ((val >> pos) & 0xFF) + +/* find the 4 bit mask given the bit position */ +#define GCIMASK_4B(pos) (((uint32)0xF) << pos) +/* get the value which can be used to directly OR with chipcontrol reg */ +#define GCIPOSVAL_4B(val, pos) ((((uint32)val) << pos) & GCIMASK_4B(pos)) +/* Extract nibble from a given position */ +#define GCIGETNBL_4B(val, pos) ((val >> pos) & 0xF) + +/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. 
*/ +#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTSTATUS_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTSTATUS_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTSTATUS_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTSTATUS_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTSTATUS_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTSTATUS_EVENT (1 << 21) /* GCI Event Interrupt */ +#define GCI_INTSTATUS_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */ +#define GCI_INTSTATUS_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */ +#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */ +#define GCI_INTSTATUS_LHLWLWAKE (1 << 30) /* LHL WL wake */ + +/* 4335 GCI IntMask Register bits. 
*/ +#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_INTMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_INTMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_INTMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_INTMASK_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_INTMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_INTMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_INTMASK_EVENT (1 << 21) /* GCI Event Interrupt */ +#define GCI_INTMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */ +#define GCI_INTMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */ +#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ +#define GCI_INTMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */ + +/* 4335 GCI WakeMask Register bits. 
*/ +#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */ +#define GCI_WAKEMASK_UB (1 << 1) /**< UART Break Interrupt */ +#define GCI_WAKEMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */ +#define GCI_WAKEMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */ +#define GCI_WAKE_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */ +#define GCI_WAKEMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */ +#define GCI_WAKEMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */ +#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */ +#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */ +#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */ +#define GCI_WAKEMASK_EVENT (1 << 21) /* GCI Event Interrupt */ +#define GCI_WAKEMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */ +#define GCI_WAKEMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */ +#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */ +#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */ +#define GCI_WAKEMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */ + +#define GCI_WAKE_ON_GCI_GPIO1 1 +#define GCI_WAKE_ON_GCI_GPIO2 2 +#define GCI_WAKE_ON_GCI_GPIO3 3 +#define GCI_WAKE_ON_GCI_GPIO4 4 +#define GCI_WAKE_ON_GCI_GPIO5 5 +#define GCI_WAKE_ON_GCI_GPIO6 6 +#define GCI_WAKE_ON_GCI_GPIO7 7 +#define GCI_WAKE_ON_GCI_GPIO8 8 +#define GCI_WAKE_ON_GCI_SECI_IN 9 + +#define PMU_EXT_WAKE_MASK_0_SDIO (1 << 2) + +/* =========== LHL regs =========== */ +#define LHL_PWRSEQCTL_SLEEP_EN (1 << 0) +#define LHL_PWRSEQCTL_PMU_SLEEP_MODE (1 << 1) +#define LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN (1 << 2) +#define LHL_PWRSEQCTL_PMU_TOP_ISO_EN (1 << 3) +#define LHL_PWRSEQCTL_PMU_TOP_SLB_EN (1 << 4) +#define LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN (1 << 5) +#define LHL_PWRSEQCTL_PMU_CLDO_PD (1 << 6) +#define LHL_PWRSEQCTL_PMU_LPLDO_PD (1 << 7) +#define LHL_PWRSEQCTL_PMU_RSRC6_EN (1 << 8) + +#define PMU_SLEEP_MODE_0 (LHL_PWRSEQCTL_SLEEP_EN |\ + 
LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN) + +#define PMU_SLEEP_MODE_1 (LHL_PWRSEQCTL_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_SLEEP_MODE |\ + LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\ + LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\ + LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\ + LHL_PWRSEQCTL_PMU_CLDO_PD |\ + LHL_PWRSEQCTL_PMU_RSRC6_EN) + +#define PMU_SLEEP_MODE_2 (LHL_PWRSEQCTL_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_SLEEP_MODE |\ + LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\ + LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\ + LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\ + LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\ + LHL_PWRSEQCTL_PMU_CLDO_PD |\ + LHL_PWRSEQCTL_PMU_LPLDO_PD |\ + LHL_PWRSEQCTL_PMU_RSRC6_EN) + +#define LHL_PWRSEQ_CTL (0x000000ff) + +/* LHL Top Level Power Up Control Register (lhl_top_pwrup_ctl_adr, Offset 0xE78) +* Top Level Counter values for isolation, retention, Power Switch control +*/ +#define LHL_PWRUP_ISOLATION_CNT (0x6 << 8) +#define LHL_PWRUP_RETENTION_CNT (0x5 << 16) +#define LHL_PWRUP_PWRSW_CNT (0x7 << 24) +/* Mask is taken only for isolation 8:13 , Retention 16:21 , +* Power Switch control 24:29 +*/ +#define LHL_PWRUP_CTL_MASK (0x3F3F3F00) +#define LHL_PWRUP_CTL (LHL_PWRUP_ISOLATION_CNT |\ + LHL_PWRUP_RETENTION_CNT |\ + LHL_PWRUP_PWRSW_CNT) + +#define LHL_PWRUP_ISOLATION_CNT_4347 (0x7 << 8) +#define LHL_PWRUP_RETENTION_CNT_4347 (0x5 << 16) +#define LHL_PWRUP_PWRSW_CNT_4347 (0x7 << 24) + +#define LHL_PWRUP_CTL_4347 (LHL_PWRUP_ISOLATION_CNT_4347 |\ + LHL_PWRUP_RETENTION_CNT_4347 |\ + LHL_PWRUP_PWRSW_CNT_4347) + +#define LHL_PWRUP2_CLDO_DN_CNT (0x0) +#define LHL_PWRUP2_LPLDO_DN_CNT (0x0 << 8) +#define LHL_PWRUP2_RSRC6_DN_CN (0x4 << 16) +#define LHL_PWRUP2_RSRC7_DN_CN (0x0 << 24) +#define LHL_PWRUP2_CTL_MASK (0x3F3F3F3F) +#define LHL_PWRUP2_CTL (LHL_PWRUP2_CLDO_DN_CNT |\ + LHL_PWRUP2_LPLDO_DN_CNT |\ + LHL_PWRUP2_RSRC6_DN_CN |\ + LHL_PWRUP2_RSRC7_DN_CN) + +/* LHL Top Level Power Down Control Register (lhl_top_pwrdn_ctl_adr, Offset 0xE74) */ +#define LHL_PWRDN_SLEEP_CNT (0x4) +#define 
LHL_PWRDN_CTL_MASK (0x3F) + +/* LHL Top Level Power Down Control 2 Register (lhl_top_pwrdn2_ctl_adr, Offset 0xE80) */ +#define LHL_PWRDN2_CLDO_DN_CNT (0x4) +#define LHL_PWRDN2_LPLDO_DN_CNT (0x4 << 8) +#define LHL_PWRDN2_RSRC6_DN_CN (0x3 << 16) +#define LHL_PWRDN2_RSRC7_DN_CN (0x0 << 24) +#define LHL_PWRDN2_CTL (LHL_PWRDN2_CLDO_DN_CNT |\ + LHL_PWRDN2_LPLDO_DN_CNT |\ + LHL_PWRDN2_RSRC6_DN_CN |\ + LHL_PWRDN2_RSRC7_DN_CN) +#define LHL_PWRDN2_CTL_MASK (0x3F3F3F3F) + +#define LHL_FAST_WRITE_EN (1 << 14) + +/* WL ARM Timer0 Interrupt Mask (lhl_wl_armtim0_intrp_adr) */ +#define LHL_WL_ARMTIM0_INTRP_EN 0x00000001 +#define LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER 0x00000002 + +/* WL MAC Timer0 Interrupt Mask (lhl_wl_mactim0_intrp_adr) */ +#define LHL_WL_MACTIM0_INTRP_EN 0x00000001 +#define LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER 0x00000002 + +/* LHL Wakeup Status (lhl_wkup_status_adr) */ +#define LHL_WKUP_STATUS_WR_PENDING_ARMTIM0 0x00100000 + +/* WL ARM Timer0 Interrupt Status (lhl_wl_armtim0_st_adr) */ +#define LHL_WL_ARMTIM0_ST_WL_ARMTIM_INT_ST 0x00000001 + +#define LHL_PS_MODE_0 0 +#define LHL_PS_MODE_1 1 + +/* GCI EventIntMask Register SW bits */ +#define GCI_MAILBOXDATA_TOWLAN (1 << 0) +#define GCI_MAILBOXDATA_TOBT (1 << 1) +#define GCI_MAILBOXDATA_TONFC (1 << 2) +#define GCI_MAILBOXDATA_TOGPS (1 << 3) +#define GCI_MAILBOXDATA_TOLTE (1 << 4) +#define GCI_MAILBOXACK_TOWLAN (1 << 8) +#define GCI_MAILBOXACK_TOBT (1 << 9) +#define GCI_MAILBOXACK_TONFC (1 << 10) +#define GCI_MAILBOXACK_TOGPS (1 << 11) +#define GCI_MAILBOXACK_TOLTE (1 << 12) +#define GCI_WAKE_TOWLAN (1 << 16) +#define GCI_WAKE_TOBT (1 << 17) +#define GCI_WAKE_TONFC (1 << 18) +#define GCI_WAKE_TOGPS (1 << 19) +#define GCI_WAKE_TOLTE (1 << 20) +#define GCI_SWREADY (1 << 24) + +/* 4349 Group (4349, 4355, 4359) GCI SECI_OUT TX Status Regiser bits */ +#define GCI_SECIOUT_TXSTATUS_TXHALT (1 << 0) +#define GCI_SECIOUT_TXSTATUS_TI (1 << 16) + +/* 4335 MUX options. each nibble belongs to a setting. 
Non-zero value specifies a logic +* for now only UART for bootloader. +*/ +#define MUXENAB4335_UART_MASK (0x0000000f) + +#define MUXENAB4335_UART_SHIFT 0 +#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */ +#define MUXENAB4335_HOSTWAKE_SHIFT 4 +#define MUXENAB4335_GETIX(val, name) \ + ((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1) + +/* 43012 MUX options */ +#define MUXENAB43012_HOSTWAKE_MASK (0x00000001) +#define MUXENAB43012_GETIX(val, name) (val - 1) + +/* +* Maximum delay for the PMU state transition in us. +* This is an upper bound intended for spinwaits etc. +*/ +#define PMU_MAX_TRANSITION_DLY 15000 + +/* PMU resource up transition time in ILP cycles */ +#define PMURES_UP_TRANSITION 2 + +/* 53573 PMU Resource */ +#define RES53573_REGULATOR_PU 0 +#define RES53573_XTALLDO_PU 1 +#define RES53573_XTAL_PU 2 +#define RES53573_MINI_PMU 3 +#define RES53573_RADIO_PU 4 +#define RES53573_ILP_REQ 5 +#define RES53573_ALP_AVAIL 6 +#define RES53573_CPUPLL_LDO_PU 7 +#define RES53573_CPU_PLL_PU 8 +#define RES53573_WLAN_BB_PLL_PU 9 +#define RES53573_MISCPLL_LDO_PU 10 +#define RES53573_MISCPLL_PU 11 +#define RES53573_AUDIOPLL_PU 12 +#define RES53573_PCIEPLL_LDO_PU 13 +#define RES53573_PCIEPLL_PU 14 +#define RES53573_DDRPLL_LDO_PU 15 +#define RES53573_DDRPLL_PU 16 +#define RES53573_HT_AVAIL 17 +#define RES53573_MACPHY_CLK_AVAIL 18 +#define RES53573_OTP_PU 19 +#define RES53573_RSVD20 20 + +/* 53573 Chip status registers */ +#define CST53573_LOCK_CPUPLL 0x00000001 +#define CST53573_LOCK_MISCPLL 0x00000002 +#define CST53573_LOCK_DDRPLL 0x00000004 +#define CST53573_LOCK_PCIEPLL 0x00000008 +#define CST53573_EPHY_ENERGY_DET 0x00001f00 +#define CST53573_RAW_ENERGY 0x0003e000 +#define CST53573_BBPLL_LOCKED_O 0x00040000 +#define CST53573_SERDES_PIPE_PLLLOCK 0x00080000 +#define CST53573_STRAP_PCIE_EP_MODE 0x00100000 +#define CST53573_EPHY_PLL_LOCK 0x00200000 +#define CST53573_AUDIO_PLL_LOCKED_O 0x00400000 +#define 
CST53573_PCIE_LINK_IN_L11 0x01000000 +#define CST53573_PCIE_LINK_IN_L12 0x02000000 +#define CST53573_DIN_PACKAGEOPTION 0xf0000000 + +/* 53573 Chip control registers macro definitions */ +#define PMU_53573_CHIPCTL1 1 +#define PMU_53573_CC1_HT_CLK_REQ_CTRL_MASK 0x00000010 +#define PMU_53573_CC1_HT_CLK_REQ_CTRL 0x00000010 + +#define PMU_53573_CHIPCTL3 3 +#define PMU_53573_CC3_ENABLE_CLOSED_LOOP_MASK 0x00000010 +#define PMU_53573_CC3_ENABLE_CLOSED_LOOP 0x00000000 +#define PMU_53573_CC3_ENABLE_BBPLL_PWRDOWN_MASK 0x00000002 +#define PMU_53573_CC3_ENABLE_BBPLL_PWRDOWN 0x00000002 + +#define CST53573_CHIPMODE_PCIE(cs) FALSE + +/* SECI Status (0x134) & Mask (0x138) bits - Rev 35 */ +#define SECI_STAT_BI (1 << 0) /* Break Interrupt */ +#define SECI_STAT_SPE (1 << 1) /* Parity Error */ +#define SECI_STAT_SFE (1 << 2) /* Framing Error */ +#define SECI_STAT_SDU (1 << 3) /* Data Updated */ +#define SECI_STAT_SADU (1 << 4) /* Auxiliary Data Updated */ +#define SECI_STAT_SAS (1 << 6) /* AUX State */ +#define SECI_STAT_SAS2 (1 << 7) /* AUX2 State */ +#define SECI_STAT_SRITI (1 << 8) /* Idle Timer Interrupt */ +#define SECI_STAT_STFF (1 << 9) /* Tx FIFO Full */ +#define SECI_STAT_STFAE (1 << 10) /* Tx FIFO Almost Empty */ +#define SECI_STAT_SRFE (1 << 11) /* Rx FIFO Empty */ +#define SECI_STAT_SRFAF (1 << 12) /* Rx FIFO Almost Full */ +#define SECI_STAT_SFCE (1 << 13) /* Flow Control Event */ + +/* SECI configuration */ +#define SECI_MODE_UART 0x0 +#define SECI_MODE_SECI 0x1 +#define SECI_MODE_LEGACY_3WIRE_BT 0x2 +#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3 +#define SECI_MODE_HALF_SECI 0x4 + +#define SECI_RESET (1 << 0) +#define SECI_RESET_BAR_UART (1 << 1) +#define SECI_ENAB_SECI_ECI (1 << 2) +#define SECI_ENAB_SECIOUT_DIS (1 << 3) +#define SECI_MODE_MASK 0x7 +#define SECI_MODE_SHIFT 4 /* (bits 5, 6, 7) */ +#define SECI_UPD_SECI (1 << 7) + +#define SECI_AUX_TX_START (1 << 31) +#define SECI_SLIP_ESC_CHAR 0xDB +#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR +#define SECI_SIGNOFF_1 0 +#define 
SECI_REFRESH_REQ 0xDA + +/* seci clk_ctl_st bits */ +#define CLKCTL_STS_HT_AVAIL_REQ (1 << 4) +#define CLKCTL_STS_SECI_CLK_REQ (1 << 8) +#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24) + +#define SECI_UART_MSR_CTS_STATE (1 << 0) +#define SECI_UART_MSR_RTS_STATE (1 << 1) +#define SECI_UART_SECI_IN_STATE (1 << 2) +#define SECI_UART_SECI_IN2_STATE (1 << 3) + +/* GCI RX FIFO Control Register */ +#define GCI_RXF_LVL_MASK (0xFF << 0) +#define GCI_RXF_TIMEOUT_MASK (0xFF << 8) + +/* GCI UART Registers' Bit definitions */ +/* Seci Fifo Level Register */ +#define SECI_TXF_LVL_MASK (0x3F << 8) +#define TXF_AE_LVL_DEFAULT 0x4 +#define SECI_RXF_LVL_FC_MASK (0x3F << 16) + +/* SeciUARTFCR Bit definitions */ +#define SECI_UART_FCR_RFR (1 << 0) +#define SECI_UART_FCR_TFR (1 << 1) +#define SECI_UART_FCR_SR (1 << 2) +#define SECI_UART_FCR_THP (1 << 3) +#define SECI_UART_FCR_AB (1 << 4) +#define SECI_UART_FCR_ATOE (1 << 5) +#define SECI_UART_FCR_ARTSOE (1 << 6) +#define SECI_UART_FCR_ABV (1 << 7) +#define SECI_UART_FCR_ALM (1 << 8) + +/* SECI UART LCR register bits */ +#define SECI_UART_LCR_STOP_BITS (1 << 0) /* 0 - 1bit, 1 - 2bits */ +#define SECI_UART_LCR_PARITY_EN (1 << 1) +#define SECI_UART_LCR_PARITY (1 << 2) /* 0 - odd, 1 - even */ +#define SECI_UART_LCR_RX_EN (1 << 3) +#define SECI_UART_LCR_LBRK_CTRL (1 << 4) /* 1 => SECI_OUT held low */ +#define SECI_UART_LCR_TXO_EN (1 << 5) +#define SECI_UART_LCR_RTSO_EN (1 << 6) +#define SECI_UART_LCR_SLIPMODE_EN (1 << 7) +#define SECI_UART_LCR_RXCRC_CHK (1 << 8) +#define SECI_UART_LCR_TXCRC_INV (1 << 9) +#define SECI_UART_LCR_TXCRC_LSBF (1 << 10) +#define SECI_UART_LCR_TXCRC_EN (1 << 11) +#define SECI_UART_LCR_RXSYNC_EN (1 << 12) + +#define SECI_UART_MCR_TX_EN (1 << 0) +#define SECI_UART_MCR_PRTS (1 << 1) +#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2) +#define SECI_UART_MCR_HIGHRATE_EN (1 << 3) +#define SECI_UART_MCR_LOOPBK_EN (1 << 4) +#define SECI_UART_MCR_AUTO_RTS (1 << 5) +#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6) +#define 
SECI_UART_MCR_BAUD_ADJ_EN (1 << 7) +#define SECI_UART_MCR_XONOFF_RPT (1 << 9) + +/* SeciUARTLSR Bit Mask */ +#define SECI_UART_LSR_RXOVR_MASK (1 << 0) +#define SECI_UART_LSR_RFF_MASK (1 << 1) +#define SECI_UART_LSR_TFNE_MASK (1 << 2) +#define SECI_UART_LSR_TI_MASK (1 << 3) +#define SECI_UART_LSR_TPR_MASK (1 << 4) +#define SECI_UART_LSR_TXHALT_MASK (1 << 5) + +/* SeciUARTMSR Bit Mask */ +#define SECI_UART_MSR_CTSS_MASK (1 << 0) +#define SECI_UART_MSR_RTSS_MASK (1 << 1) +#define SECI_UART_MSR_SIS_MASK (1 << 2) +#define SECI_UART_MSR_SIS2_MASK (1 << 3) + +/* SeciUARTData Bits */ +#define SECI_UART_DATA_RF_NOT_EMPTY_BIT (1 << 12) +#define SECI_UART_DATA_RF_FULL_BIT (1 << 13) +#define SECI_UART_DATA_RF_OVRFLOW_BIT (1 << 14) +#define SECI_UART_DATA_FIFO_PTR_MASK 0xFF +#define SECI_UART_DATA_RF_RD_PTR_SHIFT 16 +#define SECI_UART_DATA_RF_WR_PTR_SHIFT 24 + +/* LTECX: ltecxmux */ +#define LTECX_EXTRACT_MUX(val, idx) (getbit4(&(val), (idx))) + +/* LTECX: ltecxmux MODE */ +#define LTECX_MUX_MODE_IDX 0 +#define LTECX_MUX_MODE_WCI2 0x0 +#define LTECX_MUX_MODE_GPIO 0x1 + +/* LTECX GPIO Information Index */ +#define LTECX_NVRAM_FSYNC_IDX 0 +#define LTECX_NVRAM_LTERX_IDX 1 +#define LTECX_NVRAM_LTETX_IDX 2 +#define LTECX_NVRAM_WLPRIO_IDX 3 + +/* LTECX WCI2 Information Index */ +#define LTECX_NVRAM_WCI2IN_IDX 0 +#define LTECX_NVRAM_WCI2OUT_IDX 1 + +/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */ +#define LTECX_EXTRACT_PADNUM(val, idx) (getbit8(&(val), (idx))) +#define LTECX_EXTRACT_FNSEL(val, idx) (getbit4(&(val), (idx))) +#define LTECX_EXTRACT_GCIGPIO(val, idx) (getbit4(&(val), (idx))) + +/* WLAN channel numbers - used from wifi.h */ + +/* WLAN BW */ +#define ECI_BW_20 0x0 +#define ECI_BW_25 0x1 +#define ECI_BW_30 0x2 +#define ECI_BW_35 0x3 +#define ECI_BW_40 0x4 +#define ECI_BW_45 0x5 +#define ECI_BW_50 0x6 +#define ECI_BW_ALL 0x7 + +/* WLAN - number of antenna */ +#define WLAN_NUM_ANT1 TXANT_0 +#define WLAN_NUM_ANT2 TXANT_1 + +/* otpctrl1 0xF4 */ +#define OTPC_FORCE_PWR_OFF 
0x02000000 +/* chipcommon s/r registers introduced with cc rev >= 48 */ +#define CC_SR_CTL0_ENABLE_MASK 0x1 +#define CC_SR_CTL0_ENABLE_SHIFT 0 +#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */ +#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to sr_engine */ +#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk in sr_engine */ +#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 /* Allow Subcore mem StandBy? */ +#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18 +#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19 +#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power domains */ +#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25 +#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30 + +#define CC_SR_CTL1_SR_INIT_MASK 0x3FF +#define CC_SR_CTL1_SR_INIT_SHIFT 0 + +#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */ +#define ECI_INLO_PKTDUR_SHIFT 4 + +/* gci chip control bits */ +#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT 0 +#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT 1 +#define GCI_GPIO_CHIPCTRL_INVERT_BIT 2 +#define GCI_GPIO_CHIPCTRL_PULLUP_BIT 3 +#define GCI_GPIO_CHIPCTRL_PULLDN_BIT 4 +#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT 5 +#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT 6 +#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT 7 + +/* gci GPIO input status bits */ +#define GCI_GPIO_STS_VALUE_BIT 0 +#define GCI_GPIO_STS_POS_EDGE_BIT 1 +#define GCI_GPIO_STS_NEG_EDGE_BIT 2 +#define GCI_GPIO_STS_FAST_EDGE_BIT 3 +#define GCI_GPIO_STS_CLEAR 0xF + +#define GCI_GPIO_STS_EDGE_TRIG_BIT 0 +#define GCI_GPIO_STS_NEG_EDGE_TRIG_BIT 1 +#define GCI_GPIO_STS_DUAL_EDGE_TRIG_BIT 2 +#define GCI_GPIO_STS_WL_DIN_SELECT 6 + +#define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT) + +/* SR Power Control */ +#define SRPWR_DMN0_PCIE (0) /* PCIE */ +#define SRPWR_DMN0_PCIE_SHIFT (SRPWR_DMN0_PCIE) /* PCIE */ +#define SRPWR_DMN0_PCIE_MASK (1 << SRPWR_DMN0_PCIE_SHIFT) /* PCIE */ +#define SRPWR_DMN1_ARMBPSD (1) /* ARM/BP/SDIO */ +#define SRPWR_DMN1_ARMBPSD_SHIFT 
(SRPWR_DMN1_ARMBPSD) /* ARM/BP/SDIO */ +#define SRPWR_DMN1_ARMBPSD_MASK (1 << SRPWR_DMN1_ARMBPSD_SHIFT) /* ARM/BP/SDIO */ +#define SRPWR_DMN2_MACAUX (2) /* MAC/Phy Aux */ +#define SRPWR_DMN2_MACAUX_SHIFT (SRPWR_DMN2_MACAUX) /* MAC/Phy Aux */ +#define SRPWR_DMN2_MACAUX_MASK (1 << SRPWR_DMN2_MACAUX_SHIFT) /* MAC/Phy Aux */ +#define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */ +#define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */ +#define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */ +#define SRPWR_DMN_ALL_MASK (0xF) + +#define SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */ +#define SRPWR_REQON_MASK (SRPWR_DMN_ALL_MASK << SRPWR_REQON_SHIFT) +#define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */ +#define SRPWR_STATUS_MASK (SRPWR_DMN_ALL_MASK << SRPWR_STATUS_SHIFT) +#define SRPWR_DMN_SHIFT (28) /* PowerDomain[31:28], RO */ +#define SRPWR_DMN_MASK (SRPWR_DMN_ALL_MASK << SRPWR_DMN_SHIFT) + +/* PMU Precision Usec Timer */ +#define PMU_PREC_USEC_TIMER_ENABLE 0x1 + +/* FISCtrlStatus */ +#define PMU_CLEAR_FIS_DONE_SHIFT 1u +#define PMU_CLEAR_FIS_DONE_MASK (1u << PMU_CLEAR_FIS_DONE_SHIFT) + +#endif /* _SBCHIPC_H */ diff --git a/bcmdhd.100.10.315.x/include/sbconfig.h b/bcmdhd.100.10.315.x/include/sbconfig.h new file mode 100644 index 0000000..d38364e --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbconfig.h @@ -0,0 +1,285 @@ +/* + * Broadcom SiliconBackplane hardware register definitions. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbconfig.h 654158 2016-08-11 09:30:01Z $ + */ + +#ifndef _SBCONFIG_H +#define _SBCONFIG_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif // endif + +/* enumeration in SB is based on the premise that cores are contiguous in the + * enumeration space. + */ +#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */ +#define SB_BUS_BASE(sih, b) (SI_ENUM_BASE(sih) + (b) * SB_BUS_SIZE) +#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */ + +/* + * Sonics Configuration Space Registers. 
+ */ +#define SBCONFIGOFF 0xf00 /**< core sbconfig regs are top 256bytes of regs */ +#define SBCONFIGSIZE 256 /**< sizeof (sbconfig_t) */ + +#define SBIPSFLAG 0x08 +#define SBTPSFLAG 0x18 +#define SBTMERRLOGA 0x48 /**< sonics >= 2.3 */ +#define SBTMERRLOG 0x50 /**< sonics >= 2.3 */ +#define SBADMATCH3 0x60 +#define SBADMATCH2 0x68 +#define SBADMATCH1 0x70 +#define SBIMSTATE 0x90 +#define SBINTVEC 0x94 +#define SBTMSTATELOW 0x98 +#define SBTMSTATEHIGH 0x9c +#define SBBWA0 0xa0 +#define SBIMCONFIGLOW 0xa8 +#define SBIMCONFIGHIGH 0xac +#define SBADMATCH0 0xb0 +#define SBTMCONFIGLOW 0xb8 +#define SBTMCONFIGHIGH 0xbc +#define SBBCONFIG 0xc0 +#define SBBSTATE 0xc8 +#define SBACTCNFG 0xd8 +#define SBFLAGST 0xe8 +#define SBIDLOW 0xf8 +#define SBIDHIGH 0xfc + +/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have + * a few registers *below* that line. I think it would be very confusing to try + * and change the value of SBCONFIGOFF, so I'm definig them as absolute offsets here, + */ + +#define SBIMERRLOGA 0xea8 +#define SBIMERRLOG 0xeb0 +#define SBTMPORTCONNID0 0xed8 +#define SBTMPORTLOCK0 0xef8 + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +typedef volatile struct _sbconfig { + uint32 PAD[2]; + uint32 sbipsflag; /**< initiator port ocp slave flag */ + uint32 PAD[3]; + uint32 sbtpsflag; /**< target port ocp slave flag */ + uint32 PAD[11]; + uint32 sbtmerrloga; /**< (sonics >= 2.3) */ + uint32 PAD; + uint32 sbtmerrlog; /**< (sonics >= 2.3) */ + uint32 PAD[3]; + uint32 sbadmatch3; /**< address match3 */ + uint32 PAD; + uint32 sbadmatch2; /**< address match2 */ + uint32 PAD; + uint32 sbadmatch1; /**< address match1 */ + uint32 PAD[7]; + uint32 sbimstate; /**< initiator agent state */ + uint32 sbintvec; /**< interrupt mask */ + uint32 sbtmstatelow; /**< target state */ + uint32 sbtmstatehigh; /**< target state */ + uint32 sbbwa0; /**< bandwidth allocation table0 */ + uint32 PAD; + uint32 sbimconfiglow; /**< initiator configuration 
*/ + uint32 sbimconfighigh; /**< initiator configuration */ + uint32 sbadmatch0; /**< address match0 */ + uint32 PAD; + uint32 sbtmconfiglow; /**< target configuration */ + uint32 sbtmconfighigh; /**< target configuration */ + uint32 sbbconfig; /**< broadcast configuration */ + uint32 PAD; + uint32 sbbstate; /**< broadcast state */ + uint32 PAD[3]; + uint32 sbactcnfg; /**< activate configuration */ + uint32 PAD[3]; + uint32 sbflagst; /**< current sbflags */ + uint32 PAD[3]; + uint32 sbidlow; /**< identification */ + uint32 sbidhigh; /**< identification */ +} sbconfig_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +/* sbipsflag */ +#define SBIPS_INT1_MASK 0x3f /**< which sbflags get routed to mips interrupt 1 */ +#define SBIPS_INT1_SHIFT 0 +#define SBIPS_INT2_MASK 0x3f00 /**< which sbflags get routed to mips interrupt 2 */ +#define SBIPS_INT2_SHIFT 8 +#define SBIPS_INT3_MASK 0x3f0000 /**< which sbflags get routed to mips interrupt 3 */ +#define SBIPS_INT3_SHIFT 16 +#define SBIPS_INT4_MASK 0x3f000000 /**< which sbflags get routed to mips interrupt 4 */ +#define SBIPS_INT4_SHIFT 24 + +/* sbtpsflag */ +#define SBTPS_NUM0_MASK 0x3f /**< interrupt sbFlag # generated by this core */ +#define SBTPS_F0EN0 0x40 /**< interrupt is always sent on the backplane */ + +/* sbtmerrlog */ +#define SBTMEL_CM 0x00000007 /**< command */ +#define SBTMEL_CI 0x0000ff00 /**< connection id */ +#define SBTMEL_EC 0x0f000000 /**< error code */ +#define SBTMEL_ME 0x80000000 /**< multiple error */ + +/* sbimstate */ +#define SBIM_PC 0xf /**< pipecount */ +#define SBIM_AP_MASK 0x30 /**< arbitration policy */ +#define SBIM_AP_BOTH 0x00 /**< use both timeslaces and token */ +#define SBIM_AP_TS 0x10 /**< use timesliaces only */ +#define SBIM_AP_TK 0x20 /**< use token only */ +#define SBIM_AP_RSV 0x30 /**< reserved */ +#define SBIM_IBE 0x20000 /**< inbanderror */ +#define SBIM_TO 0x40000 /**< timeout */ +#define SBIM_BY 0x01800000 /**< busy (sonics >= 2.3) */ +#define SBIM_RJ 0x02000000 
/**< reject (sonics >= 2.3) */ + +/* sbtmstatelow */ +#define SBTML_RESET 0x0001 /**< reset */ +#define SBTML_REJ_MASK 0x0006 /**< reject field */ +#define SBTML_REJ 0x0002 /**< reject */ +#define SBTML_TMPREJ 0x0004 /**< temporary reject, for error recovery */ + +#define SBTML_SICF_SHIFT 16 /**< Shift to locate the SI control flags in sbtml */ + +/* sbtmstatehigh */ +#define SBTMH_SERR 0x0001 /**< serror */ +#define SBTMH_INT 0x0002 /**< interrupt */ +#define SBTMH_BUSY 0x0004 /**< busy */ +#define SBTMH_TO 0x0020 /**< timeout (sonics >= 2.3) */ + +#define SBTMH_SISF_SHIFT 16 /**< Shift to locate the SI status flags in sbtmh */ + +/* sbbwa0 */ +#define SBBWA_TAB0_MASK 0xffff /**< lookup table 0 */ +#define SBBWA_TAB1_MASK 0xffff /**< lookup table 1 */ +#define SBBWA_TAB1_SHIFT 16 + +/* sbimconfiglow */ +#define SBIMCL_STO_MASK 0x7 /**< service timeout */ +#define SBIMCL_RTO_MASK 0x70 /**< request timeout */ +#define SBIMCL_RTO_SHIFT 4 +#define SBIMCL_CID_MASK 0xff0000 /**< connection id */ +#define SBIMCL_CID_SHIFT 16 + +/* sbimconfighigh */ +#define SBIMCH_IEM_MASK 0xc /**< inband error mode */ +#define SBIMCH_TEM_MASK 0x30 /**< timeout error mode */ +#define SBIMCH_TEM_SHIFT 4 +#define SBIMCH_BEM_MASK 0xc0 /**< bus error mode */ +#define SBIMCH_BEM_SHIFT 6 + +/* sbadmatch0 */ +#define SBAM_TYPE_MASK 0x3 /**< address type */ +#define SBAM_AD64 0x4 /**< reserved */ +#define SBAM_ADINT0_MASK 0xf8 /**< type0 size */ +#define SBAM_ADINT0_SHIFT 3 +#define SBAM_ADINT1_MASK 0x1f8 /**< type1 size */ +#define SBAM_ADINT1_SHIFT 3 +#define SBAM_ADINT2_MASK 0x1f8 /**< type2 size */ +#define SBAM_ADINT2_SHIFT 3 +#define SBAM_ADEN 0x400 /**< enable */ +#define SBAM_ADNEG 0x800 /**< negative decode */ +#define SBAM_BASE0_MASK 0xffffff00 /**< type0 base address */ +#define SBAM_BASE0_SHIFT 8 +#define SBAM_BASE1_MASK 0xfffff000 /**< type1 base address for the core */ +#define SBAM_BASE1_SHIFT 12 +#define SBAM_BASE2_MASK 0xffff0000 /**< type2 base address for the core */ +#define 
SBAM_BASE2_SHIFT 16 + +/* sbtmconfiglow */ +#define SBTMCL_CD_MASK 0xff /**< clock divide */ +#define SBTMCL_CO_MASK 0xf800 /**< clock offset */ +#define SBTMCL_CO_SHIFT 11 +#define SBTMCL_IF_MASK 0xfc0000 /**< interrupt flags */ +#define SBTMCL_IF_SHIFT 18 +#define SBTMCL_IM_MASK 0x3000000 /**< interrupt mode */ +#define SBTMCL_IM_SHIFT 24 + +/* sbtmconfighigh */ +#define SBTMCH_BM_MASK 0x3 /**< busy mode */ +#define SBTMCH_RM_MASK 0x3 /**< retry mode */ +#define SBTMCH_RM_SHIFT 2 +#define SBTMCH_SM_MASK 0x30 /**< stop mode */ +#define SBTMCH_SM_SHIFT 4 +#define SBTMCH_EM_MASK 0x300 /**< sb error mode */ +#define SBTMCH_EM_SHIFT 8 +#define SBTMCH_IM_MASK 0xc00 /**< int mode */ +#define SBTMCH_IM_SHIFT 10 + +/* sbbconfig */ +#define SBBC_LAT_MASK 0x3 /**< sb latency */ +#define SBBC_MAX0_MASK 0xf0000 /**< maxccntr0 */ +#define SBBC_MAX0_SHIFT 16 +#define SBBC_MAX1_MASK 0xf00000 /**< maxccntr1 */ +#define SBBC_MAX1_SHIFT 20 + +/* sbbstate */ +#define SBBS_SRD 0x1 /**< st reg disable */ +#define SBBS_HRD 0x2 /**< hold reg disable */ + +/* sbidlow */ +#define SBIDL_CS_MASK 0x3 /**< config space */ +#define SBIDL_AR_MASK 0x38 /**< # address ranges supported */ +#define SBIDL_AR_SHIFT 3 +#define SBIDL_SYNCH 0x40 /**< sync */ +#define SBIDL_INIT 0x80 /**< initiator */ +#define SBIDL_MINLAT_MASK 0xf00 /**< minimum backplane latency */ +#define SBIDL_MINLAT_SHIFT 8 +#define SBIDL_MAXLAT 0xf000 /**< maximum backplane latency */ +#define SBIDL_MAXLAT_SHIFT 12 +#define SBIDL_FIRST 0x10000 /**< this initiator is first */ +#define SBIDL_CW_MASK 0xc0000 /**< cycle counter width */ +#define SBIDL_CW_SHIFT 18 +#define SBIDL_TP_MASK 0xf00000 /**< target ports */ +#define SBIDL_TP_SHIFT 20 +#define SBIDL_IP_MASK 0xf000000 /**< initiator ports */ +#define SBIDL_IP_SHIFT 24 +#define SBIDL_RV_MASK 0xf0000000 /**< sonics backplane revision code */ +#define SBIDL_RV_SHIFT 28 +#define SBIDL_RV_2_2 0x00000000 /**< version 2.2 or earlier */ +#define SBIDL_RV_2_3 0x10000000 /**< version 2.3 
*/ + +/* sbidhigh */ +#define SBIDH_RC_MASK 0x000f /**< revision code */ +#define SBIDH_RCE_MASK 0x7000 /**< revision code extension field */ +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 /**< core code */ +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 /**< vendor code */ +#define SBIDH_VC_SHIFT 16 + +#define SB_COMMIT 0xfd8 /**< update buffered registers value */ + +/* vendor codes */ +#define SB_VEND_BCM 0x4243 /**< Broadcom's SB vendor code */ + +#endif /* _SBCONFIG_H */ diff --git a/bcmdhd.100.10.315.x/include/sbgci.h b/bcmdhd.100.10.315.x/include/sbgci.h new file mode 100644 index 0000000..121f04a --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbgci.h @@ -0,0 +1,273 @@ +/* + * SiliconBackplane GCI core hardware definitions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbgci.h 696881 2017-04-28 17:20:35Z $ + */ + +#ifndef _SBGCI_H +#define _SBGCI_H + +#include + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +#define GCI_OFFSETOF(sih, reg) \ + (AOB_ENAB(sih) ? OFFSETOF(gciregs_t, reg) : OFFSETOF(chipcregs_t, reg)) +#define GCI_CORE_IDX(sih) (AOB_ENAB(sih) ? si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX) + +typedef volatile struct { + uint32 gci_corecaps0; /* 0x000 */ + uint32 gci_corecaps1; /* 0x004 */ + uint32 gci_corecaps2; /* 0x008 */ + uint32 gci_corectrl; /* 0x00c */ + uint32 gci_corestat; /* 0x010 */ + uint32 gci_intstat; /* 0x014 */ + uint32 gci_intmask; /* 0x018 */ + uint32 gci_wakemask; /* 0x01c */ + uint32 gci_levelintstat; /* 0x020 */ + uint32 gci_eventintstat; /* 0x024 */ + uint32 gci_wakelevelintstat; /* 0x028 */ + uint32 gci_wakeeventintstat; /* 0x02c */ + uint32 semaphoreintstatus; /* 0x030 */ + uint32 semaphoreintmask; /* 0x034 */ + uint32 semaphorerequest; /* 0x038 */ + uint32 semaphorereserve; /* 0x03c */ + uint32 gci_indirect_addr; /* 0x040 */ + uint32 gci_gpioctl; /* 0x044 */ + uint32 gci_gpiostatus; /* 0x048 */ + uint32 gci_gpiomask; /* 0x04c */ + uint32 gci_eventsummary; /* 0x050 */ + uint32 gci_miscctl; /* 0x054 */ + uint32 gci_gpiointmask; /* 0x058 */ + uint32 gci_gpiowakemask; /* 0x05c */ + uint32 gci_input[32]; /* 0x060 */ + uint32 gci_event[32]; /* 0x0e0 */ + uint32 gci_output[4]; /* 0x160 */ + uint32 gci_control_0; /* 0x170 */ + uint32 gci_control_1; /* 0x174 */ + uint32 gci_intpolreg; /* 0x178 */ + uint32 gci_levelintmask; /* 0x17c */ + uint32 
gci_eventintmask; /* 0x180 */ + uint32 wakelevelintmask; /* 0x184 */ + uint32 wakeeventintmask; /* 0x188 */ + uint32 hwmask; /* 0x18c */ + uint32 PAD; + uint32 gci_inbandeventintmask; /* 0x194 */ + uint32 PAD; + uint32 gci_inbandeventstatus; /* 0x19c */ + uint32 gci_seciauxtx; /* 0x1a0 */ + uint32 gci_seciauxrx; /* 0x1a4 */ + uint32 gci_secitx_datatag; /* 0x1a8 */ + uint32 gci_secirx_datatag; /* 0x1ac */ + uint32 gci_secitx_datamask; /* 0x1b0 */ + uint32 gci_seciusef0tx_reg; /* 0x1b4 */ + uint32 gci_secif0tx_offset; /* 0x1b8 */ + uint32 gci_secif0rx_offset; /* 0x1bc */ + uint32 gci_secif1tx_offset; /* 0x1c0 */ + uint32 gci_rxfifo_common_ctrl; /* 0x1c4 */ + uint32 gci_rxfifoctrl; /* 0x1c8 */ + uint32 gci_hw_sema_status; /* 0x1cc */ + uint32 gci_seciuartescval; /* 0x1d0 */ + uint32 gic_seciuartautobaudctr; /* 0x1d4 */ + uint32 gci_secififolevel; /* 0x1d8 */ + uint32 gci_seciuartdata; /* 0x1dc */ + uint32 gci_secibauddiv; /* 0x1e0 */ + uint32 gci_secifcr; /* 0x1e4 */ + uint32 gci_secilcr; /* 0x1e8 */ + uint32 gci_secimcr; /* 0x1ec */ + uint32 gci_secilsr; /* 0x1f0 */ + uint32 gci_secimsr; /* 0x1f4 */ + uint32 gci_baudadj; /* 0x1f8 */ + uint32 gci_inbandintmask; /* 0x1fc */ + uint32 gci_chipctrl; /* 0x200 */ + uint32 gci_chipsts; /* 0x204 */ + uint32 gci_gpioout; /* 0x208 */ + uint32 gci_gpioout_read; /* 0x20C */ + uint32 gci_mpwaketx; /* 0x210 */ + uint32 gci_mpwakedetect; /* 0x214 */ + uint32 gci_seciin_ctrl; /* 0x218 */ + uint32 gci_seciout_ctrl; /* 0x21C */ + uint32 gci_seciin_auxfifo_en; /* 0x220 */ + uint32 gci_seciout_txen_txbr; /* 0x224 */ + uint32 gci_seciin_rxbrstatus; /* 0x228 */ + uint32 gci_seciin_rxerrstatus; /* 0x22C */ + uint32 gci_seciin_fcstatus; /* 0x230 */ + uint32 gci_seciout_txstatus; /* 0x234 */ + uint32 gci_seciout_txbrstatus; /* 0x238 */ + uint32 wlan_mem_info; /* 0x23C */ + uint32 wlan_bankxinfo; /* 0x240 */ + uint32 bt_smem_select; /* 0x244 */ + uint32 bt_smem_stby; /* 0x248 */ + uint32 bt_smem_status; /* 0x24C */ + uint32 
wlan_bankxactivepda; /* 0x250 */ + uint32 wlan_bankxsleeppda; /* 0x254 */ + uint32 wlan_bankxkill; /* 0x258 */ + uint32 PAD[PADSZ(0x25c, 0x268)]; /* 0x25c-0x268 */ + uint32 bt_smem_control0; /* 0x26C */ + uint32 bt_smem_control1; /* 0x270 */ + uint32 PAD[PADSZ(0x274, 0x2fc)]; /* 0x274-0x2fc */ + uint32 gci_chipid; /* 0x300 */ + uint32 PAD[PADSZ(0x304, 0x30c)]; /* 0x304-0x30c */ + uint32 otpstatus; /* 0x310 */ + uint32 otpcontrol; /* 0x314 */ + uint32 otpprog; /* 0x318 */ + uint32 otplayout; /* 0x31c */ + uint32 otplayoutextension; /* 0x320 */ + uint32 otpcontrol1; /* 0x324 */ + uint32 otpprogdata; /* 0x328 */ + uint32 PAD[PADSZ(0x32c, 0x3f8)]; /* 0x32c-0x3f8 */ + uint32 otpECCstatus; /* 0x3FC */ + uint32 PAD[PADSZ(0x400, 0xbfc)]; /* 0x400-0xbfc */ + uint32 lhl_core_capab_adr; /* 0xC00 */ + uint32 lhl_main_ctl_adr; /* 0xC04 */ + uint32 lhl_pmu_ctl_adr; /* 0xC08 */ + uint32 lhl_extlpo_ctl_adr; /* 0xC0C */ + uint32 lpo_ctl_adr; /* 0xC10 */ + uint32 lhl_lpo2_ctl_adr; /* 0xC14 */ + uint32 lhl_osc32k_ctl_adr; /* 0xC18 */ + uint32 lhl_clk_status_adr; /* 0xC1C */ + uint32 lhl_clk_det_ctl_adr; /* 0xC20 */ + uint32 lhl_clk_sel_adr; /* 0xC24 */ + uint32 hidoff_cnt_adr[2]; /* 0xC28-0xC2C */ + uint32 lhl_autoclk_ctl_adr; /* 0xC30 */ + uint32 PAD; /* reserved */ + uint32 lhl_hibtim_adr; /* 0xC38 */ + uint32 lhl_wl_ilp_val_adr; /* 0xC3C */ + uint32 lhl_wl_armtim0_intrp_adr; /* 0xC40 */ + uint32 lhl_wl_armtim0_st_adr; /* 0xC44 */ + uint32 lhl_wl_armtim0_adr; /* 0xC48 */ + uint32 PAD[PADSZ(0xc4c, 0xc6c)]; /* 0xC4C-0xC6C */ + uint32 lhl_wl_mactim0_intrp_adr; /* 0xC70 */ + uint32 lhl_wl_mactim0_st_adr; /* 0xC74 */ + uint32 lhl_wl_mactim_int0_adr; /* 0xC78 */ + uint32 lhl_wl_mactim_frac0_adr; /* 0xC7C */ + uint32 lhl_wl_mactim1_intrp_adr; /* 0xC80 */ + uint32 lhl_wl_mactim1_st_adr; /* 0xC84 */ + uint32 lhl_wl_mactim_int1_adr; /* 0xC88 */ + uint32 lhl_wl_mactim_frac1_adr; /* 0xC8C */ + uint32 PAD[PADSZ(0xc90, 0xcac)]; /* 0xC90-0xCAC */ + uint32 gpio_int_en_port_adr[4]; /* 0xCB0-0xCBC 
*/ + uint32 gpio_int_st_port_adr[4]; /* 0xCC0-0xCCC */ + uint32 gpio_ctrl_iocfg_p_adr[40]; /* 0xCD0-0xD6C */ + uint32 lhl_lp_up_ctl1_adr; /* 0xd70 */ + uint32 lhl_lp_dn_ctl1_adr; /* 0xd74 */ + uint32 PAD[PADSZ(0xd78, 0xdb4)]; /* 0xd78-0xdb4 */ + uint32 lhl_sleep_timer_adr; /* 0xDB8 */ + uint32 lhl_sleep_timer_ctl_adr; /* 0xDBC */ + uint32 lhl_sleep_timer_load_val_adr; /* 0xDC0 */ + uint32 lhl_lp_main_ctl_adr; /* 0xDC4 */ + uint32 lhl_lp_up_ctl_adr; /* 0xDC8 */ + uint32 lhl_lp_dn_ctl_adr; /* 0xDCC */ + uint32 gpio_gctrl_iocfg_p0_p39_adr; /* 0xDD0 */ + uint32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr; /* 0xDD4 */ + uint32 gpio_gdsctrl_iocfg_p26_p29_adr; /* 0xDD8 */ + uint32 PAD[PADSZ(0xddc, 0xdf8)]; /* 0xDDC-0xDF8 */ + uint32 lhl_gpio_din0_adr; /* 0xDFC */ + uint32 lhl_gpio_din1_adr; /* 0xE00 */ + uint32 lhl_wkup_status_adr; /* 0xE04 */ + uint32 lhl_ctl_adr; /* 0xE08 */ + uint32 lhl_adc_ctl_adr; /* 0xE0C */ + uint32 lhl_qdxyz_in_dly_adr; /* 0xE10 */ + uint32 lhl_optctl_adr; /* 0xE14 */ + uint32 lhl_optct2_adr; /* 0xE18 */ + uint32 lhl_scanp_cntr_init_val_adr; /* 0xE1C */ + uint32 lhl_opt_togg_val_adr[6]; /* 0xE20-0xE34 */ + uint32 lhl_optx_smp_val_adr; /* 0xE38 */ + uint32 lhl_opty_smp_val_adr; /* 0xE3C */ + uint32 lhl_optz_smp_val_adr; /* 0xE40 */ + uint32 lhl_hidoff_keepstate_adr[3]; /* 0xE44-0xE4C */ + uint32 lhl_bt_slmboot_ctl0_adr[4]; /* 0xE50-0xE5C */ + uint32 lhl_wl_fw_ctl; /* 0xE60 */ + uint32 lhl_wl_hw_ctl_adr[2]; /* 0xE64-0xE68 */ + uint32 lhl_bt_hw_ctl_adr; /* 0xE6C */ + uint32 lhl_top_pwrseq_en_adr; /* 0xE70 */ + uint32 lhl_top_pwrdn_ctl_adr; /* 0xE74 */ + uint32 lhl_top_pwrup_ctl_adr; /* 0xE78 */ + uint32 lhl_top_pwrseq_ctl_adr; /* 0xE7C */ + uint32 lhl_top_pwrdn2_ctl_adr; /* 0xE80 */ + uint32 lhl_top_pwrup2_ctl_adr; /* 0xE84 */ + uint32 wpt_regon_intrp_cfg_adr; /* 0xE88 */ + uint32 bt_regon_intrp_cfg_adr; /* 0xE8C */ + uint32 wl_regon_intrp_cfg_adr; /* 0xE90 */ + uint32 regon_intrp_st_adr; /* 0xE94 */ + uint32 regon_intrp_en_adr; /* 0xE98 */ + uint32 
PAD[PADSZ(0xe9c, 0xeb4)]; /* 0xe9c-0xeb4 */ + uint32 lhl_lp_main_ctl1_adr; /* 0xeb8 */ + uint32 lhl_lp_up_ctl2_adr; /* 0xebc */ + uint32 lhl_lp_dn_ctl2_adr; /* 0xec0 */ + uint32 lhl_lp_up_ctl3_adr; /* 0xec4 */ + uint32 lhl_lp_dn_ctl3_adr; /* 0xec8 */ + uint32 PAD[PADSZ(0xecc, 0xed8)]; /* 0xecc-0xed8 */ + uint32 lhl_lp_main_ctl2_adr; /* 0xedc */ + uint32 lhl_lp_up_ctl4_adr; /* 0xee0 */ + uint32 lhl_lp_dn_ctl4_adr; /* 0xee4 */ + uint32 lhl_lp_up_ctl5_adr; /* 0xee8 */ + uint32 lhl_lp_dn_ctl5_adr; /* 0xeec */ + +} gciregs_t; + +#define GCI_CAP0_REV_MASK 0x000000ff + +/* GCI Capabilities registers */ +#define GCI_CORE_CAP_0_COREREV_MASK 0xFF +#define GCI_CORE_CAP_0_COREREV_SHIFT 0 + +#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK 0x3F +#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT 0 +#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK 0xF +#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT 16 + +#define WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK 0xFFFF + +#define WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK 0x1 + +/* WLAN BankXInfo Register */ +#define WLAN_BANKXINFO_BANK_SIZE_MASK 0x00FFF000 +#define WLAN_BANKXINFO_BANK_SIZE_SHIFT 12 + +/* WLAN Mem Info Register */ +#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK 0x000000FF +#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT 0 + +#define WLAN_MEM_INFO_REG_NUMD11MACBM_MASK 0x0000FF00 +#define WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT 8 + +#define WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK 0x00FF0000 +#define WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT 16 + +#define WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK 0xFF000000 +#define WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT 24 + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +#endif /* _SBGCI_H */ diff --git a/bcmdhd.100.10.315.x/include/sbhnddma.h b/bcmdhd.100.10.315.x/include/sbhnddma.h new file mode 100644 index 0000000..88a8844 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbhnddma.h @@ -0,0 +1,449 @@ +/* + * Generic Broadcom Home Networking Division (HND) DMA engine HW interface + * This supports the following chips: 
BCM42xx, 44xx, 47xx . + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbhnddma.h 694506 2017-04-13 05:10:05Z $ + */ + +#ifndef _sbhnddma_h_ +#define _sbhnddma_h_ + +/* DMA structure: + * support two DMA engines: 32 bits address or 64 bit addressing + * basic DMA register set is per channel(transmit or receive) + * a pair of channels is defined for convenience + */ + +/* 32 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 addr; /**< descriptor ring base address (4K aligned) */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 status; /**< current active descriptor, et al */ +} dma32regs_t; + +typedef volatile struct { + dma32regs_t xmt; /**< dma tx channel */ + dma32regs_t rcv; /**< dma rx channel */ +} dma32regp_t; + +typedef volatile struct { /* diag access */ + uint32 fifoaddr; /**< diag address */ + uint32 fifodatalow; /**< low 32bits of data */ + uint32 fifodatahigh; /**< high 32bits of data */ + uint32 pad; /**< reserved */ +} dma32diag_t; + +/** + * DMA Descriptor + * Descriptors are only read by the hardware, never written back. + */ +typedef volatile struct { + uint32 ctrl; /**< misc control bits & bufcount */ + uint32 addr; /**< data buffer address */ +} dma32dd_t; + +/** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. 
*/ +#define D32RINGALIGN_BITS 12 +#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) +#define D32RINGALIGN (1 << D32RINGALIGN_BITS) + +#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) + +/* transmit channel control */ +#define XC_XE ((uint32)1 << 0) /**< transmit enable */ +#define XC_SE ((uint32)1 << 1) /**< transmit suspend request */ +#define XC_LE ((uint32)1 << 2) /**< loopback enable */ +#define XC_FL ((uint32)1 << 4) /**< flush request */ +#define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define XC_MR_SHIFT 6 +#define XC_PD ((uint32)1 << 11) /**< parity check disable */ +#define XC_AE ((uint32)3 << 16) /**< address extension bits */ +#define XC_AE_SHIFT 16 +#define XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define XC_BL_SHIFT 18 +#define XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define XC_PC_SHIFT 21 +#define XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define XC_PT_SHIFT 24 + +/** Multiple outstanding reads */ +#define DMA_MR_1 0 +#define DMA_MR_2 1 +#define DMA_MR_4 2 +#define DMA_MR_8 3 +#define DMA_MR_12 4 +#define DMA_MR_16 5 +#define DMA_MR_20 6 +#define DMA_MR_32 7 + +/** DMA Burst Length in bytes */ +#define DMA_BL_16 0 +#define DMA_BL_32 1 +#define DMA_BL_64 2 +#define DMA_BL_128 3 +#define DMA_BL_256 4 +#define DMA_BL_512 5 +#define DMA_BL_1024 6 + +/** Prefetch control */ +#define DMA_PC_0 0 +#define DMA_PC_4 1 +#define DMA_PC_8 2 +#define DMA_PC_16 3 +#define DMA_PC_32 4 +/* others: reserved */ + +/** Prefetch threshold */ +#define DMA_PT_1 0 +#define DMA_PT_2 1 +#define DMA_PT_4 2 +#define DMA_PT_8 3 + +/** Channel Switch */ +#define DMA_CS_OFF 0 +#define DMA_CS_ON 1 + +/* transmit descriptor table pointer */ +#define XP_LD_MASK 0xfff /**< last valid descriptor */ + +/* transmit channel status */ +#define XS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define XS_XS_MASK 0xf000 /**< transmit state */ +#define XS_XS_SHIFT 12 +#define XS_XS_DISABLED 0x0000 /**< disabled */ +#define XS_XS_ACTIVE 0x1000 /**< 
active */ +#define XS_XS_IDLE 0x2000 /**< idle wait */ +#define XS_XS_STOPPED 0x3000 /**< stopped */ +#define XS_XS_SUSP 0x4000 /**< suspend pending */ +#define XS_XE_MASK 0xf0000 /**< transmit errors */ +#define XS_XE_SHIFT 16 +#define XS_XE_NOERR 0x00000 /**< no error */ +#define XS_XE_DPE 0x10000 /**< descriptor protocol error */ +#define XS_XE_DFU 0x20000 /**< data fifo underrun */ +#define XS_XE_BEBR 0x30000 /**< bus error on buffer read */ +#define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */ +#define XS_AD_MASK 0xfff00000 /**< active descriptor */ +#define XS_AD_SHIFT 20 + +/* receive channel control */ +#define RC_RE ((uint32)1 << 0) /**< receive enable */ +#define RC_RO_MASK 0xfe /**< receive frame offset */ +#define RC_RO_SHIFT 1 +#define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */ +#define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */ +#define RC_OC ((uint32)1 << 10) /**< overflow continue */ +#define RC_PD ((uint32)1 << 11) /**< parity check disable */ +#define RC_AE ((uint32)3 << 16) /**< address extension bits */ +#define RC_AE_SHIFT 16 +#define RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define RC_BL_SHIFT 18 +#define RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define RC_PC_SHIFT 21 +#define RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define RC_PT_SHIFT 24 +#define RC_WAITCMP_MASK 0x00001000 +#define RC_WAITCMP_SHIFT 12 +/* receive descriptor table pointer */ +#define RP_LD_MASK 0xfff /**< last valid descriptor */ + +/* receive channel status */ +#define RS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define RS_RS_MASK 0xf000 /**< receive state */ +#define RS_RS_SHIFT 12 +#define RS_RS_DISABLED 0x0000 /**< disabled */ +#define RS_RS_ACTIVE 0x1000 /**< active */ +#define RS_RS_IDLE 0x2000 /**< idle wait */ +#define RS_RS_STOPPED 0x3000 /**< reserved */ +#define RS_RE_MASK 0xf0000 /**< receive errors */ +#define RS_RE_SHIFT 16 +#define RS_RE_NOERR 0x00000 /**< no error */ 
+#define RS_RE_DPE 0x10000 /**< descriptor protocol error */ +#define RS_RE_DFO 0x20000 /**< data fifo overflow */ +#define RS_RE_BEBW 0x30000 /**< bus error on buffer write */ +#define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */ +#define RS_AD_MASK 0xfff00000 /**< active descriptor */ +#define RS_AD_SHIFT 20 + +/* fifoaddr */ +#define FA_OFF_MASK 0xffff /**< offset */ +#define FA_SEL_MASK 0xf0000 /**< select */ +#define FA_SEL_SHIFT 16 +#define FA_SEL_XDD 0x00000 /**< transmit dma data */ +#define FA_SEL_XDP 0x10000 /**< transmit dma pointers */ +#define FA_SEL_RDD 0x40000 /**< receive dma data */ +#define FA_SEL_RDP 0x50000 /**< receive dma pointers */ +#define FA_SEL_XFD 0x80000 /**< transmit fifo data */ +#define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ +#define FA_SEL_RFD 0xc0000 /**< receive fifo data */ +#define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ +#define FA_SEL_RSD 0xe0000 /**< receive frame status data */ +#define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ + +/* descriptor control flags */ +#define CTRL_BC_MASK 0x00001fff /**< buffer byte count, real data len must <= 4KB */ +#define CTRL_AE ((uint32)3 << 16) /**< address extension bits */ +#define CTRL_AE_SHIFT 16 +#define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */ +#define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */ +#define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */ +#define CTRL_EOF ((uint32)1 << 30) /**< end of frame */ +#define CTRL_SOF ((uint32)1 << 31) /**< start of frame */ + +/** control flags in the range [27:20] are core-specific and not defined here */ +#define CTRL_CORE_MASK 0x0ff00000 + +/* 64 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 addrlow; /**< descriptor ring base address low 32-bits (8K aligned) */ + uint32 addrhigh; /**< descriptor ring base 
+ uint32 addrlow; /**< memory address of the data buffer, bits 31:0 */ + uint32 addrhigh; /**< memory address of the data buffer, bits 63:32 */
+ */ +#define D64_DEF_USBBURSTLEN 2 +#define D64_DEF_SDIOBURSTLEN 1 + +#ifndef D64_USBBURSTLEN +#define D64_USBBURSTLEN DMA_BL_64 +#endif // endif +#ifndef D64_SDIOBURSTLEN +#define D64_SDIOBURSTLEN DMA_BL_32 +#endif // endif + +/* transmit channel control */ +#define D64_XC_XE 0x00000001 /**< transmit enable */ +#define D64_XC_SE 0x00000002 /**< transmit suspend request */ +#define D64_XC_LE 0x00000004 /**< loopback enable */ +#define D64_XC_FL 0x00000010 /**< flush request */ +#define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define D64_XC_MR_SHIFT 6 +#define D64_XC_CS_SHIFT 9 /**< channel switch enable */ +#define D64_XC_CS_MASK 0x00000200 /**< channel switch enable */ +#define D64_XC_PD 0x00000800 /**< parity check disable */ +#define D64_XC_AE 0x00030000 /**< address extension bits */ +#define D64_XC_AE_SHIFT 16 +#define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define D64_XC_BL_SHIFT 18 +#define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define D64_XC_PC_SHIFT 21 +#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define D64_XC_PT_SHIFT 24 +#define D64_XC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */ +#define D64_XC_CO_SHIFT 26 + +/* transmit descriptor table pointer */ +#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */ + +/* transmit channel status */ +#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */ +#define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */ +#define D64_XS0_XS_SHIFT 28 +#define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */ +#define D64_XS0_XS_ACTIVE 0x10000000 /**< active */ +#define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */ +#define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */ +#define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */ + +#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */ +#define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */ +#define D64_XS1_XE_SHIFT 28 +#define 
+#define DMA_CTRL_PEN (1 << 0) /**< parity enable */
+#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /**< DMA Rx glom is enabled */
+#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< burst size control */
D64_CTRL2_MAX_LEN : (len)) + +/** receive frame status */ +typedef volatile struct { + uint16 len; + uint16 flags; +} dma_rxh_t; + +#endif /* _sbhnddma_h_ */ diff --git a/bcmdhd.100.10.315.x/include/sbpcmcia.h b/bcmdhd.100.10.315.x/include/sbpcmcia.h new file mode 100644 index 0000000..9dec294 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbpcmcia.h @@ -0,0 +1,137 @@ +/* + * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbpcmcia.h 647676 2016-07-07 02:59:05Z $ + */ + +#ifndef _SBPCMCIA_H +#define _SBPCMCIA_H + +/* All the addresses that are offsets in attribute space are divided + * by two to account for the fact that odd bytes are invalid in + * attribute space and our read/write routines make the space appear + * as if they didn't exist. 
Still we want to show the original numbers + * as documented in the hnd_pcmcia core manual. + */ + +/* PCMCIA Function Configuration Registers */ +#define PCMCIA_FCR (0x700 / 2) + +#define FCR0_OFF 0 +#define FCR1_OFF (0x40 / 2) +#define FCR2_OFF (0x80 / 2) +#define FCR3_OFF (0xc0 / 2) + +#define PCMCIA_FCR0 (0x700 / 2) +#define PCMCIA_FCR1 (0x740 / 2) +#define PCMCIA_FCR2 (0x780 / 2) +#define PCMCIA_FCR3 (0x7c0 / 2) + +/* Standard PCMCIA FCR registers */ + +#define PCMCIA_COR 0 + +#define COR_RST 0x80 +#define COR_LEV 0x40 +#define COR_IRQEN 0x04 +#define COR_BLREN 0x01 +#define COR_FUNEN 0x01 + +#define PCICIA_FCSR (2 / 2) +#define PCICIA_PRR (4 / 2) +#define PCICIA_SCR (6 / 2) +#define PCICIA_ESR (8 / 2) + +#define PCM_MEMOFF 0x0000 +#define F0_MEMOFF 0x1000 +#define F1_MEMOFF 0x2000 +#define F2_MEMOFF 0x3000 +#define F3_MEMOFF 0x4000 + +/* Memory base in the function fcr's */ +#define MEM_ADDR0 (0x728 / 2) +#define MEM_ADDR1 (0x72a / 2) +#define MEM_ADDR2 (0x72c / 2) + +/* PCMCIA base plus Srom access in fcr0: */ +#define PCMCIA_ADDR0 (0x072e / 2) +#define PCMCIA_ADDR1 (0x0730 / 2) +#define PCMCIA_ADDR2 (0x0732 / 2) + +#define MEM_SEG (0x0734 / 2) +#define SROM_CS (0x0736 / 2) +#define SROM_DATAL (0x0738 / 2) +#define SROM_DATAH (0x073a / 2) +#define SROM_ADDRL (0x073c / 2) +#define SROM_ADDRH (0x073e / 2) +#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */ +#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */ + +/* Values for srom_cs: */ +#define SROM_IDLE 0 +#define SROM_WRITE 1 +#define SROM_READ 2 +#define SROM_WEN 4 +#define SROM_WDS 7 +#define SROM_DONE 8 + +/* Fields in srom_info: */ +#define SRI_SZ_MASK 0x03 +#define SRI_BLANK 0x04 +#define SRI_OTP 0x80 + +#define SROM16K_BANK_SEL_MASK (3 << 11) +#define SROM16K_BANK_SHFT_MASK 11 +#define SROM16K_ADDR_SEL_MASK ((1 << SROM16K_BANK_SHFT_MASK) - 1) +#define SROM_PRSNT_MASK 0x1 +#define SROM_SUPPORT_SHIFT_MASK 30 +#define SROM_SUPPORTED (0x1 << SROM_SUPPORT_SHIFT_MASK) +#define SROM_SIZE_MASK 
0x00000006 +#define SROM_SIZE_2K 2 +#define SROM_SIZE_512 1 +#define SROM_SIZE_128 0 +#define SROM_SIZE_SHFT_MASK 1 + +/* Standard tuples we know about */ + +#define CISTPL_NULL 0x00 +#define CISTPL_END 0xff /* End of the CIS tuple chain */ + +#define CISTPL_BRCM_HNBU 0x80 + +#define HNBU_BOARDREV 0x02 /* One byte board revision */ + +#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */ + +#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */ + +/* sbtmstatelow */ +#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */ +#define SBTML_INT_EN 0x20000 /* enable sb interrupt */ + +/* sbtmstatehigh */ +#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */ +#endif /* _SBPCMCIA_H */ diff --git a/bcmdhd.100.10.315.x/include/sbsdio.h b/bcmdhd.100.10.315.x/include/sbsdio.h new file mode 100644 index 0000000..942fc4b --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbsdio.h @@ -0,0 +1,188 @@ +/* + * SDIO device core hardware definitions. + * sdio is a portion of the pcmcia core in core rev 3 - rev 8 + * + * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbsdio.h 665717 2016-10-18 23:29:25Z $ + */ + +#ifndef _SBSDIO_H +#define _SBSDIO_H + +#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */ + +/* function 1 miscellaneous registers */ +#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */ +#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */ +#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */ +#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */ +#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */ +#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 0 */ +#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */ +#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */ +#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */ +#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */ + +/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */ +#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */ +#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */ +#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */ +#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */ +#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */ +#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */ +#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */ +#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */ +#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */ +#define SBSDIO_FUNC1_RFRAMEBCHI 
+#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
+*/ +#define SBSDIO_MESBUSYCTRL_MASK 0x7f +#define SBSDIO_MESBUSYCTRL_ENAB 0x80 /* Enable busy capability for MES access */ + +/* SBSDIO_DEVICE_CTL */ +#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when + * receiving CMD53 + */ +#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is + * synchronous to the sdio clock + */ +#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host + * except the chipActive (rev 8) + */ +#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put + * external pads in tri-state; requires + * sdio bus power cycle to clear (rev 9) + */ +#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10 /* Enable function 2 tx for each block */ +#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 /* Enable F2 Watermark */ +#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 0x20 /* Isolate sdio clk and cmd (non-data) */ + +/* SBSDIO_FUNC1_CHIPCLKCSR */ +#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */ +#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */ +#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */ +#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */ +/* In rev8, actual avail bits followed original docs */ +#define SBSDIO_Rev8_HT_AVAIL 0x40 +#define SBSDIO_Rev8_ALP_AVAIL 0x80 +#define SBSDIO_CSR_MASK 0x1F + +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) +#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \ + (alponly ? 
+ * data command
/dev/null +++ b/bcmdhd.100.10.315.x/include/sbsdpcmdev.h @@ -0,0 +1,309 @@ +/* + * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific + * device core support + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbsdpcmdev.h 616398 2016-02-01 09:37:52Z $ + */ + +#ifndef _sbsdpcmdev_h_ +#define _sbsdpcmdev_h_ + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +typedef volatile struct { + dma64regs_t xmt; /* dma tx */ + uint32 PAD[2]; + dma64regs_t rcv; /* dma rx */ + uint32 PAD[2]; +} dma64p_t; + +/* dma64 sdiod corerev >= 1 */ +typedef volatile struct { + dma64p_t dma64regs[2]; + dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */ + uint32 PAD[92]; +} sdiodma64_t; + +/* dma32 sdiod corerev == 0 */ +typedef volatile struct { + dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */ + uint32 PAD[108]; +} sdiodma32_t; + +/* dma32 regs for pcmcia core */ +typedef volatile struct { + dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */ + uint32 PAD[116]; +} pcmdma32_t; + +/* core registers */ +typedef volatile struct { + uint32 corecontrol; /* CoreControl, 0x000, rev8 */ + uint32 corestatus; /* CoreStatus, 0x004, rev8 */ + uint32 PAD[1]; + uint32 biststatus; /* BistStatus, 0x00c, rev8 */ + + /* PCMCIA access */ + uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */ + uint16 PAD[1]; + uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */ + uint16 PAD[1]; + uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */ + uint16 PAD[1]; + uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */ + uint16 PAD[1]; + + /* interrupt */ + uint32 intstatus; /* IntStatus, 0x020, rev8 */ + uint32 hostintmask; /* IntHostMask, 0x024, rev8 */ + uint32 intmask; /* IntSbMask, 0x028, rev8 */ + uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */ + uint32 sbintmask; /* SBIntMask, 0x030, rev8 */ + uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */ + uint32 
+ uint32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
function control registers */ + char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */ + uint16 PAD[55]; + + /* PCMCIA backplane access */ + uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */ + uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */ + uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */ + uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */ + uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */ + uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */ + uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */ + uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */ + uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */ + uint16 PAD[31]; + + /* sprom "size" & "blank" info */ + uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */ + uint32 PAD[464]; + + /* Sonics SiliconBackplane registers */ + sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */ +} sdpcmd_regs_t; + +/* corecontrol */ +#define CC_CISRDY (1 << 0) /* CIS Ready */ +#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */ +#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */ +#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */ +#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */ +#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */ + +/* corestatus */ +#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */ +#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */ +#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */ + +#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */ +#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */ +#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */ +#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */ + +/* intstatus */ +#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */ +#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */ +#define I_SMB_SW2 (1 
<< 2) /* To SB Mail S/W interrupt 2 */ +#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */ +#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */ +#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */ +#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */ +#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */ +#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */ +#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */ +#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */ +#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */ +#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */ +#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */ +#define I_PC (1 << 10) /* descriptor error */ +#define I_PD (1 << 11) /* data error */ +#define I_DE (1 << 12) /* Descriptor protocol Error */ +#define I_RU (1 << 13) /* Receive descriptor Underflow */ +#define I_RO (1 << 14) /* Receive fifo Overflow */ +#define I_XU (1 << 15) /* Transmit fifo Underflow */ +#define I_RI (1 << 16) /* Receive Interrupt */ +#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */ +#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */ +#define I_XI (1 << 24) /* Transmit Interrupt */ +#define I_RF_TERM (1 << 25) /* Read Frame Terminate */ +#define I_WF_TERM (1 << 26) /* Write Frame Terminate */ +#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */ +#define I_SBINT (1 << 28) /* sbintstatus Interrupt */ +#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */ +#define I_SRESET (1 << 30) /* CCCR RES interrupt */ +#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */ +#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */ +#define I_DMA (I_RI | I_XI | I_ERRORS) + +/* sbintstatus */ +#define I_SB_SERR (1 << 8) /* Backplane SError (write) */ +#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */ +#define I_SB_SPROMERR (1 << 10) /* 
Error accessing the sprom */ + +/* sdioaccess */ +#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */ +#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */ +#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */ +#define SDA_WRITE 0x01000000 /* Write bit */ +#define SDA_READ 0x00000000 /* Write bit cleared for Read */ +#define SDA_BUSY 0x80000000 /* Busy bit */ + +/* sdioaccess-accessible register address spaces */ +#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */ +#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */ +#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */ +#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */ +#define SDA_F3_FBR_SPACE 0x400 /* sdioAccess F3 FBR register space */ + +/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */ +#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */ +#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */ +#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */ +#define SDA_DEVICECONTROL 0x009 /* DeviceControl */ +#define SDA_SBADDRLOW 0x00a /* SbAddrLow */ +#define SDA_SBADDRMID 0x00b /* SbAddrMid */ +#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */ +#define SDA_FRAMECTRL 0x00d /* FrameCtrl */ +#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */ +#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */ +#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */ +#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */ +#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */ +#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */ +#define SDA_MESBUSYCNTRL 0x01d /* mesBusyCntrl */ +#define SDA_WAKEUPCTRL 0x01e /* WakeupCtrl */ +#define SDA_SLEEPCSR 0x01f /* sleepCSR */ + +/* SDA_F1_REG_SPACE register bits */ +/* sleepCSR register */ +#define SDA_SLEEPCSR_KEEP_SDIO_ON 0x1 + +/* SDA_F2WATERMARK */ +#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */ + +/* SDA_SBADDRLOW */ +#define 
SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */ + +/* SDA_SBADDRMID */ +#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */ + +/* SDA_SBADDRHIGH */ +#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */ + +/* SDA_FRAMECTRL */ +#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */ +#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */ +#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */ + +/* pcmciaframectrl */ +#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */ + +/* intrcvlazy */ +#define IRL_TO_MASK 0x00ffffff /* timeout */ +#define IRL_FC_MASK 0xff000000 /* frame count */ +#define IRL_FC_SHIFT 24 /* frame count */ + +/* rx header */ +typedef volatile struct { + uint16 len; + uint16 flags; +} sdpcmd_rxh_t; + +/* rx header flags */ +#define RXF_CRC 0x0001 /* CRC error detected */ +#define RXF_WOOS 0x0002 /* write frame out of sync */ +#define RXF_WF_TERM 0x0004 /* write frame terminated */ +#define RXF_ABORT 0x0008 /* write frame aborted */ +#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */ + +/* HW frame tag */ +#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */ + +#define SDPCM_HWEXT_LEN 8 + +#endif /* _sbsdpcmdev_h_ */ diff --git a/bcmdhd.100.10.315.x/include/sbsocram.h b/bcmdhd.100.10.315.x/include/sbsocram.h new file mode 100644 index 0000000..c78479e --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbsocram.h @@ -0,0 +1,204 @@ +/* + * BCM47XX Sonics SiliconBackplane embedded ram core + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbsocram.h 619629 2016-02-17 18:37:56Z $ + */ + +#ifndef _SBSOCRAM_H +#define _SBSOCRAM_H + +#ifndef _LANGUAGE_ASSEMBLY + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* Memcsocram core registers */ +typedef volatile struct sbsocramregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; /* rev 6 */ + uint32 errlogaddr; /* rev 6 */ + /* used for patching rev 3 & 5 */ + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[1]; + uint32 bankinfo; /* corev 8 */ + uint32 bankpda; + uint32 PAD[14]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; /* corerev >= 2 */ + uint32 PAD[133]; + uint32 sr_control; /* corerev >= 15 */ + uint32 sr_status; /* corerev >= 15 */ + uint32 sr_address; /* corerev >= 15 */ + uint32 sr_data; /* corerev >= 15 */ +} sbsocramregs_t; + +#endif /* _LANGUAGE_ASSEMBLY */ + +/* Register offsets */ +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + +/* Coreinfo register */ +#define SRCI_PT_MASK 0x00070000 /* corerev >= 6; port type[18:16] */ +#define SRCI_PT_SHIFT 16 +/* port types : SRCI_PT__ */ +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 +/* corerev >= 3 */ +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define SRCI_LRS_SHIFT 24 
+ +/* In corerev 0, the memory size is 2 to the power of the + * base plus 16 plus to the contents of the memsize field plus 1. + */ +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + +/* + * In corerev 1 the bank size is 2 ^ the bank size field plus 14, + * the memory size is number of banks times bank size. + * The same applies to rom size. + */ +#define SRCI_ROMNB_MASK 0xf000 +#define SRCI_ROMNB_SHIFT 12 +#define SRCI_ROMBSZ_MASK 0xf00 +#define SRCI_ROMBSZ_SHIFT 8 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_SHIFT 4 +#define SRCI_SRBSZ_MASK 0xf +#define SRCI_SRBSZ_SHIFT 0 + +#define SRCI_SRNB_MASK_EXT 0x100 + +#define SR_BSZ_BASE 14 + +/* Standby control register */ +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 /* rev >= 3 */ +#define SRSC_SBYEN_SHIFT 24 + +/* Power control register */ +#define SRPC_PMU_STBYDIS_MASK 0x00000010 /* rev >= 3 */ +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 +#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + +/* Extra core capability register */ +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + +/* CAM bank patch control */ +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + +/* CAM bank command reg */ +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + +/* bankidx and bankinfo reg defines corerev >= 8 */ +#define SOCRAM_BANKINFO_SZMASK 0x7f +#define SOCRAM_BANKIDX_ROM_MASK 0x100 + +#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8 +/* socram bankinfo memtype */ +#define SOCRAM_MEMTYPE_RAM 0 +#define SOCRAM_MEMTYPE_ROM 1 +#define SOCRAM_MEMTYPE_DEVRAM 2 + +#define 
SOCRAM_BANKINFO_REG 0x40 +#define SOCRAM_BANKIDX_REG 0x10 +#define SOCRAM_BANKINFO_STDBY_MASK 0x400 +#define SOCRAM_BANKINFO_STDBY_TIMER 0x800 + +/* bankinfo rev >= 10 */ +#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13 +#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000 +#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14 +#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000 +#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15 +#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000 +#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16 +#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000 +#define SOCRAM_BANKINFO_PDASZ_SHIFT 17 +#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000 +#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24 +#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000 + +/* extracoreinfo register */ +#define SOCRAM_DEVRAMBANK_MASK 0xF000 +#define SOCRAM_DEVRAMBANK_SHIFT 12 + +/* bank info to calculate bank size */ +#define SOCRAM_BANKINFO_SZBASE 8192 +#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */ + +#endif /* _SBSOCRAM_H */ diff --git a/bcmdhd.100.10.315.x/include/sbsysmem.h b/bcmdhd.100.10.315.x/include/sbsysmem.h new file mode 100644 index 0000000..7c5ad99 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sbsysmem.h @@ -0,0 +1,180 @@ +/* + * SiliconBackplane System Memory core + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbsysmem.h 563229 2015-06-12 04:50:06Z $ + */ + +#ifndef _SBSYSMEM_H +#define _SBSYSMEM_H + +#ifndef _LANGUAGE_ASSEMBLY + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/* sysmem core registers */ +typedef volatile struct sysmemregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; + uint32 errlogaddr; + + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[1]; + uint32 bankinfo; + uint32 PAD[15]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; + uint32 PAD[133]; + uint32 sr_control; + uint32 sr_status; + uint32 sr_address; + uint32 sr_data; +} sysmemregs_t; + +#endif /* _LANGUAGE_ASSEMBLY */ + +/* Register offsets */ +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + +/* Coreinfo register */ +#define SRCI_PT_MASK 0x00070000 /* port type[18:16] */ +#define SRCI_PT_SHIFT 16 +/* port types : SRCI_PT__ */ +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define 
SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 + +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define SRCI_LRS_SHIFT 24 + +/* In corerev 0, the memory size is 2 to the power of the + * base plus 16 plus to the contents of the memsize field plus 1. + */ +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + +/* + * In corerev 1 the bank size is 2 ^ the bank size field plus 14, + * the memory size is number of banks times bank size. + * The same applies to rom size. + */ +#define SYSMEM_SRCI_ROMNB_MASK 0x3e0 +#define SYSMEM_SRCI_ROMNB_SHIFT 5 +#define SYSMEM_SRCI_SRNB_MASK 0x1f +#define SYSMEM_SRCI_SRNB_SHIFT 0 + +/* Standby control register */ +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 +#define SRSC_SBYEN_SHIFT 24 + +/* Power control register */ +#define SRPC_PMU_STBYDIS_MASK 0x00000010 +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 +#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + +/* Extra core capability register */ +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + +/* CAM bank patch control */ +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + +/* CAM bank command reg */ +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + +/* bankidx and bankinfo reg defines */ +#define SYSMEM_BANKINFO_SZMASK 0x7f +#define SYSMEM_BANKIDX_ROM_MASK 0x80 + +#define SYSMEM_BANKINFO_REG 0x40 +#define SYSMEM_BANKIDX_REG 0x10 +#define SYSMEM_BANKINFO_STDBY_MASK 0x200 +#define SYSMEM_BANKINFO_STDBY_TIMER 0x400 + +#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 14 +#define 
SYSMEM_BANKINFO_SLPSUPP_MASK 0x4000 +#define SYSMEM_BANKINFO_PDASZ_SHIFT 16 +#define SYSMEM_BANKINFO_PDASZ_MASK 0x001F0000 + +/* extracoreinfo register */ +#define SYSMEM_DEVRAMBANK_MASK 0xF000 +#define SYSMEM_DEVRAMBANK_SHIFT 12 + +/* bank info to calculate bank size */ +#define SYSMEM_BANKINFO_SZBASE 8192 +#define SYSMEM_BANKSIZE_SHIFT 13 /* SYSMEM_BANKINFO_SZBASE */ + +#endif /* _SBSYSMEM_H */ diff --git a/bcmdhd.100.10.315.x/include/sdio.h b/bcmdhd.100.10.315.x/include/sdio.h new file mode 100644 index 0000000..d26c45b --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sdio.h @@ -0,0 +1,625 @@ +/* + * SDIO spec header file + * Protocol and standard (common) device definitions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sdio.h 689948 2017-03-14 05:21:03Z $ + */ + +#ifndef _SDIO_H +#define _SDIO_H + +#ifdef BCMSDIO + +/* CCCR structure for function 0 */ +typedef volatile struct { + uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */ + uint8 sd_rev; /* RO, sd spec revision */ + uint8 io_en; /* I/O enable */ + uint8 io_rdy; /* I/O ready reg */ + uint8 intr_ctl; /* Master and per function interrupt enable control */ + uint8 intr_status; /* RO, interrupt pending status */ + uint8 io_abort; /* read/write abort or reset all functions */ + uint8 bus_inter; /* bus interface control */ + uint8 capability; /* RO, card capability */ + + uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */ + uint8 cis_base_mid; + uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */ + + /* suspend/resume registers */ + uint8 bus_suspend; /* 0xC */ + uint8 func_select; /* 0xD */ + uint8 exec_flag; /* 0xE */ + uint8 ready_flag; /* 0xF */ + + uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */ + + uint8 power_control; /* 0x12 (SDIO version 1.10) */ + + uint8 speed_control; /* 0x13 */ +} sdio_regs_t; + +/* SDIO Device CCCR offsets */ +#define SDIOD_CCCR_REV 0x00 +#define SDIOD_CCCR_SDREV 0x01 +#define SDIOD_CCCR_IOEN 0x02 +#define SDIOD_CCCR_IORDY 0x03 +#define SDIOD_CCCR_INTEN 0x04 +#define SDIOD_CCCR_INTPEND 0x05 +#define SDIOD_CCCR_IOABORT 0x06 +#define SDIOD_CCCR_BICTRL 0x07 +#define SDIOD_CCCR_CAPABLITIES 0x08 +#define SDIOD_CCCR_CISPTR_0 0x09 +#define SDIOD_CCCR_CISPTR_1 0x0A +#define SDIOD_CCCR_CISPTR_2 0x0B +#define SDIOD_CCCR_BUSSUSP 0x0C +#define SDIOD_CCCR_FUNCSEL 0x0D +#define SDIOD_CCCR_EXECFLAGS 0x0E +#define SDIOD_CCCR_RDYFLAGS 0x0F +#define SDIOD_CCCR_BLKSIZE_0 0x10 +#define SDIOD_CCCR_BLKSIZE_1 0x11 +#define SDIOD_CCCR_POWER_CONTROL 0x12 +#define SDIOD_CCCR_SPEED_CONTROL 0x13 +#define SDIOD_CCCR_UHSI_SUPPORT 0x14 +#define SDIOD_CCCR_DRIVER_STRENGTH 0x15 +#define SDIOD_CCCR_INTR_EXTN 0x16 + +/* Broadcom extensions (corerev >= 1) */ +#define 
SDIOD_CCCR_BRCM_CARDCAP 0xf0 +#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02 +#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04 +#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08 +#define SDIOD_CCCR_BRCM_CARDCTL 0xf1 +#define SDIOD_CCCR_BRCM_SEPINT 0xf2 + +/* cccr_sdio_rev */ +#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */ +#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */ +#define SDIO_SPEC_VERSION_3_0 0x40 /* SDIO spec version 3.0 */ + +/* sd_rev */ +#define SD_REV_PHY_MASK 0x0f /* SD format version number */ + +/* io_en */ +#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */ +#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */ +#if defined(BT_OVER_SDIO) +#define SDIO_FUNC_ENABLE_3 0x08 /* function 2 I/O enable */ +#define SDIO_FUNC_DISABLE_3 0xF0 /* function 2 I/O enable */ +#endif /* defined (BT_OVER_SDIO) */ + +/* io_rdys */ +#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */ +#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */ + +/* intr_ctl */ +#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */ +#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */ +#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */ +#if defined(BT_OVER_SDIO) +#define INTR_CTL_FUNC3_EN 0x8 /* interrupt enable for function 3 */ +#endif /* defined (BT_OVER_SDIO) */ +/* intr_status */ +#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */ +#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */ + +/* io_abort */ +#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */ +#define IO_ABORT_FUNC_MASK 0x07 /* abort selction: function x */ + +/* bus_inter */ +#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */ +#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */ +#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */ +#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */ +#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit 
mode */ +#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */ + +/* capability */ +#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */ +#define SDIO_CAP_LSC 0x40 /* low speed card */ +#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */ +#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */ +#define SDIO_CAP_SBS 0x08 /* support suspend/resume */ +#define SDIO_CAP_SRW 0x04 /* support read wait */ +#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */ +#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */ + +/* power_control */ +#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */ +#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */ + +/* speed_control (control device entry into high-speed clocking mode) */ +#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */ +#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */ +#define SDIO_SPEED_UHSI_DDR50 0x08 + +/* for setting bus speed in card: 0x13h */ +#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3) +#define SDIO_BUS_SPEED_UHSISEL_S 1 + +/* for getting bus speed cap in card: 0x14h */ +#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3) +#define SDIO_BUS_SPEED_UHSICAP_S 0 + +/* for getting driver type CAP in card: 0x15h */ +#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3) +#define SDIO_BUS_DRVR_TYPE_CAP_S 0 + +/* for setting driver type selection in card: 0x15h */ +#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2) +#define SDIO_BUS_DRVR_TYPE_SEL_S 4 + +/* for getting async int support in card: 0x16h */ +#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1) +#define SDIO_BUS_ASYNCINT_CAP_S 0 + +/* for setting async int selection in card: 0x16h */ +#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1) +#define SDIO_BUS_ASYNCINT_SEL_S 1 + +/* brcm sepint */ +#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate 
pad (chip-specific) */ +#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */ +#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */ + +/* FBR structure for function 1-7, FBR addresses and register offsets */ +typedef volatile struct { + uint8 devctr; /* device interface, CSA control */ + uint8 ext_dev; /* extended standard I/O device type code */ + uint8 pwr_sel; /* power selection support */ + uint8 PAD[6]; /* reserved */ + + uint8 cis_low; /* CIS LSB */ + uint8 cis_mid; + uint8 cis_high; /* CIS MSB */ + uint8 csa_low; /* code storage area, LSB */ + uint8 csa_mid; + uint8 csa_high; /* code storage area, MSB */ + uint8 csa_dat_win; /* data access window to function */ + + uint8 fnx_blk_size[2]; /* block size, little endian */ +} sdio_fbr_t; + +/* Maximum number of I/O funcs */ +#define SDIOD_MAX_FUNCS 8 +#define SDIOD_MAX_IOFUNCS 7 + +/* SDIO Device FBR Start Address */ +#define SDIOD_FBR_STARTADDR 0x100 + +/* SDIO Device FBR Size */ +#define SDIOD_FBR_SIZE 0x100 + +/* Macro to calculate FBR register base */ +#define SDIOD_FBR_BASE(n) ((n) * 0x100) + +/* Function register offsets */ +#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */ +#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */ +#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */ + +/* SDIO Function CIS ptr offset */ +#define SDIOD_FBR_CISPTR_0 0x09 +#define SDIOD_FBR_CISPTR_1 0x0A +#define SDIOD_FBR_CISPTR_2 0x0B + +/* Code Storage Area pointer */ +#define SDIOD_FBR_CSA_ADDR_0 0x0C +#define SDIOD_FBR_CSA_ADDR_1 0x0D +#define SDIOD_FBR_CSA_ADDR_2 0x0E +#define SDIOD_FBR_CSA_DATA 0x0F + +/* SDIO Function I/O Block Size */ +#define SDIOD_FBR_BLKSIZE_0 0x10 +#define SDIOD_FBR_BLKSIZE_1 0x11 + +/* devctr */ +#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */ +#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */ +#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */ +/* interface codes */ +#define SDIOD_DIC_NONE 0 
/* SDIO standard interface is not supported */ +#define SDIOD_DIC_UART 1 +#define SDIOD_DIC_BLUETOOTH_A 2 +#define SDIOD_DIC_BLUETOOTH_B 3 +#define SDIOD_DIC_GPS 4 +#define SDIOD_DIC_CAMERA 5 +#define SDIOD_DIC_PHS 6 +#define SDIOD_DIC_WLAN 7 +#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */ + +/* pwr_sel */ +#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */ +#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */ + +/* misc defines */ +#define SDIO_FUNC_0 0 +#define SDIO_FUNC_1 1 +#define SDIO_FUNC_2 2 +#define SDIO_FUNC_4 4 +#define SDIO_FUNC_5 5 +#define SDIO_FUNC_6 6 +#define SDIO_FUNC_7 7 + +#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */ +#define SD_CARD_TYPE_IO 1 /* IO only card */ +#define SD_CARD_TYPE_MEMORY 2 /* memory only card */ +#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */ + +#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */ +#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */ + +/* Card registers: status bit position */ +#define CARDREG_STATUS_BIT_OUTOFRANGE 31 +#define CARDREG_STATUS_BIT_COMCRCERROR 23 +#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22 +#define CARDREG_STATUS_BIT_ERROR 19 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9 +#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4 + +#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */ +#define SD_CMD_SEND_OPCOND 1 +#define SD_CMD_MMC_SET_RCA 3 +#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */ +#define SD_CMD_SELECT_DESELECT_CARD 7 +#define SD_CMD_SEND_CSD 9 +#define SD_CMD_SEND_CID 10 +#define SD_CMD_STOP_TRANSMISSION 12 +#define SD_CMD_SEND_STATUS 13 +#define SD_CMD_GO_INACTIVE_STATE 15 +#define SD_CMD_SET_BLOCKLEN 16 +#define SD_CMD_READ_SINGLE_BLOCK 17 +#define SD_CMD_READ_MULTIPLE_BLOCK 18 +#define 
SD_CMD_WRITE_BLOCK 24 +#define SD_CMD_WRITE_MULTIPLE_BLOCK 25 +#define SD_CMD_PROGRAM_CSD 27 +#define SD_CMD_SET_WRITE_PROT 28 +#define SD_CMD_CLR_WRITE_PROT 29 +#define SD_CMD_SEND_WRITE_PROT 30 +#define SD_CMD_ERASE_WR_BLK_START 32 +#define SD_CMD_ERASE_WR_BLK_END 33 +#define SD_CMD_ERASE 38 +#define SD_CMD_LOCK_UNLOCK 42 +#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */ +#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */ +#define SD_CMD_APP_CMD 55 +#define SD_CMD_GEN_CMD 56 +#define SD_CMD_READ_OCR 58 +#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */ +#define SD_ACMD_SD_STATUS 13 +#define SD_ACMD_SEND_NUM_WR_BLOCKS 22 +#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23 +#define SD_ACMD_SD_SEND_OP_COND 41 +#define SD_ACMD_SET_CLR_CARD_DETECT 42 +#define SD_ACMD_SEND_SCR 51 + +/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */ +#define SD_IO_OP_READ 0 /* Read_Write: Read */ +#define SD_IO_OP_WRITE 1 /* Read_Write: Write */ +#define SD_IO_RW_NORMAL 0 /* no RAW */ +#define SD_IO_RW_RAW 1 /* RAW */ +#define SD_IO_BYTE_MODE 0 /* Byte Mode */ +#define SD_IO_BLOCK_MODE 1 /* BlockMode */ +#define SD_IO_FIXED_ADDRESS 0 /* fix Address */ +#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */ + +/* build SD_CMD_IO_RW_DIRECT Argument */ +#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \ + (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF)) + +/* build SD_CMD_IO_RW_EXTENDED Argument */ +#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \ + (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF)) + +/* SDIO response parameters */ +#define SD_RSP_NO_NONE 0 +#define SD_RSP_NO_1 1 +#define SD_RSP_NO_2 2 +#define SD_RSP_NO_3 3 +#define SD_RSP_NO_4 4 +#define SD_RSP_NO_5 5 +#define SD_RSP_NO_6 6 + + /* Modified R6 response (to CMD3) */ +#define SD_RSP_MR6_COM_CRC_ERROR 
0x8000 +#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000 +#define SD_RSP_MR6_ERROR 0x2000 + + /* Modified R1 in R4 Response (to CMD5) */ +#define SD_RSP_MR1_SBIT 0x80 +#define SD_RSP_MR1_PARAMETER_ERROR 0x40 +#define SD_RSP_MR1_RFU5 0x20 +#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10 +#define SD_RSP_MR1_COM_CRC_ERROR 0x08 +#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04 +#define SD_RSP_MR1_RFU1 0x02 +#define SD_RSP_MR1_IDLE_STATE 0x01 + + /* R5 response (to CMD52 and CMD53) */ +#define SD_RSP_R5_COM_CRC_ERROR 0x80 +#define SD_RSP_R5_ILLEGAL_COMMAND 0x40 +#define SD_RSP_R5_IO_CURRENTSTATE1 0x20 +#define SD_RSP_R5_IO_CURRENTSTATE0 0x10 +#define SD_RSP_R5_ERROR 0x08 +#define SD_RSP_R5_RFU 0x04 +#define SD_RSP_R5_FUNC_NUM_ERROR 0x02 +#define SD_RSP_R5_OUT_OF_RANGE 0x01 + +#define SD_RSP_R5_ERRBITS 0xCB + +/* ------------------------------------------------ + * SDIO Commands and responses + * + * I/O only commands are: + * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +/* SDIO Commands */ +#define SDIOH_CMD_0 0 +#define SDIOH_CMD_3 3 +#define SDIOH_CMD_5 5 +#define SDIOH_CMD_7 7 +#define SDIOH_CMD_11 11 +#define SDIOH_CMD_14 14 +#define SDIOH_CMD_15 15 +#define SDIOH_CMD_19 19 +#define SDIOH_CMD_52 52 +#define SDIOH_CMD_53 53 +#define SDIOH_CMD_59 59 + +/* SDIO Command Responses */ +#define SDIOH_RSP_NONE 0 +#define SDIOH_RSP_R1 1 +#define SDIOH_RSP_R2 2 +#define SDIOH_RSP_R3 3 +#define SDIOH_RSP_R4 4 +#define SDIOH_RSP_R5 5 +#define SDIOH_RSP_R6 6 + +/* + * SDIO Response Error flags + */ +#define SDIOH_RSP5_ERROR_FLAGS 0xCB + +/* ------------------------------------------------ + * SDIO Command structures. 
I/O only commands are: + * + * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +#define CMD5_OCR_M BITFIELD_MASK(24) +#define CMD5_OCR_S 0 + +#define CMD5_S18R_M BITFIELD_MASK(1) +#define CMD5_S18R_S 24 + +#define CMD7_RCA_M BITFIELD_MASK(16) +#define CMD7_RCA_S 16 + +#define CMD14_RCA_M BITFIELD_MASK(16) +#define CMD14_RCA_S 16 +#define CMD14_SLEEP_M BITFIELD_MASK(1) +#define CMD14_SLEEP_S 15 + +#define CMD_15_RCA_M BITFIELD_MASK(16) +#define CMD_15_RCA_S 16 + +#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52 + */ +#define CMD52_DATA_S 0 +#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD52_REG_ADDR_S 9 +#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */ +#define CMD52_RAW_S 27 +#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD52_FUNCTION_S 28 +#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD52_RW_FLAG_S 31 + +#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */ +#define CMD53_BYTE_BLK_CNT_S 0 +#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD53_REG_ADDR_S 9 +#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */ +#define CMD53_OP_CODE_S 26 +#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */ +#define CMD53_BLK_MODE_S 27 +#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD53_FUNCTION_S 28 +#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD53_RW_FLAG_S 31 + +/* ------------------------------------------------------ + * SDIO Command Response structures for SD1 and SD4 modes + * ----------------------------------------------------- + */ +#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_IO_OCR_S 0 + +#define 
RSP4_S18A_M BITFIELD_MASK(1) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_S18A_S 24 + +#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */ +#define RSP4_STUFF_S 24 +#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */ +#define RSP4_MEM_PRESENT_S 27 +#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */ +#define RSP4_NUM_FUNCS_S 28 +#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */ +#define RSP4_CARD_READY_S 31 + +#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0] + */ +#define RSP6_STATUS_S 0 +#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */ +#define RSP6_IO_RCA_S 16 + +#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */ +#define RSP1_AKE_SEQ_ERROR_S 3 +#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP1_APP_CMD_S 5 +#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */ +#define RSP1_READY_FOR_DATA_S 8 +#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card + * when Cmd was received + */ +#define RSP1_CURR_STATE_S 9 +#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */ +#define RSP1_EARSE_RESET_S 13 +#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */ +#define RSP1_CARD_ECC_DISABLE_S 14 +#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */ +#define RSP1_WP_ERASE_SKIP_S 15 +#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits + * of CSD + */ +#define RSP1_CID_CSD_OVERW_S 16 +#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */ +#define RSP1_ERROR_S 19 +#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */ +#define RSP1_CC_ERROR_S 20 +#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card 
internal ECC failed + * to correct data + */ +#define RSP1_CARD_ECC_FAILED_S 21 +#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */ +#define RSP1_ILLEGAL_CMD_S 22 +#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed + */ +#define RSP1_COM_CRC_ERROR_S 23 +#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */ +#define RSP1_LOCK_UNLOCK_FAIL_S 24 +#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */ +#define RSP1_CARD_LOCKED_S 25 +#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program + * write-protected blocks + */ +#define RSP1_WP_VIOLATION_S 26 +#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */ +#define RSP1_ERASE_PARAM_S 27 +#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */ +#define RSP1_ERASE_SEQ_ERR_S 28 +#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */ +#define RSP1_BLK_LEN_ERR_S 29 +#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */ +#define RSP1_ADDR_ERR_S 30 +#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */ +#define RSP1_OUT_OF_RANGE_S 31 + +#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */ +#define RSP5_DATA_S 0 +#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */ +#define RSP5_FLAGS_S 8 +#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */ +#define RSP5_STUFF_S 16 + +/* ---------------------------------------------- + * SDIO Command Response structures for SPI mode + * ---------------------------------------------- + */ +#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */ +#define SPIRSP4_IO_OCR_S 0 +#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */ +#define SPIRSP4_STUFF_S 16 +#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - 
Memory present */ +#define SPIRSP4_MEM_PRESENT_S 19 +#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */ +#define SPIRSP4_NUM_FUNCS_S 20 +#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */ +#define SPIRSP4_CARD_READY_S 23 +#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */ +#define SPIRSP4_IDLE_STATE_S 24 +#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP4_ILLEGAL_CMD_S 26 +#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP4_COM_CRC_ERROR_S 27 +#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP4_FUNC_NUM_ERROR_S 28 +#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP4_PARAM_ERROR_S 30 +#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP4_START_BIT_S 31 + +#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */ +#define SPIRSP5_DATA_S 16 +#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */ +#define SPIRSP5_IDLE_STATE_S 24 +#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP5_ILLEGAL_CMD_S 26 +#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP5_COM_CRC_ERROR_S 27 +#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP5_FUNC_NUM_ERROR_S 28 +#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP5_PARAM_ERROR_S 30 +#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP5_START_BIT_S 31 + +/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */ +#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error + */ +#define RSP6STAT_AKE_SEQ_ERROR_S 3 +#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - 
Card expects ACMD */ +#define RSP6STAT_APP_CMD_S 5 +#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data + * (buff empty) + */ +#define RSP6STAT_READY_FOR_DATA_S 8 +#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at + * Cmd reception + */ +#define RSP6STAT_CURR_STATE_S 9 +#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19 + */ +#define RSP6STAT_ERROR_S 13 +#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for + * card state Bit 22 + */ +#define RSP6STAT_ILLEGAL_CMD_S 14 +#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command + * failed Bit 23 + */ +#define RSP6STAT_COM_CRC_ERROR_S 15 + +#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ +#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE + +/* command issue options */ +#define CMD_OPTION_DEFAULT 0 +#define CMD_OPTION_TUNING 1 + +#endif /* def BCMSDIO */ +#endif /* _SDIO_H */ diff --git a/bcmdhd.100.10.315.x/include/sdioh.h b/bcmdhd.100.10.315.x/include/sdioh.h new file mode 100644 index 0000000..90e7208 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sdioh.h @@ -0,0 +1,450 @@ +/* + * SDIO Host Controller Spec header file + * Register map and definitions for the Standard Host Controller + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sdioh.h 768099 2018-06-18 13:58:07Z $ + */ + +#ifndef _SDIOH_H +#define _SDIOH_H + +#define SD_SysAddr 0x000 +#define SD_BlockSize 0x004 +#define SD_BlockCount 0x006 +#define SD_Arg0 0x008 +#define SD_Arg1 0x00A +#define SD_TransferMode 0x00C +#define SD_Command 0x00E +#define SD_Response0 0x010 +#define SD_Response1 0x012 +#define SD_Response2 0x014 +#define SD_Response3 0x016 +#define SD_Response4 0x018 +#define SD_Response5 0x01A +#define SD_Response6 0x01C +#define SD_Response7 0x01E +#define SD_BufferDataPort0 0x020 +#define SD_BufferDataPort1 0x022 +#define SD_PresentState 0x024 +#define SD_HostCntrl 0x028 +#define SD_PwrCntrl 0x029 +#define SD_BlockGapCntrl 0x02A +#define SD_WakeupCntrl 0x02B +#define SD_ClockCntrl 0x02C +#define SD_TimeoutCntrl 0x02E +#define SD_SoftwareReset 0x02F +#define SD_IntrStatus 0x030 +#define SD_ErrorIntrStatus 0x032 +#define SD_IntrStatusEnable 0x034 +#define SD_ErrorIntrStatusEnable 0x036 +#define SD_IntrSignalEnable 0x038 +#define SD_ErrorIntrSignalEnable 0x03A +#define SD_CMD12ErrorStatus 0x03C +#define SD_Capabilities 0x040 +#define SD_Capabilities3 0x044 +#define SD_MaxCurCap 0x048 +#define SD_MaxCurCap_Reserved 0x04C +#define SD_ADMA_ErrStatus 0x054 +#define SD_ADMA_SysAddr 0x58 +#define SD_SlotInterruptStatus 0x0FC +#define SD_HostControllerVersion 0x0FE +#define SD_GPIO_Reg 0x100 +#define SD_GPIO_OE 0x104 +#define SD_GPIO_Enable 0x108 + +/* SD specific registers in PCI config space */ +#define SD_SlotInfo 0x40 + +/* HC 3.0 specific registers and offsets */ +#define SD3_HostCntrl2 0x03E +/* preset regsstart and count */ +#define SD3_PresetValStart 0x060 +#define SD3_PresetValCount 8 +/* preset-indiv regs */ +#define SD3_PresetVal_init 0x060 +#define SD3_PresetVal_default 0x062 +#define SD3_PresetVal_HS 0x064 +#define SD3_PresetVal_SDR12 0x066 +#define SD3_PresetVal_SDR25 0x068 +#define SD3_PresetVal_SDR50 0x06a +#define SD3_PresetVal_SDR104 0x06c +#define SD3_PresetVal_DDR50 0x06e +/* SDIO3.0 
Revx specific Registers */ +#define SD3_Tuning_Info_Register 0x0EC +#define SD3_WL_BT_reset_register 0x0F0 + +/* preset value indices */ +#define SD3_PRESETVAL_INITIAL_IX 0 +#define SD3_PRESETVAL_DESPEED_IX 1 +#define SD3_PRESETVAL_HISPEED_IX 2 +#define SD3_PRESETVAL_SDR12_IX 3 +#define SD3_PRESETVAL_SDR25_IX 4 +#define SD3_PRESETVAL_SDR50_IX 5 +#define SD3_PRESETVAL_SDR104_IX 6 +#define SD3_PRESETVAL_DDR50_IX 7 + +/* SD_Capabilities reg (0x040) */ +#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6) +#define CAP_TO_CLKFREQ_S 0 +#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1) +#define CAP_TO_CLKUNIT_S 7 +/* Note: for sdio-2.0 case, this mask has to be 6 bits, but msb 2 + bits are reserved. going ahead with 8 bits, as it is req for 3.0 +*/ +#define CAP_BASECLK_M BITFIELD_MASK(8) +#define CAP_BASECLK_S 8 +#define CAP_MAXBLOCK_M BITFIELD_MASK(2) +#define CAP_MAXBLOCK_S 16 +#define CAP_ADMA2_M BITFIELD_MASK(1) +#define CAP_ADMA2_S 19 +#define CAP_ADMA1_M BITFIELD_MASK(1) +#define CAP_ADMA1_S 20 +#define CAP_HIGHSPEED_M BITFIELD_MASK(1) +#define CAP_HIGHSPEED_S 21 +#define CAP_DMA_M BITFIELD_MASK(1) +#define CAP_DMA_S 22 +#define CAP_SUSPEND_M BITFIELD_MASK(1) +#define CAP_SUSPEND_S 23 +#define CAP_VOLT_3_3_M BITFIELD_MASK(1) +#define CAP_VOLT_3_3_S 24 +#define CAP_VOLT_3_0_M BITFIELD_MASK(1) +#define CAP_VOLT_3_0_S 25 +#define CAP_VOLT_1_8_M BITFIELD_MASK(1) +#define CAP_VOLT_1_8_S 26 +#define CAP_64BIT_HOST_M BITFIELD_MASK(1) +#define CAP_64BIT_HOST_S 28 + +#define SDIO_OCR_READ_FAIL (2) + +#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1) +#define CAP_ASYNCINT_SUP_S 29 + +#define CAP_SLOTTYPE_M BITFIELD_MASK(2) +#define CAP_SLOTTYPE_S 30 + +#define CAP3_MSBits_OFFSET (32) +/* note: following are caps MSB32 bits. + So the bits start from 0, instead of 32. that is why + CAP3_MSBits_OFFSET is subtracted. 
+*/ +#define CAP3_SDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_SDR104_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET) + +#define CAP3_DDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET) + +/* for knowing the clk caps in a single read */ +#define CAP3_30CLKCAP_M BITFIELD_MASK(3) +#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_TC_M BITFIELD_MASK(4) +#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET) + +#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1) +#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2) +#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_TC_DISABLED (0x0) +#define CAP3_RETUNING_TC_1024S (0xB) +#define CAP3_RETUNING_TC_OTHER (0xF) + +#define CAP3_CLK_MULT_M BITFIELD_MASK(8) +#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET) + +#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2) +#define PRESET_DRIVR_SELECT_S 14 + +#define PRESET_CLK_DIV_M BITFIELD_MASK(10) +#define PRESET_CLK_DIV_S 0 + +/* SD_MaxCurCap reg (0x048) */ +#define CAP_CURR_3_3_M BITFIELD_MASK(8) +#define CAP_CURR_3_3_S 0 +#define CAP_CURR_3_0_M BITFIELD_MASK(8) +#define CAP_CURR_3_0_S 8 +#define CAP_CURR_1_8_M BITFIELD_MASK(8) +#define CAP_CURR_1_8_S 16 + +/* SD_SysAddr: Offset 0x0000, Size 4 bytes */ + +/* SD_BlockSize: Offset 0x004, Size 2 bytes */ +#define BLKSZ_BLKSZ_M BITFIELD_MASK(12) +#define BLKSZ_BLKSZ_S 0 +#define BLKSZ_BNDRY_M BITFIELD_MASK(3) +#define BLKSZ_BNDRY_S 12 + +/* SD_BlockCount: Offset 0x006, size 2 bytes */ + +/* SD_Arg0: Offset 0x008, size = 4 bytes */ +/* 
SD_TransferMode Offset 0x00C, size = 2 bytes */ +#define XFER_DMA_ENABLE_M BITFIELD_MASK(1) +#define XFER_DMA_ENABLE_S 0 +#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1) +#define XFER_BLK_COUNT_EN_S 1 +#define XFER_CMD_12_EN_M BITFIELD_MASK(1) +#define XFER_CMD_12_EN_S 2 +#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1) +#define XFER_DATA_DIRECTION_S 4 +#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1) +#define XFER_MULTI_BLOCK_S 5 + +/* SD_Command: Offset 0x00E, size = 2 bytes */ +/* resp_type field */ +#define RESP_TYPE_NONE 0 +#define RESP_TYPE_136 1 +#define RESP_TYPE_48 2 +#define RESP_TYPE_48_BUSY 3 +/* type field */ +#define CMD_TYPE_NORMAL 0 +#define CMD_TYPE_SUSPEND 1 +#define CMD_TYPE_RESUME 2 +#define CMD_TYPE_ABORT 3 + +#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */ +#define CMD_RESP_TYPE_S 0 +#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */ +#define CMD_CRC_EN_S 3 +#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */ +#define CMD_INDEX_EN_S 4 +#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */ +#define CMD_DATA_EN_S 5 +#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc + */ +#define CMD_TYPE_S 6 +#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */ +#define CMD_INDEX_S 8 + +/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */ +/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */ +/* SD_PresentState : Offset 0x024, size = 4 bytes */ +#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */ +#define PRES_CMD_INHIBIT_S 0 +#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */ +#define PRES_DAT_INHIBIT_S 1 +#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */ +#define PRES_DAT_BUSY_S 2 +#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */ +#define PRES_PRESENT_RSVD_S 3 +#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */ +#define PRES_WRITE_ACTIVE_S 8 +#define 
PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */ +#define PRES_READ_ACTIVE_S 9 +#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */ +#define PRES_WRITE_DATA_RDY_S 10 +#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */ +#define PRES_READ_DATA_RDY_S 11 +#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */ +#define PRES_CARD_PRESENT_S 16 +#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */ +#define PRES_CARD_STABLE_S 17 +#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */ +#define PRES_CARD_PRESENT_RAW_S 18 +#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? */ +#define PRES_WRITE_ENABLED_S 19 +#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */ +#define PRES_DAT_SIGNAL_S 20 +#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */ +#define PRES_CMD_SIGNAL_S 24 + +/* SD_HostCntrl: Offset 0x028, size = 1 bytes */ +#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */ +#define HOST_LED_S 0 +#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */ +#define HOST_DATA_WIDTH_S 1 +#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */ +#define HOST_DMA_SEL_S 3 +#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */ +#define HOST_HI_SPEED_EN_S 2 + +/* Host Control2: */ +#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */ + +#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */ + +#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */ + +#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */ + +#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */ +#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */ + +#define 
HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */ + +#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */ +#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */ + +#define HOST_CONTR_VER_2 (1) +#define HOST_CONTR_VER_3 (2) + +/* misc defines */ +#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */ +#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */ + +/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */ +#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */ +#define PWR_BUS_EN_S 0 +#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */ +#define PWR_VOLTS_S 1 + +/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */ +#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */ +#define SW_RESET_ALL_S 0 +#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */ +#define SW_RESET_CMD_S 1 +#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */ +#define SW_RESET_DAT_S 2 + +/* SD_IntrStatus: Offset 0x030, size = 2 bytes */ +/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */ +#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */ +#define INTSTAT_CMD_COMPLETE_S 0 +#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1) +#define INTSTAT_XFER_COMPLETE_S 1 +#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1) +#define INTSTAT_BLOCK_GAP_EVENT_S 2 +#define INTSTAT_DMA_INT_M BITFIELD_MASK(1) +#define INTSTAT_DMA_INT_S 3 +#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_WRITE_READY_S 4 +#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_READ_READY_S 5 +#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INSERTION_S 6 +#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1) +#define INTSTAT_CARD_REMOVAL_S 7 +#define INTSTAT_CARD_INT_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INT_S 8 +#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */ +#define INTSTAT_RETUNING_INT_S 12 +#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */ +#define 
INTSTAT_ERROR_INT_S 15 + +/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */ +/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */ +#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_CMD_TIMEOUT_S 0 +#define ERRINT_CMD_CRC_M BITFIELD_MASK(1) +#define ERRINT_CMD_CRC_S 1 +#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_CMD_ENDBIT_S 2 +#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1) +#define ERRINT_CMD_INDEX_S 3 +#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_DATA_TIMEOUT_S 4 +#define ERRINT_DATA_CRC_M BITFIELD_MASK(1) +#define ERRINT_DATA_CRC_S 5 +#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_DATA_ENDBIT_S 6 +#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1) +#define ERRINT_CURRENT_LIMIT_S 7 +#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1) +#define ERRINT_AUTO_CMD12_S 8 +#define ERRINT_VENDOR_M BITFIELD_MASK(4) +#define ERRINT_VENDOR_S 12 +#define ERRINT_ADMA_M BITFIELD_MASK(1) +#define ERRINT_ADMA_S 9 + +/* Also provide definitions in "normal" form to allow combined masks */ +#define ERRINT_CMD_TIMEOUT_BIT 0x0001 +#define ERRINT_CMD_CRC_BIT 0x0002 +#define ERRINT_CMD_ENDBIT_BIT 0x0004 +#define ERRINT_CMD_INDEX_BIT 0x0008 +#define ERRINT_DATA_TIMEOUT_BIT 0x0010 +#define ERRINT_DATA_CRC_BIT 0x0020 +#define ERRINT_DATA_ENDBIT_BIT 0x0040 +#define ERRINT_CURRENT_LIMIT_BIT 0x0080 +#define ERRINT_AUTO_CMD12_BIT 0x0100 +#define ERRINT_ADMA_BIT 0x0200 + +/* Masks to select CMD vs. 
DATA errors */ +#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\ + ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT) +#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\ + ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT) +#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS) + +/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */ +/* SD_ClockCntrl : Offset 0x02C , size = bytes */ +/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */ +/* SD_IntrStatus : Offset 0x030 , size = bytes */ +/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */ +/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */ +/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */ +/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */ +/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */ +/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */ +/* SD_Capabilities : Offset 0x040 , size = bytes */ +/* SD_MaxCurCap : Offset 0x048 , size = bytes */ +/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */ +/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */ +/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */ + +/* SDIO Host Control Register DMA Mode Definitions */ +#define SDIOH_SDMA_MODE 0 +#define SDIOH_ADMA1_MODE 1 +#define SDIOH_ADMA2_MODE 2 +#define SDIOH_ADMA2_64_MODE 3 + +#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */ +#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */ +#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */ +#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */ +#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */ +#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */ +#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. 
*/ +#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */ + +/* ADMA2 Descriptor Table Entry for 32-bit Address */ +typedef struct adma2_dscr_32b { + uint32 len_attr; + uint32 phys_addr; +} adma2_dscr_32b_t; + +/* ADMA1 Descriptor Table Entry */ +typedef struct adma1_dscr { + uint32 phys_addr_attr; +} adma1_dscr_t; + +#endif /* _SDIOH_H */ diff --git a/bcmdhd.100.10.315.x/include/sdiovar.h b/bcmdhd.100.10.315.x/include/sdiovar.h new file mode 100644 index 0000000..9c1a94e --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sdiovar.h @@ -0,0 +1,124 @@ +/* + * Structure used by apps whose drivers access SDIO drivers. + * Pulled out separately so dhdu and wlu can both use it. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sdiovar.h 660496 2016-09-20 19:28:50Z $ + */ + +#ifndef _sdiovar_h_ +#define _sdiovar_h_ + +#include + +typedef struct sdreg { + int func; + int offset; + int value; +} sdreg_t; + +/* Common msglevel constants */ +#define SDH_ERROR_VAL 0x0001 /* Error */ +#define SDH_TRACE_VAL 0x0002 /* Trace */ +#define SDH_INFO_VAL 0x0004 /* Info */ +#define SDH_DEBUG_VAL 0x0008 /* Debug */ +#define SDH_DATA_VAL 0x0010 /* Data */ +#define SDH_CTRL_VAL 0x0020 /* Control Regs */ +#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */ +#define SDH_DMA_VAL 0x0080 /* DMA */ +#define SDH_COST_VAL 0x8000 /* Control Regs */ + +#define NUM_PREV_TRANSACTIONS 16 + +#ifdef BCMSPI +/* Error statistics for gSPI */ +struct spierrstats_t { + uint32 dna; /* The requested data is not available. */ + uint32 rdunderflow; /* FIFO underflow happened due to current (F2, F3) rd command */ + uint32 wroverflow; /* FIFO underflow happened due to current (F1, F2, F3) wr command */ + + uint32 f2interrupt; /* OR of all F2 related intr status bits. */ + uint32 f3interrupt; /* OR of all F3 related intr status bits. 
*/ + + uint32 f2rxnotready; /* F2 FIFO is not ready to receive data (FIFO empty) */ + uint32 f3rxnotready; /* F3 FIFO is not ready to receive data (FIFO empty) */ + + uint32 hostcmddataerr; /* Error in command or host data, detected by CRC/checksum + * (optional) + */ + uint32 f2pktavailable; /* Packet is available in F2 TX FIFO */ + uint32 f3pktavailable; /* Packet is available in F2 TX FIFO */ + + uint32 dstatus[NUM_PREV_TRANSACTIONS]; /* dstatus bits of last 16 gSPI transactions */ + uint32 spicmd[NUM_PREV_TRANSACTIONS]; +}; +#endif /* BCMSPI */ + +typedef struct sdio_bus_metrics { + uint32 active_dur; /* msecs */ + + /* Generic */ + uint32 data_intr_cnt; /* data interrupt counter */ + uint32 mb_intr_cnt; /* mailbox interrupt counter */ + uint32 error_intr_cnt; /* error interrupt counter */ + uint32 wakehost_cnt; /* counter for OOB wakehost */ + + /* DS forcewake */ + uint32 ds_wake_on_cnt; /* counter for (clock) ON */ + uint32 ds_wake_on_dur; /* duration for (clock) ON) */ + uint32 ds_wake_off_cnt; /* counter for (clock) OFF */ + uint32 ds_wake_off_dur; /* duration for (clock) OFF */ + + /* DS_D0 state */ + uint32 ds_d0_cnt; /* counter for DS_D0 state */ + uint32 ds_d0_dur; /* duration for DS_D0 state */ + + /* DS_D3 state */ + uint32 ds_d3_cnt; /* counter for DS_D3 state */ + uint32 ds_d3_dur; /* duration for DS_D3 state */ + + /* DS DEV_WAKE */ + uint32 ds_dw_assrt_cnt; /* counter for DW_ASSERT */ + uint32 ds_dw_dassrt_cnt; /* counter for DW_DASSERT */ + + /* DS mailbox signals */ + uint32 ds_tx_dsreq_cnt; /* counter for tx HMB_DATA_DSREQ */ + uint32 ds_tx_dsexit_cnt; /* counter for tx HMB_DATA_DSEXIT */ + uint32 ds_tx_d3ack_cnt; /* counter for tx HMB_DATA_D3ACK */ + uint32 ds_tx_d3exit_cnt; /* counter for tx HMB_DATA_D3EXIT */ + uint32 ds_rx_dsack_cnt; /* counter for rx SMB_DATA_DSACK */ + uint32 ds_rx_dsnack_cnt; /* counter for rx SMB_DATA_DSNACK */ + uint32 ds_rx_d3inform_cnt; /* counter for rx SMB_DATA_D3INFORM */ +} sdio_bus_metrics_t; + +/* Bus 
interface info for SDIO */ +typedef struct wl_pwr_sdio_stats { + uint16 type; /* WL_PWRSTATS_TYPE_SDIO */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + sdio_bus_metrics_t sdio; /* stats from SDIO bus driver */ +} wl_pwr_sdio_stats_t; + +#endif /* _sdiovar_h_ */ diff --git a/bcmdhd.100.10.315.x/include/sdspi.h b/bcmdhd.100.10.315.x/include/sdspi.h new file mode 100644 index 0000000..d16c788 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/sdspi.h @@ -0,0 +1,78 @@ +/* + * SD-SPI Protocol Standard + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
/*
 * <>
 *
 * $Id: sdspi.h 700076 2017-05-17 14:42:22Z $
 */
#ifndef _SD_SPI_H
#define _SD_SPI_H

/* SD-SPI command field masks/shifts. BITFIELD_MASK() is a project macro
 * (not visible in this chunk) producing an n-bit all-ones mask.
 * Note bits [19:18] are deliberately defined twice: RAW/STUFF apply to the
 * CMD52 layout, BLKMODE/OPCODE to the CMD53 layout of the same word.
 */
#define SPI_START_M		BITFIELD_MASK(1)	/* Bit [31]	- Start Bit */
#define SPI_START_S		31
#define SPI_DIR_M		BITFIELD_MASK(1)	/* Bit [30]	- Direction */
#define SPI_DIR_S		30
#define SPI_CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [29:24]	- Command number */
#define SPI_CMD_INDEX_S		24
#define SPI_RW_M		BITFIELD_MASK(1)	/* Bit [23]	- Read=0, Write=1 */
#define SPI_RW_S		23
#define SPI_FUNC_M		BITFIELD_MASK(3)	/* Bits [22:20]	- Function Number */
#define SPI_FUNC_S		20
#define SPI_RAW_M		BITFIELD_MASK(1)	/* Bit [19]	- Read After Wr */
#define SPI_RAW_S		19
#define SPI_STUFF_M		BITFIELD_MASK(1)	/* Bit [18]	- Stuff bit */
#define SPI_STUFF_S		18
#define SPI_BLKMODE_M		BITFIELD_MASK(1)	/* Bit [19]	- Blockmode 1=blk */
#define SPI_BLKMODE_S		19
#define SPI_OPCODE_M		BITFIELD_MASK(1)	/* Bit [18]	- OP Code */
#define SPI_OPCODE_S		18
#define SPI_ADDR_M		BITFIELD_MASK(17)	/* Bits [17:1]	- Address */
#define SPI_ADDR_S		1
#define SPI_STUFF0_M		BITFIELD_MASK(1)	/* Bit [0]	- Stuff bit */
#define SPI_STUFF0_S		0

/* SPI-mode R1 response field masks/shifts */
#define SPI_RSP_START_M		BITFIELD_MASK(1)	/* Bit [7]	- Start Bit (always 0) */
#define SPI_RSP_START_S		7
#define SPI_RSP_PARAM_ERR_M	BITFIELD_MASK(1)	/* Bit [6]	- Parameter Error */
#define SPI_RSP_PARAM_ERR_S	6
#define SPI_RSP_RFU5_M		BITFIELD_MASK(1)	/* Bit [5]	- RFU (Always 0) */
#define SPI_RSP_RFU5_S		5
#define SPI_RSP_FUNC_ERR_M	BITFIELD_MASK(1)	/* Bit [4]	- Function number error */
#define SPI_RSP_FUNC_ERR_S	4
#define SPI_RSP_CRC_ERR_M	BITFIELD_MASK(1)	/* Bit [3]	- COM CRC Error */
#define SPI_RSP_CRC_ERR_S	3
#define SPI_RSP_ILL_CMD_M	BITFIELD_MASK(1)	/* Bit [2]	- Illegal Command error */
#define SPI_RSP_ILL_CMD_S	2
#define SPI_RSP_RFU1_M		BITFIELD_MASK(1)	/* Bit [1]	- RFU (Always 0) */
#define SPI_RSP_RFU1_S		1
#define SPI_RSP_IDLE_M		BITFIELD_MASK(1)	/* Bit [0]	- In idle state */
#define SPI_RSP_IDLE_S		0

/* SD-SPI Protocol Definitions */
#define SDSPI_COMMAND_LEN	6	/* Number of bytes in an SD command */
#define SDSPI_START_BLOCK	0xFE	/* SD Start Block Token */
#define SDSPI_IDLE_PAD		0xFF	/* SD-SPI idle value for MOSI */
#define SDSPI_START_BIT_MASK	0x80

#endif /* _SD_SPI_H */

/* ==== end of include/sdspi.h; next file in patch ====
 * diff --git a/bcmdhd.100.10.315.x/include/siutils.h b/bcmdhd.100.10.315.x/include/siutils.h
 * new file mode 100644
 * index 0000000..a4251b8
 * --- /dev/null
 * +++ b/bcmdhd.100.10.315.x/include/siutils.h
 * @@ -0,0 +1,801 @@
 */

/*
 * Misc utility routines for accessing the SOC Interconnects
 * of Broadcom HNBU chips.
 *
 * Copyright (C) 1999-2018, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 */
+ * + * + * <> + * + * $Id: siutils.h 769534 2018-06-26 21:19:11Z $ + */ + +#ifndef _siutils_h_ +#define _siutils_h_ + +#ifdef SR_DEBUG +#include "wlioctl.h" +#endif /* SR_DEBUG */ + +#define WARM_BOOT 0xA0B0C0D0 + +#ifdef BCM_BACKPLANE_TIMEOUT + +#define SI_MAX_ERRLOG_SIZE 4 +typedef struct si_axi_error +{ + uint32 error; + uint32 coreid; + uint32 errlog_lo; + uint32 errlog_hi; + uint32 errlog_id; + uint32 errlog_flags; + uint32 errlog_status; +} si_axi_error_t; + +typedef struct si_axi_error_info +{ + uint32 count; + si_axi_error_t axi_error[SI_MAX_ERRLOG_SIZE]; +} si_axi_error_info_t; +#endif /* BCM_BACKPLANE_TIMEOUT */ + +/** + * Data structure to export all chip specific common variables + * public (read-only) portion of siutils handle returned by si_attach()/si_kattach() + */ +struct si_pub { + uint socitype; /**< SOCI_SB, SOCI_AI */ + + uint bustype; /**< SI_BUS, PCI_BUS */ + uint buscoretype; /**< PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ + uint buscorerev; /**< buscore rev */ + uint buscoreidx; /**< buscore index */ + int ccrev; /**< chip common core rev */ + uint32 cccaps; /**< chip common capabilities */ + uint32 cccaps_ext; /**< chip common capabilities extension */ + int pmurev; /**< pmu core rev */ + uint32 pmucaps; /**< pmu capabilities */ + uint boardtype; /**< board type */ + uint boardrev; /* board rev */ + uint boardvendor; /**< board vendor */ + uint boardflags; /**< board flags */ + uint boardflags2; /**< board flags2 */ + uint boardflags4; /**< board flags4 */ + uint chip; /**< chip number */ + uint chiprev; /**< chip revision */ + uint chippkg; /**< chip package option */ + uint32 chipst; /**< chip status */ + bool issim; /**< chip is in simulation or emulation */ + uint socirev; /**< SOC interconnect rev */ + bool pci_pr32414; + int gcirev; /**< gci core rev */ + int lpflags; /**< low power flags */ + uint32 enum_base; /**< backplane address where the chipcommon core resides */ + +#ifdef BCM_BACKPLANE_TIMEOUT + si_axi_error_info_t * 
err_info; +#endif /* BCM_BACKPLANE_TIMEOUT */ + + bool _multibp_enable; +}; + +/* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver + * for monolithic driver, it is readonly to prevent accident change + */ +typedef struct si_pub si_t; + +/* + * Many of the routines below take an 'sih' handle as their first arg. + * Allocate this by calling si_attach(). Free it by calling si_detach(). + * At any one time, the sih is logically focused on one particular si core + * (the "current core"). + * Use si_setcore() or si_setcoreidx() to change the association to another core. + */ +#define SI_OSH NULL /**< Use for si_kattach when no osh is available */ + +#define BADIDX (SI_MAXCORES + 1) + +/* clkctl xtal what flags */ +#define XTAL 0x1 /**< primary crystal oscillator (2050) */ +#define PLL 0x2 /**< main chip pll */ + +/* clkctl clk mode */ +#define CLK_FAST 0 /**< force fast (pll) clock */ +#define CLK_DYNAMIC 2 /**< enable dynamic clock control */ + +/* GPIO usage priorities */ +#define GPIO_DRV_PRIORITY 0 /**< Driver */ +#define GPIO_APP_PRIORITY 1 /**< Application */ +#define GPIO_HI_PRIORITY 2 /**< Highest priority. 
Ignore GPIO reservation */ + +/* GPIO pull up/down */ +#define GPIO_PULLUP 0 +#define GPIO_PULLDN 1 + +/* GPIO event regtype */ +#define GPIO_REGEVT 0 /**< GPIO register event */ +#define GPIO_REGEVT_INTMSK 1 /**< GPIO register event int mask */ +#define GPIO_REGEVT_INTPOL 2 /**< GPIO register event int polarity */ + +/* device path */ +#define SI_DEVPATH_BUFSZ 16 /**< min buffer size in bytes */ + +/* SI routine enumeration: to be used by update function with multiple hooks */ +#define SI_DOATTACH 1 +#define SI_PCIDOWN 2 /**< wireless interface is down */ +#define SI_PCIUP 3 /**< wireless interface is up */ + +#ifdef SR_DEBUG +#define PMU_RES 31 +#endif /* SR_DEBUG */ + +/* "access" param defines for si_seci_access() below */ +#define SECI_ACCESS_STATUSMASK_SET 0 +#define SECI_ACCESS_INTRS 1 +#define SECI_ACCESS_UART_CTS 2 +#define SECI_ACCESS_UART_RTS 3 +#define SECI_ACCESS_UART_RXEMPTY 4 +#define SECI_ACCESS_UART_GETC 5 +#define SECI_ACCESS_UART_TXFULL 6 +#define SECI_ACCESS_UART_PUTC 7 +#define SECI_ACCESS_STATUSMASK_GET 8 + +#define ISSIM_ENAB(sih) FALSE + +#define INVALID_ADDR (~0) + +/* PMU clock/power control */ +#if defined(BCMPMUCTL) +#define PMUCTL_ENAB(sih) (BCMPMUCTL) +#else +#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU) +#endif // endif + +#if defined(BCMAOBENAB) +#define AOB_ENAB(sih) (BCMAOBENAB) +#else +#define AOB_ENAB(sih) ((sih)->ccrev >= 35 ? 
\ + ((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0) +#endif /* BCMAOBENAB */ + +/* chipcommon clock/power control (exclusive with PMU's) */ +#if defined(BCMPMUCTL) && BCMPMUCTL +#define CCCTL_ENAB(sih) (0) +#define CCPLL_ENAB(sih) (0) +#else +#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL) +#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK) +#endif // endif + +typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); + +/* External BT Coex enable mask */ +#define CC_BTCOEX_EN_MASK 0x01 +/* External PA enable mask */ +#define GPIO_CTRL_EPA_EN_MASK 0x40 +/* WL/BT control enable mask */ +#define GPIO_CTRL_5_6_EN_MASK 0x60 +#define GPIO_CTRL_7_6_EN_MASK 0xC0 +#define GPIO_OUT_7_EN_MASK 0x80 + +/* CR4 specific defines used by the host driver */ +#define SI_CR4_CAP (0x04) +#define SI_CR4_BANKIDX (0x40) +#define SI_CR4_BANKINFO (0x44) +#define SI_CR4_BANKPDA (0x4C) + +#define ARMCR4_TCBBNB_MASK 0xf0 +#define ARMCR4_TCBBNB_SHIFT 4 +#define ARMCR4_TCBANB_MASK 0xf +#define ARMCR4_TCBANB_SHIFT 0 + +#define SICF_CPUHALT (0x0020) +#define ARMCR4_BSZ_MASK 0x3f +#define ARMCR4_BSZ_MULT 8192 +#define SI_BPIND_1BYTE 0x1 +#define SI_BPIND_2BYTE 0x3 +#define SI_BPIND_4BYTE 0xF + +#define GET_GCI_OFFSET(sih, gci_reg) \ + (AOB_ENAB(sih)? OFFSETOF(gciregs_t, gci_reg) : OFFSETOF(chipcregs_t, gci_reg)) + +#define GET_GCI_CORE(sih) \ + (AOB_ENAB(sih)? 
si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX) + +#include +/* === exported functions === */ +extern si_t *si_attach(uint pcidev, osl_t *osh, volatile void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *si_kattach(osl_t *osh); +extern void si_detach(si_t *sih); +extern volatile void * +si_d11_switch_addrbase(si_t *sih, uint coreunit); +extern uint si_corelist(si_t *sih, uint coreid[]); +extern uint si_coreid(si_t *sih); +extern uint si_flag(si_t *sih); +extern uint si_flag_alt(si_t *sih); +extern uint si_intflag(si_t *sih); +extern uint si_coreidx(si_t *sih); +extern uint si_coreunit(si_t *sih); +extern uint si_corevendor(si_t *sih); +extern uint si_corerev(si_t *sih); +extern uint si_corerev_minor(si_t *sih); +extern void *si_osh(si_t *sih); +extern void si_setosh(si_t *sih, osl_t *osh); +extern int si_backplane_access(si_t *sih, uint addr, uint size, + uint *val, bool read); +extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern uint si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val); +extern volatile uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern volatile void *si_coreregs(si_t *sih); +extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val); +extern void *si_wrapperregs(si_t *sih); +extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void si_commit(si_t *sih); +extern bool si_iscoreup(si_t *sih); +extern uint si_numcoreunits(si_t *sih, uint coreid); +extern uint si_numd11coreunits(si_t *sih); +extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); +extern volatile void 
*si_setcoreidx(si_t *sih, uint coreidx); +extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit); +extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); +extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); +extern int si_numaddrspaces(si_t *sih); +extern uint32 si_addrspace(si_t *sih, uint spidx, uint baidx); +extern uint32 si_addrspacesize(si_t *sih, uint spidx, uint baidx); +extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); +extern int si_corebist(si_t *sih); +extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void si_core_disable(si_t *sih, uint32 bits); +extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m); +extern uint si_chip_hostif(si_t *sih); +extern uint32 si_clock(si_t *sih); +extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */ +extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */ +extern void si_pci_setup(si_t *sih, uint coremask); +extern void si_pcmcia_init(si_t *sih); +extern void si_setint(si_t *sih, int siflag); +extern bool si_backplane64(si_t *sih); +extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg); +extern void si_deregister_intr_callback(si_t *sih); +extern void si_clkctl_init(si_t *sih); +extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih); +extern bool si_clkctl_cc(si_t *sih, uint mode); +extern int si_clkctl_xtal(si_t *sih, uint what, bool on); +extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val); +extern void si_btcgpiowar(si_t *sih); +extern bool si_deviceremoved(si_t *sih); +extern void si_set_device_removed(si_t *sih, bool status); +extern uint32 si_sysmem_size(si_t *sih); +extern uint32 si_socram_size(si_t *sih); +extern uint32 si_socdevram_size(si_t *sih); +extern uint32 si_socram_srmem_size(si_t *sih); +extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 
bankpda); +extern void si_socdevram(si_t *sih, bool set, uint8 *ennable, uint8 *protect, uint8 *remap); +extern bool si_socdevram_pkg(si_t *sih); +extern bool si_socdevram_remap_isenb(si_t *sih); +extern uint32 si_socdevram_remap_size(si_t *sih); + +extern void si_watchdog(si_t *sih, uint ticks); +extern void si_watchdog_ms(si_t *sih, uint32 ms); +extern uint32 si_watchdog_msticks(void); +extern volatile void *si_gpiosetcore(si_t *sih); +extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioin(si_t *sih); +extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val); +extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val); +extern uint32 si_gpio_int_enable(si_t *sih, bool enable); +extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode); +extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value); +extern uint8 si_gci_host_wake_gpio_init(si_t *sih); +extern uint8 si_gci_time_sync_gpio_init(si_t *sih); +extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state); +extern void si_gci_time_sync_gpio_enable(si_t *sih, uint8 gpio, bool state); + +extern void si_invalidate_second_bar0win(si_t *sih); + +extern void si_gci_shif_config_wake_pin(si_t *sih, uint8 gpio_n, + uint8 wake_events, bool gci_gpio); +extern void 
si_shif_int_enable(si_t *sih, uint8 gpio_n, uint8 wake_events, bool enable); + +/* GCI interrupt handlers */ +extern void si_gci_handler_process(si_t *sih); + +extern void si_enable_gpio_wake(si_t *sih, uint8 *wake_mask, uint8 *cur_status, uint8 gci_gpio, + uint32 pmu_cc2_mask, uint32 pmu_cc2_value); + +/* GCI GPIO event handlers */ +extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts, + gci_gpio_handler_t cb, void *arg); +extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i); + +extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value); +extern void si_gci_config_wake_pin(si_t *sih, uint8 gpio_n, uint8 wake_events, + bool gci_gpio); +extern void si_gci_free_wake_pin(si_t *sih, uint8 gpio_n); + +/* Wake-on-wireless-LAN (WOWL) */ +extern bool si_pci_pmecap(si_t *sih); +extern bool si_pci_fastpmecap(struct osl_info *osh); +extern bool si_pci_pmestat(si_t *sih); +extern void si_pci_pmeclr(si_t *sih); +extern void si_pci_pmeen(si_t *sih); +extern void si_pci_pmestatclr(si_t *sih); +extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset); +extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val); +extern void si_deepsleep_count(si_t *sih, bool arm_wakeup); + +#ifdef BCMSDIO +extern void si_sdio_init(si_t *sih); +#endif // endif + +extern uint16 si_d11_devid(si_t *sih); +extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice, + uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader); + +extern uint32 si_seci_access(si_t *sih, uint32 val, int access); +extern volatile void* si_seci_init(si_t *sih, uint8 seci_mode); +extern void si_seci_clk_force(si_t *sih, bool val); +extern bool si_seci_clk_force_status(si_t *sih); + +#define si_eci(sih) 0 +static INLINE void * si_eci_init(si_t *sih) {return NULL;} +#define si_eci_notify_bt(sih, type, val) (0) +#define si_seci(sih) 0 +#define si_seci_upd(sih, a) do {} while (0) +static INLINE 
void * si_gci_init(si_t *sih) {return NULL;} +#define si_seci_down(sih) do {} while (0) +#define si_gci(sih) 0 + +/* OTP status */ +extern bool si_is_otp_disabled(si_t *sih); +extern bool si_is_otp_powered(si_t *sih); +extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask); + +/* SPROM availability */ +extern bool si_is_sprom_available(si_t *sih); + +/* OTP/SROM CIS stuff */ +extern int si_cis_source(si_t *sih); +#define CIS_DEFAULT 0 +#define CIS_SROM 1 +#define CIS_OTP 2 + +/* Fab-id information */ +#define DEFAULT_FAB 0x0 /**< Original/first fab used for this chip */ +#define CSM_FAB7 0x1 /**< CSM Fab7 chip */ +#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */ +#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */ + +extern uint16 si_fabid(si_t *sih); +extern uint16 si_chipid(si_t *sih); + +/* + * Build device path. Path size must be >= SI_DEVPATH_BUFSZ. + * The returned path is NULL terminated and has trailing '/'. + * Return 0 on success, nonzero otherwise. + */ +extern int si_devpath(si_t *sih, char *path, int size); +extern int si_devpath_pcie(si_t *sih, char *path, int size); +/* Read variable with prepending the devpath to the name */ +extern char *si_getdevpathvar(si_t *sih, const char *name); +extern int si_getdevpathintvar(si_t *sih, const char *name); +extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name); + +extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val); +extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val); +extern void si_pcie_set_error_injection(si_t *sih, uint32 mode); +extern void si_pcie_set_L1substate(si_t *sih, 
uint32 substate); +extern uint32 si_pcie_get_L1substate(si_t *sih); +extern void si_war42780_clkreq(si_t *sih, bool clkreq); +extern void si_pci_down(si_t *sih); +extern void si_pci_up(si_t *sih); +extern void si_pci_sleep(si_t *sih); +extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm); +extern void si_pcie_power_save_enable(si_t *sih, bool enable); +extern void si_pcie_extendL1timer(si_t *sih, bool extend); +extern int si_pci_fixcfg(si_t *sih); +extern void si_chippkg_set(si_t *sih, uint); +extern bool si_is_warmboot(void); + +extern void si_chipcontrl_restore(si_t *sih, uint32 val); +extern uint32 si_chipcontrl_read(si_t *sih); +extern void si_chipcontrl_srom4360(si_t *sih, bool on); +extern void si_srom_clk_set(si_t *sih); /**< for chips with fast BP clock */ +extern void si_btc_enable_chipcontrol(si_t *sih); +extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag); +/* === debug routines === */ + +extern bool si_taclear(si_t *sih, bool details); + +#if defined(BCMDBG_PHYDUMP) +struct bcmstrbuf; +extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf *b); +extern void si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b); +extern int si_dump_pcieregs(si_t *sih, struct bcmstrbuf *b); +#endif // endif + +#if defined(BCMDBG_PHYDUMP) +extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif // endif + +extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type); +extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low, + int32* data, bool read); +#ifdef SR_DEBUG +extern void si_dump_pmu(si_t *sih, void *pmu_var); +extern void si_pmu_keep_on(si_t *sih, int32 int_val); +extern uint32 si_pmu_keep_on_get(si_t *sih); +extern uint32 si_power_island_set(si_t *sih, uint32 int_val); +extern uint32 si_power_island_get(si_t *sih); +#endif /* SR_DEBUG */ +extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 
mask, uint32 val); +extern void si_pcie_set_request_size(si_t *sih, uint16 size); +extern uint16 si_pcie_get_request_size(si_t *sih); +extern void si_pcie_set_maxpayload_size(si_t *sih, uint16 size); +extern uint16 si_pcie_get_maxpayload_size(si_t *sih); +extern uint16 si_pcie_get_ssid(si_t *sih); +extern uint32 si_pcie_get_bar0(si_t *sih); +extern int si_pcie_configspace_cache(si_t *sih); +extern int si_pcie_configspace_restore(si_t *sih); +extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size); + +#ifdef BCM_BACKPLANE_TIMEOUT +extern const si_axi_error_info_t * si_get_axi_errlog_info(si_t *sih); +extern void si_reset_axi_errlog_info(si_t * sih); +#endif /* BCM_BACKPLANE_TIMEOUT */ + +extern void si_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout, uint32 cid); + +extern uint32 si_tcm_size(si_t *sih); +extern bool si_has_flops(si_t *sih); + +extern int si_set_sromctl(si_t *sih, uint32 value); +extern uint32 si_get_sromctl(si_t *sih); + +extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val); +extern uint32 si_gci_input(si_t *sih, uint reg); +extern uint32 si_gci_int_enable(si_t *sih, bool enable); +extern void si_gci_reset(si_t *sih); +#ifdef BCMLTECOEX +extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio); +#endif /* BCMLTECOEX */ +extern void si_gci_seci_init(si_t *sih); +extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio, uint32 xtalfreq); + +extern bool si_btcx_wci2_init(si_t *sih); + +extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel); +extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin); +extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel); +extern uint8 
si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos); +extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val); +extern uint32 si_gci_chipstatus(si_t *sih, uint reg); +extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status); +extern uint8 si_get_device_wake_opt(si_t *sih); +extern void si_swdenable(si_t *sih, uint32 swdflag); +extern uint8 si_enable_perst_wake(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status); + +extern uint32 si_get_pmu_reg_addr(si_t *sih, uint32 offset); +#define CHIPCTRLREG1 0x1 +#define CHIPCTRLREG2 0x2 +#define CHIPCTRLREG3 0x3 +#define CHIPCTRLREG4 0x4 +#define CHIPCTRLREG5 0x5 +#define MINRESMASKREG 0x618 +#define MAXRESMASKREG 0x61c +#define CHIPCTRLADDR 0x650 +#define CHIPCTRLDATA 0x654 +#define RSRCTABLEADDR 0x620 +#define RSRCUPDWNTIME 0x628 +#define PMUREG_RESREQ_MASK 0x68c + +void si_update_masks(si_t *sih); +void si_force_islanding(si_t *sih, bool enable); +extern uint32 si_pmu_res_req_timer_clr(si_t *sih); +extern void si_pmu_rfldo(si_t *sih, bool on); +extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val); +extern void si_pcie_ltr_war(si_t *sih); +extern void si_pcie_hw_LTR_war(si_t *sih); +extern void si_pcie_hw_L1SS_war(si_t *sih); +extern void si_pciedev_crwlpciegen2(si_t *sih); +extern void si_pcie_prep_D3(si_t *sih, bool enter_D3); +extern void si_pciedev_reg_pm_clk_period(si_t *sih); +extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); +extern void si_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern void si_pcie_disable_oobselltr(si_t *sih); +extern uint32 si_raw_reg(si_t *sih, uint32 reg, uint32 val, uint32 wrire_req); + +#ifdef WLRSDB +extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits); +extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void set_secondary_d11_core(si_t *sih, volatile void **secmap, volatile void **secwrap); +#endif // endif + +/* Macro to enable clock 
gating changes in different cores */ +#define MEM_CLK_GATE_BIT 5 +#define GCI_CLK_GATE_BIT 18 + +#define USBAPP_CLK_BIT 0 +#define PCIE_CLK_BIT 3 +#define ARMCR4_DBG_CLK_BIT 4 +#define SAMPLE_SYNC_CLK_BIT 17 +#define PCIE_TL_CLK_BIT 18 +#define HQ_REQ_BIT 24 +#define PLL_DIV2_BIT_START 9 +#define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START) +#define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START) + +#define pmu_corereg(si, cc_idx, member, mask, val) \ + (AOB_ENAB(si) ? \ + si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val): \ + si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val)) + +/* Used only for the regs present in the pmu core and not present in the old cc core */ +#define PMU_REG_NEW(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val) + +#define PMU_REG(si, member, mask, val) \ + (AOB_ENAB(si) ? \ + si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val): \ + si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val)) + +/* Used only for the regs present in the pmu core and not present in the old cc core */ +#define PMU_REG_NEW(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val) + +#define GCI_REG(si, offset, mask, val) \ + (AOB_ENAB(si) ? 
\ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + offset, mask, val): \ + si_corereg(si, SI_CC_IDX, offset, mask, val)) + +/* Used only for the regs present in the gci core and not present in the old cc core */ +#define GCI_REG_NEW(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + OFFSETOF(gciregs_t, member), mask, val) + +#define LHL_REG(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + OFFSETOF(gciregs_t, member), mask, val) + +#define CHIPC_REG(si, member, mask, val) \ + si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val) + +/* GCI Macros */ +#define ALLONES_32 0xFFFFFFFF +#define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */ +#define GCI_CCTL_RSTSL_OFFSET 1 /**< ResetSeciLogic */ +#define GCI_CCTL_SECIEN_OFFSET 2 /**< EnableSeci */ +#define GCI_CCTL_FSL_OFFSET 3 /**< ForceSeciOutLow */ +#define GCI_CCTL_SMODE_OFFSET 4 /**< SeciOpMode, 6:4 */ +#define GCI_CCTL_US_OFFSET 7 /**< UpdateSeci */ +#define GCI_CCTL_BRKONSLP_OFFSET 8 /**< BreakOnSleep */ +#define GCI_CCTL_SILOWTOUT_OFFSET 9 /**< SeciInLowTimeout, 10:9 */ +#define GCI_CCTL_RSTOCC_OFFSET 11 /**< ResetOffChipCoex */ +#define GCI_CCTL_ARESEND_OFFSET 12 /**< AutoBTSigResend */ +#define GCI_CCTL_FGCR_OFFSET 16 /**< ForceGciClkReq */ +#define GCI_CCTL_FHCRO_OFFSET 17 /**< ForceHWClockReqOff */ +#define GCI_CCTL_FREGCLK_OFFSET 18 /**< ForceRegClk */ +#define GCI_CCTL_FSECICLK_OFFSET 19 /**< ForceSeciClk */ +#define GCI_CCTL_FGCA_OFFSET 20 /**< ForceGciClkAvail */ +#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */ +#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */ +#define GCI_CCTL_SCS 25 /* SeciClkStretch */ + +#define GCI_MODE_UART 0x0 +#define GCI_MODE_SECI 0x1 +#define GCI_MODE_BTSIG 0x2 +#define GCI_MODE_GPIO 0x3 +#define GCI_MODE_MASK 0x7 + +#define GCI_CCTL_LOWTOUT_DIS 0x0 +#define GCI_CCTL_LOWTOUT_10BIT 0x1 +#define GCI_CCTL_LOWTOUT_20BIT 0x2 +#define GCI_CCTL_LOWTOUT_30BIT 0x3 +#define 
GCI_CCTL_LOWTOUT_MASK 0x3 + +#define GCI_CCTL_SCS_DEF 0x19 +#define GCI_CCTL_SCS_MASK 0xFF + +#define GCI_SECIIN_MODE_OFFSET 0 +#define GCI_SECIIN_GCIGPIO_OFFSET 4 +#define GCI_SECIIN_RXID2IP_OFFSET 8 + +#define GCI_SECIIN_MODE_MASK 0x7 +#define GCI_SECIIN_GCIGPIO_MASK 0xF + +#define GCI_SECIOUT_MODE_OFFSET 0 +#define GCI_SECIOUT_GCIGPIO_OFFSET 4 +#define GCI_SECIOUT_LOOPBACK_OFFSET 8 +#define GCI_SECIOUT_SECIINRELATED_OFFSET 16 + +#define GCI_SECIOUT_MODE_MASK 0x7 +#define GCI_SECIOUT_GCIGPIO_MASK 0xF +#define GCI_SECIOUT_SECIINRELATED_MASK 0x1 + +#define GCI_SECIOUT_SECIINRELATED 0x1 + +#define GCI_SECIAUX_RXENABLE_OFFSET 0 +#define GCI_SECIFIFO_RXENABLE_OFFSET 16 + +#define GCI_SECITX_ENABLE_OFFSET 0 + +#define GCI_GPIOCTL_INEN_OFFSET 0 +#define GCI_GPIOCTL_OUTEN_OFFSET 1 +#define GCI_GPIOCTL_PDN_OFFSET 4 + +#define GCI_GPIOIDX_OFFSET 16 + +#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */ +#define GCI_LTECX_TXCONF_EN_OFFSET 2 +#define GCI_LTECX_PRISEL_EN_OFFSET 3 + +/* To access per GCI bit registers */ +#define GCI_REG_WIDTH 32 + +/* number of event summary bits */ +#define GCI_EVENT_NUM_BITS 32 + +/* gci event bits per core */ +#define GCI_EVENT_BITS_PER_CORE 4 +#define GCI_EVENT_HWBIT_1 1 +#define GCI_EVENT_HWBIT_2 2 +#define GCI_EVENT_SWBIT_1 3 +#define GCI_EVENT_SWBIT_2 4 + +#define GCI_MBDATA_TOWLAN_POS 96 +#define GCI_MBACK_TOWLAN_POS 104 +#define GCI_WAKE_TOWLAN_PO 112 +#define GCI_SWREADY_POS 120 + +/* GCI bit positions */ +/* GCI [127:000] = WLAN [127:0] */ +#define GCI_WLAN_IP_ID 0 +#define GCI_WLAN_BEGIN 0 +#define GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4) +#define GCI_WLAN_PERST_POS (GCI_WLAN_BEGIN + 15) + +/* GCI [255:128] = BT [127:0] */ +#define GCI_BT_IP_ID 1 +#define GCI_BT_BEGIN 128 +#define GCI_BT_MBDATA_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBDATA_TOWLAN_POS) +#define GCI_BT_MBACK_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBACK_TOWLAN_POS) +#define GCI_BT_WAKE_TOWLAN_POS (GCI_BT_BEGIN + GCI_WAKE_TOWLAN_PO) +#define GCI_BT_SWREADY_POS (GCI_BT_BEGIN + 
GCI_SWREADY_POS) + +/* GCI [639:512] = LTE [127:0] */ +#define GCI_LTE_IP_ID 4 +#define GCI_LTE_BEGIN 512 +#define GCI_LTE_FRAMESYNC_POS (GCI_LTE_BEGIN + 0) +#define GCI_LTE_RX_POS (GCI_LTE_BEGIN + 1) +#define GCI_LTE_TX_POS (GCI_LTE_BEGIN + 2) +#define GCI_LTE_WCI2TYPE_POS (GCI_LTE_BEGIN + 48) +#define GCI_LTE_WCI2TYPE_MASK 7 +#define GCI_LTE_AUXRXDVALID_POS (GCI_LTE_BEGIN + 56) + +/* Reg Index corresponding to ECI bit no x of ECI space */ +#define GCI_REGIDX(x) ((x)/GCI_REG_WIDTH) +/* Bit offset of ECI bit no x in 32-bit words */ +#define GCI_BITOFFSET(x) ((x)%GCI_REG_WIDTH) + +/* BT SMEM Control Register 0 */ +#define GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL (1 << 28) + +/* End - GCI Macros */ + +#define AXI_OOB 0x7 + +extern void si_pll_sr_reinit(si_t *sih); +extern void si_pll_closeloop(si_t *sih); +void si_config_4364_d11_oob(si_t *sih, uint coreid); +extern void si_gci_set_femctrl(si_t *sih, osl_t *osh, bool set); +extern void si_gci_set_femctrl_mask_ant01(si_t *sih, osl_t *osh, bool set); +extern uint si_num_slaveports(si_t *sih, uint coreid); +extern uint32 si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx, + uint core_id, uint coreunit); +extern uint32 si_get_d11_slaveport_addr(si_t *sih, uint spidx, + uint baidx, uint coreunit); +uint si_introff(si_t *sih); +void si_intrrestore(si_t *sih, uint intr_val); +void si_nvram_res_masks(si_t *sih, uint32 *min_mask, uint32 *max_mask); +extern uint32 si_xtalfreq(si_t *sih); +extern uint8 si_getspurmode(si_t *sih); +extern uint32 si_get_openloop_dco_code(si_t *sih); +extern void si_set_openloop_dco_code(si_t *sih, uint32 openloop_dco_code); +extern uint32 si_wrapper_dump_buf_size(si_t *sih); +extern uint32 si_wrapper_dump_binary(si_t *sih, uchar *p); +extern uint32 si_wrapper_dump_last_timeout(si_t *sih, uint32 *error, uint32 *core, uint32 *ba, + uchar *p); + +/* SR Power Control */ +extern uint32 si_srpwr_request(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_srpwr_stat_spinwait(si_t *sih, uint32 mask, 
uint32 val); +extern uint32 si_srpwr_stat(si_t *sih); +extern uint32 si_srpwr_domain(si_t *sih); + +/* SR Power Control */ + /* No capabilities bit so using chipid for now */ +#define SRPWR_CAP(sih) (BCM4347_CHIP(sih->chip) || BCM4369_CHIP(sih->chip)) + +#ifdef BCMSRPWR + extern bool _bcmsrpwr; + #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define SRPWR_ENAB() (_bcmsrpwr) + #elif defined(BCMSRPWR_DISABLED) + #define SRPWR_ENAB() (0) + #else + #define SRPWR_ENAB() (1) + #endif +#else + #define SRPWR_ENAB() (0) +#endif /* BCMSRPWR */ + +/* + * Multi-BackPlane architecture. Each can power up/down independently. + * Common backplane: shared between BT and WL + * ChipC, PCIe, GCI, PMU, SRs + * HW powers up as needed + * WL BackPlane (WLBP): + * ARM, TCM, Main, Aux + * Host needs to power up + */ +#define MULTIBP_ENAB(sih) ((sih) && (sih)->_multibp_enable) + +uint32 si_enum_base(uint devid); + +extern uint8 si_lhl_ps_mode(si_t *sih); + +#ifdef UART_TRAP_DBG +void ai_dump_APB_Bridge_registers(si_t *sih); +#endif /* UART_TRAP_DBG */ + +void si_clrirq_idx(si_t *sih, uint core_idx); + +#endif /* _siutils_h_ */ diff --git a/bcmdhd.100.10.315.x/include/spid.h b/bcmdhd.100.10.315.x/include/spid.h new file mode 100644 index 0000000..6d41222 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/spid.h @@ -0,0 +1,168 @@ +/* + * SPI device spec header file + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: spid.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SPI_H +#define _SPI_H + +/* + * Brcm SPI Device Register Map. 
+ * + */ + +typedef volatile struct { + uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */ + uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */ + uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay + * function selection, command/data error check + */ + uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */ + uint16 intr_reg; /* 0x04, Intr status register */ + uint16 intr_en_reg; /* 0x06, Intr mask register */ + uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */ + uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */ + uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */ + uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */ + uint32 test_read; /* 0x14, RO 0xfeedbead signature */ + uint32 test_rw; /* 0x18, RW */ + uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */ + uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */ + uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */ + uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */ +} spi_regs_t; + +/* SPI device register offsets */ +#define SPID_CONFIG 0x00 +#define SPID_RESPONSE_DELAY 0x01 +#define SPID_STATUS_ENABLE 0x02 +#define SPID_RESET_BP 0x03 /* (corerev >= 1) */ +#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */ +#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */ +#define SPID_STATUS_REG 0x08 /* 32 bits */ +#define SPID_F1_INFO_REG 0x0C /* 16 bits */ +#define SPID_F2_INFO_REG 0x0E /* 16 bits */ +#define SPID_F3_INFO_REG 0x10 /* 16 bits */ +#define SPID_TEST_READ 0x14 /* 32 bits */ +#define SPID_TEST_RW 0x18 /* 32 bits */ +#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */ +#define 
SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */ + +/* Bit masks for SPID_CONFIG device register */ +#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */ +#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */ +#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */ +#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */ +#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */ +#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */ +#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */ + +/* Bit mask for SPID_RESPONSE_DELAY device register */ +#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */ + +/* Bit mask for SPID_STATUS_ENABLE device register */ +#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */ +#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */ +#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */ +#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */ +#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */ +#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */ + +/* Bit mask for SPID_RESET_BP device register */ +#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */ +#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */ +#define RESET_SPI 0x80 /* reset the above enabled logic */ + +/* Bit mask for SPID_INTR_REG device register */ +#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */ +#define F2_F3_FIFO_RD_UNDERFLOW 0x0002 +#define F2_F3_FIFO_WR_OVERFLOW 0x0004 +#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */ +#define DATA_ERROR 0x0010 /* Cleared by writing 1 */ +#define F2_PACKET_AVAILABLE 0x0020 +#define F3_PACKET_AVAILABLE 0x0040 +#define F1_OVERFLOW 0x0080 /* Due to last write. 
Bkplane has pending write requests */ +#define MISC_INTR0 0x0100 +#define MISC_INTR1 0x0200 +#define MISC_INTR2 0x0400 +#define MISC_INTR3 0x0800 +#define MISC_INTR4 0x1000 +#define F1_INTR 0x2000 +#define F2_INTR 0x4000 +#define F3_INTR 0x8000 + +/* Bit mask for 32bit SPID_STATUS_REG device register */ +#define STATUS_DATA_NOT_AVAILABLE 0x00000001 +#define STATUS_UNDERFLOW 0x00000002 +#define STATUS_OVERFLOW 0x00000004 +#define STATUS_F2_INTR 0x00000008 +#define STATUS_F3_INTR 0x00000010 +#define STATUS_F2_RX_READY 0x00000020 +#define STATUS_F3_RX_READY 0x00000040 +#define STATUS_HOST_CMD_DATA_ERR 0x00000080 +#define STATUS_F2_PKT_AVAILABLE 0x00000100 +#define STATUS_F2_PKT_LEN_MASK 0x000FFE00 +#define STATUS_F2_PKT_LEN_SHIFT 9 +#define STATUS_F3_PKT_AVAILABLE 0x00100000 +#define STATUS_F3_PKT_LEN_MASK 0xFFE00000 +#define STATUS_F3_PKT_LEN_SHIFT 21 + +/* Bit mask for 16 bits SPID_F1_INFO_REG device register */ +#define F1_ENABLED 0x0001 +#define F1_RDY_FOR_DATA_TRANSFER 0x0002 +#define F1_MAX_PKT_SIZE 0x01FC + +/* Bit mask for 16 bits SPID_F2_INFO_REG device register */ +#define F2_ENABLED 0x0001 +#define F2_RDY_FOR_DATA_TRANSFER 0x0002 +#define F2_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 16 bits SPID_F3_INFO_REG device register */ +#define F3_ENABLED 0x0001 +#define F3_RDY_FOR_DATA_TRANSFER 0x0002 +#define F3_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */ +#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD + +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 + +#define SPI_MAX_PKT_LEN (2048*4) + +/* Misc defines */ +#define SPI_FUNC_0 0 +#define SPI_FUNC_1 1 +#define SPI_FUNC_2 2 +#define SPI_FUNC_3 3 + +#define WAIT_F2RXFIFORDY 100 +#define WAIT_F2RXFIFORDY_DELAY 20 + +#endif /* _SPI_H */ diff --git a/bcmdhd.100.10.315.x/include/trxhdr.h b/bcmdhd.100.10.315.x/include/trxhdr.h new file mode 100644 index 0000000..77a7f09 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/trxhdr.h @@ -0,0 +1,95 @@ +/* + * TRX 
image file header format. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: trxhdr.h 520026 2014-12-10 01:29:40Z $ + */ + +#ifndef _TRX_HDR_H +#define _TRX_HDR_H + +#include + +#define TRX_MAGIC 0x30524448 /* "HDR0" */ +#define TRX_MAX_LEN 0x3B0000 /* Max length */ +#define TRX_NO_HEADER 1 /* Do not write TRX header */ +#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */ +#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */ +#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */ +#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */ +#define TRX_BOOTLOADER 0x40 /* the image is a bootloader */ + +#define TRX_V1 1 +#define TRX_V1_MAX_OFFSETS 3 /* V1: Max number of individual files */ + +#ifndef BCMTRXV2 +#define TRX_VERSION TRX_V1 /* Version 1 */ +#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS +#endif // endif + +/* BMAC Host driver/application like bcmdl need to support both Ver 1 as well as + * Ver 2 of trx header. To make it generic, trx_header is structure is modified + * as below where size of "offsets" field will vary as per the TRX version. + * Currently, BMAC host driver and bcmdl are modified to support TRXV2 as well. + * To make sure, other applications like "dhdl" which are yet to be enhanced to support + * TRXV2 are not broken, new macro and structure defintion take effect only when BCMTRXV2 + * is defined. 
+ */ +struct trx_header { + uint32 magic; /* "HDR0" */ + uint32 len; /* Length of file including header */ + uint32 crc32; /* 32-bit CRC from flag_version to end of file */ + uint32 flag_version; /* 0:15 flags, 16:31 version */ +#ifndef BCMTRXV2 + uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */ +#else + uint32 offsets[1]; /* Offsets of partitions from start of header */ +#endif // endif +}; + +#ifdef BCMTRXV2 +#define TRX_VERSION TRX_V2 /* Version 2 */ +#define TRX_MAX_OFFSET TRX_V2_MAX_OFFSETS + +#define TRX_V2 2 +/* V2: Max number of individual files + * To support SDR signature + Config data region + */ +#define TRX_V2_MAX_OFFSETS 5 +#define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32)) +#define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32)) +#define TRX_VER(trx) ((trx)->flag_version>>16) +#define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1) +#define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2) +/* For V2, return size of V2 size: others, return V1 size */ +#define SIZEOF_TRX(trx) (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1) +#else +#define SIZEOF_TRX(trx) (sizeof(struct trx_header)) +#endif /* BCMTRXV2 */ + +/* Compatibility */ +typedef struct trx_header TRXHDR, *PTRXHDR; + +#endif /* _TRX_HDR_H */ diff --git a/bcmdhd.100.10.315.x/include/typedefs.h b/bcmdhd.100.10.315.x/include/typedefs.h new file mode 100644 index 0000000..397a343 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/typedefs.h @@ -0,0 +1,367 @@ +/* + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: typedefs.h 742663 2018-01-23 06:57:52Z $ + */ + +#ifndef _TYPEDEFS_H_ +#define _TYPEDEFS_H_ + +#if (!defined(EDK_RELEASE_VERSION) || (EDK_RELEASE_VERSION < 0x00020000)) || \ + !defined(BWL_NO_INTERNAL_STDLIB_SUPPORT) + +#ifdef SITE_TYPEDEFS + +/* + * Define SITE_TYPEDEFS in the compile to include a site-specific + * typedef file "site_typedefs.h". + * + * If SITE_TYPEDEFS is not defined, then the code section below makes + * inferences about the compile environment based on defined symbols and + * possibly compiler pragmas. + * + * Following these two sections is the Default Typedefs section. + * This section is only processed if USE_TYPEDEF_DEFAULTS is + * defined. This section has a default set of typedefs and a few + * preprocessor symbols (TRUE, FALSE, NULL, ...). 
+ */ + +#include "site_typedefs.h" + +#else + +/* + * Infer the compile environment based on preprocessor symbols and pragmas. + * Override type definitions as needed, and include configuration-dependent + * header files to define types. + */ + +#ifdef __cplusplus + +#define TYPEDEF_BOOL +#ifndef FALSE +#define FALSE false +#endif // endif +#ifndef TRUE +#define TRUE true +#endif // endif + +#else /* ! __cplusplus */ + +#endif /* ! __cplusplus */ + +#if !defined(TYPEDEF_UINTPTR) +#if defined(__LP64__) +#define TYPEDEF_UINTPTR +typedef unsigned long long int uintptr; +#endif // endif +#endif /* TYPEDEF_UINTPTR */ + +/* float_t types conflict with the same typedefs from the standard ANSI-C +** math.h header file. Don't re-typedef them here. +*/ + +#if defined(_NEED_SIZE_T_) +typedef long unsigned int size_t; +#endif // endif + +#if defined(__sparc__) +#define TYPEDEF_ULONG +#endif // endif + +/* + * If this is either a Linux hybrid build or the per-port code of a hybrid build + * then use the Linux header files to get some of the typedefs. Otherwise, define + * them entirely in this file. We can't always define the types because we get + * a duplicate typedef error; there is no way to "undefine" a typedef. + * We know when it's per-port code because each file defines LINUX_PORT at the top. 
+ */ +#define TYPEDEF_UINT +#ifndef TARGETENV_android +#define TYPEDEF_USHORT +#define TYPEDEF_ULONG +#endif /* TARGETENV_android */ +#ifdef __KERNEL__ +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)) +#define TYPEDEF_BOOL +#endif /* >= 2.6.19 */ +/* special detection for 2.6.18-128.7.1.0.1.el5 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)) +#include +#ifdef noinline_for_stack +#define TYPEDEF_BOOL +#endif // endif +#endif /* == 2.6.18 */ +#endif /* __KERNEL__ */ + +/* Do not support the (u)int64 types with strict ansi for GNU C */ +#if defined(__GNUC__) && defined(__STRICT_ANSI__) +#define TYPEDEF_INT64 +#define TYPEDEF_UINT64 +#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */ + +/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode + * for signed or unsigned + */ +#if defined(__ICL) + +#define TYPEDEF_INT64 + +#if defined(__STDC__) +#define TYPEDEF_UINT64 +#endif // endif + +#endif /* __ICL */ + +#if !defined(__DJGPP__) + +/* pick up ushort & uint from standard types.h */ +#if defined(__KERNEL__) + +/* See note above */ +#include /* sys/types.h and linux/types.h are oil and water */ + +#else + +#include + +#endif /* linux && __KERNEL__ */ + +#endif // endif + +/* use the default typedefs in the next section of this file */ +#define USE_TYPEDEF_DEFAULTS + +#endif /* SITE_TYPEDEFS */ + +/* + * Default Typedefs + */ + +#ifdef USE_TYPEDEF_DEFAULTS +#undef USE_TYPEDEF_DEFAULTS + +#ifndef TYPEDEF_BOOL +typedef /* @abstract@ */ unsigned char bool; +#endif /* endif TYPEDEF_BOOL */ + +/* define uchar, ushort, uint, ulong */ + +#ifndef TYPEDEF_UCHAR +typedef unsigned char uchar; +#endif // endif + +#ifndef TYPEDEF_USHORT +typedef unsigned short ushort; +#endif // endif + +#ifndef TYPEDEF_UINT +typedef unsigned int uint; +#endif // endif + +#ifndef TYPEDEF_ULONG +typedef unsigned long ulong; +#endif // endif + +/* define [u]int8/16/32/64, uintptr */ + +#ifndef TYPEDEF_UINT8 +typedef unsigned char uint8; +#endif // endif + 
+#ifndef TYPEDEF_UINT16 +typedef unsigned short uint16; +#endif // endif + +#ifndef TYPEDEF_UINT32 +typedef unsigned int uint32; +#endif // endif + +#ifndef TYPEDEF_UINT64 +typedef unsigned long long uint64; +#endif // endif + +#ifndef TYPEDEF_UINTPTR +typedef unsigned int uintptr; +#endif // endif + +#ifndef TYPEDEF_INT8 +typedef signed char int8; +#endif // endif + +#ifndef TYPEDEF_INT16 +typedef signed short int16; +#endif // endif + +#ifndef TYPEDEF_INT32 +typedef signed int int32; +#endif // endif + +#ifndef TYPEDEF_INT64 +typedef signed long long int64; +#endif // endif + +/* define float32/64, float_t */ + +#ifndef TYPEDEF_FLOAT32 +typedef float float32; +#endif // endif + +#ifndef TYPEDEF_FLOAT64 +typedef double float64; +#endif // endif + +/* + * abstracted floating point type allows for compile time selection of + * single or double precision arithmetic. Compiling with -DFLOAT32 + * selects single precision; the default is double precision. + */ + +#ifndef TYPEDEF_FLOAT_T + +#if defined(FLOAT32) +typedef float32 float_t; +#else /* default to double precision floating point */ +typedef float64 float_t; +#endif // endif + +#endif /* TYPEDEF_FLOAT_T */ + +/* define macro values */ + +#ifndef FALSE +#define FALSE 0 +#endif // endif + +#ifndef TRUE +#define TRUE 1 /* TRUE */ +#endif // endif + +#ifndef NULL +#define NULL 0 +#endif // endif + +#ifndef OFF +#define OFF 0 +#endif // endif + +#ifndef ON +#define ON 1 /* ON = 1 */ +#endif // endif + +#define AUTO (-1) /* Auto = -1 */ + +/* define PTRSZ, INLINE */ + +#ifndef PTRSZ +#define PTRSZ sizeof(char*) +#endif // endif + +/* Detect compiler type. */ +#if defined(__GNUC__) || defined(__lint) + #define BWL_COMPILER_GNU +#elif defined(__CC_ARM) && __CC_ARM + #define BWL_COMPILER_ARMCC +#else + #error "Unknown compiler!" 
+#endif // endif + +#ifndef INLINE + #if defined(BWL_COMPILER_MICROSOFT) + #define INLINE __inline + #elif defined(BWL_COMPILER_GNU) + #define INLINE __inline__ + #elif defined(BWL_COMPILER_ARMCC) + #define INLINE __inline + #else + #define INLINE + #endif +#endif /* INLINE */ + +#undef TYPEDEF_BOOL +#undef TYPEDEF_UCHAR +#undef TYPEDEF_USHORT +#undef TYPEDEF_UINT +#undef TYPEDEF_ULONG +#undef TYPEDEF_UINT8 +#undef TYPEDEF_UINT16 +#undef TYPEDEF_UINT32 +#undef TYPEDEF_UINT64 +#undef TYPEDEF_UINTPTR +#undef TYPEDEF_INT8 +#undef TYPEDEF_INT16 +#undef TYPEDEF_INT32 +#undef TYPEDEF_INT64 +#undef TYPEDEF_FLOAT32 +#undef TYPEDEF_FLOAT64 +#undef TYPEDEF_FLOAT_T + +#endif /* USE_TYPEDEF_DEFAULTS */ + +/* Suppress unused parameter warning */ +#define UNUSED_PARAMETER(x) (void)(x) + +/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */ +#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr)) + +#else /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */ + +#include +#include +#include + +#ifdef stderr +#undef stderr +#define stderr stdout +#endif // endif + +typedef UINT8 uint8; +typedef UINT16 uint16; +typedef UINT32 uint32; +typedef UINT64 uint64; +typedef INT8 int8; +typedef INT16 int16; +typedef INT32 int32; +typedef INT64 int64; + +typedef BOOLEAN bool; +typedef unsigned char uchar; +typedef UINTN uintptr; + +#define UNUSED_PARAMETER(x) (void)(x) +#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr)) +#define INLINE +#define AUTO (-1) /* Auto = -1 */ +#define ON 1 /* ON = 1 */ +#define OFF 0 + +#endif /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */ + +/* + * Including the bcmdefs.h here, to make sure everyone including typedefs.h + * gets this automatically +*/ +#include +#endif /* _TYPEDEFS_H_ */ diff --git a/bcmdhd.100.10.315.x/include/usbrdl.h b/bcmdhd.100.10.315.x/include/usbrdl.h new file mode 100644 index 0000000..be5bd69 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/usbrdl.h @@ -0,0 +1,134 
@@ +/* + * Broadcom USB remote download definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: usbrdl.h 597933 2015-11-06 18:52:06Z $ + */ + +#ifndef _USB_RDL_H +#define _USB_RDL_H + +/* Control messages: bRequest values */ +#define DL_GETSTATE 0 /* returns the rdl_state_t struct */ +#define DL_CHECK_CRC 1 /* currently unused */ +#define DL_GO 2 /* execute downloaded image */ +#define DL_START 3 /* initialize dl state */ +#define DL_REBOOT 4 /* reboot the device in 2 seconds */ +#define DL_GETVER 5 /* returns the bootrom_id_t struct */ +#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset event + * to occur in 2 seconds. 
It is the responsibility + * of the downloaded code to clear this event + */ +#define DL_EXEC 7 /* jump to a supplied address */ +#define DL_RESETCFG 8 /* To support single enum on dongle + * - Not used by bootloader + */ +#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup + * if resp unavailable + */ +#define DL_CHGSPD 0x0A + +#define DL_HWCMD_MASK 0xfc /* Mask for hardware read commands: */ +#define DL_RDHW 0x10 /* Read a hardware address (Ctl-in) */ +#define DL_RDHW32 0x10 /* Read a 32 bit word */ +#define DL_RDHW16 0x11 /* Read 16 bits */ +#define DL_RDHW8 0x12 /* Read an 8 bit byte */ +#define DL_WRHW 0x14 /* Write a hardware address (Ctl-out) */ +#define DL_WRHW_BLK 0x13 /* Block write to hardware access */ + +#define DL_CMD_WRHW 2 + + +/* states */ +#define DL_WAITING 0 /* waiting to rx first pkt that includes the hdr info */ +#define DL_READY 1 /* hdr was good, waiting for more of the compressed image */ +#define DL_BAD_HDR 2 /* hdr was corrupted */ +#define DL_BAD_CRC 3 /* compressed image was corrupted */ +#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */ +#define DL_START_FAIL 5 /* failed to initialize correctly */ +#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM value */ +#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START for rdl) */ + +#define TIMEOUT 5000 /* Timeout for usb commands */ + +struct bcm_device_id { + char *name; + uint32 vend; + uint32 prod; +}; + +typedef struct { + uint32 state; + uint32 bytes; +} rdl_state_t; + +typedef struct { + uint32 chip; /* Chip id */ + uint32 chiprev; /* Chip rev */ + uint32 ramsize; /* Size of RAM */ + uint32 remapbase; /* Current remap base address */ + uint32 boardtype; /* Type of board */ + uint32 boardrev; /* Board revision */ +} bootrom_id_t; + +/* struct for backplane & jtag accesses */ +typedef struct { + uint32 cmd; /* tag to identify the cmd */ + uint32 addr; /* backplane address for write */ + uint32 len; /* length 
of data: 1, 2, 4 bytes */ + uint32 data; /* data to write */ +} hwacc_t; + + +/* struct for querying nvram params from bootloader */ +#define QUERY_STRING_MAX 32 +typedef struct { + uint32 cmd; /* tag to identify the cmd */ + char var[QUERY_STRING_MAX]; /* param name */ +} nvparam_t; + +typedef void (*exec_fn_t)(void *sih); + +#define USB_CTRL_IN (USB_TYPE_VENDOR | 0x80 | USB_RECIP_INTERFACE) +#define USB_CTRL_OUT (USB_TYPE_VENDOR | 0 | USB_RECIP_INTERFACE) + +#define USB_CTRL_EP_TIMEOUT 500 /* Timeout used in USB control_msg transactions. */ +#define USB_BULK_EP_TIMEOUT 500 /* Timeout used in USB bulk transactions. */ + +#define RDL_CHUNK_MAX (64 * 1024) /* max size of each dl transfer */ +#define RDL_CHUNK 1500 /* size of each dl transfer */ + +/* bootloader makes special use of trx header "offsets" array */ +#define TRX_OFFSETS_DLFWLEN_IDX 0 /* Size of the fw; used in uncompressed case */ +#define TRX_OFFSETS_JUMPTO_IDX 1 /* RAM address for jumpto after download */ +#define TRX_OFFSETS_NVM_LEN_IDX 2 /* Length of appended NVRAM data */ +#ifdef BCMTRXV2 +#define TRX_OFFSETS_DSG_LEN_IDX 3 /* Length of digital signature for the first image */ +#define TRX_OFFSETS_CFG_LEN_IDX 4 /* Length of config region, which is not digitally signed */ +#endif /* BCMTRXV2 */ + +#define TRX_OFFSETS_DLBASE_IDX 0 /* RAM start address for download */ + +#endif /* _USB_RDL_H */ diff --git a/bcmdhd.100.10.315.x/include/vlan.h b/bcmdhd.100.10.315.x/include/vlan.h new file mode 100644 index 0000000..9f5fcdd --- /dev/null +++ b/bcmdhd.100.10.315.x/include/vlan.h @@ -0,0 +1,97 @@ +/* + * 802.1Q VLAN protocol definitions + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: vlan.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _vlan_h_ +#define _vlan_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif // endif + +/* This marks the start of a packed structure section. 
*/ +#include + +#ifndef VLAN_VID_MASK +#define VLAN_VID_MASK 0xfff /* low 12 bits are vlan id */ +#endif // endif + +#define VLAN_CFI_SHIFT 12 /* canonical format indicator bit */ +#define VLAN_PRI_SHIFT 13 /* user priority */ + +#define VLAN_PRI_MASK 7 /* 3 bits of priority */ + +#define VLAN_TPID_OFFSET 12 /* offset of tag protocol id field */ +#define VLAN_TCI_OFFSET 14 /* offset of tag ctrl info field */ + +#define VLAN_TAG_LEN 4 +#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) /* offset in Ethernet II packet only */ + +#define VLAN_TPID 0x8100 /* VLAN ethertype/Tag Protocol ID */ + +struct vlan_header { + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ +}; + +struct ethervlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ + uint16 ether_type; +}; + +struct dot3_mac_llc_snapvlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */ + uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */ + uint16 length; /* frame length incl header */ + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[3]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 vlan_type; /* 0x8100 */ + uint16 vlan_tag; /* priority, cfi and vid */ + uint16 ether_type; /* ethertype */ +}; + +#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN) + +/* This marks the end of a packed structure section. */ +#include + +#define ETHERVLAN_MOVE_HDR(d, s) \ +do { \ + struct ethervlan_header t; \ + t = *(struct ethervlan_header *)(s); \ + *(struct ethervlan_header *)(d) = t; \ +} while (0) + +#endif /* _vlan_h_ */ diff --git a/bcmdhd.100.10.315.x/include/wlfc_proto.h b/bcmdhd.100.10.315.x/include/wlfc_proto.h new file mode 100644 index 0000000..4684634 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/wlfc_proto.h @@ -0,0 +1,413 @@ +/* + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wlfc_proto.h 735303 2017-12-08 06:20:29Z $ + * + */ + +/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */ + +#ifndef __wlfc_proto_definitions_h__ +#define __wlfc_proto_definitions_h__ + + /* Use TLV to convey WLFC information. 
+ --------------------------------------------------------------------------- + | Type | Len | value | Description + --------------------------------------------------------------------------- + | 1 | 1 | (handle) | MAC OPEN + --------------------------------------------------------------------------- + | 2 | 1 | (handle) | MAC CLOSE + --------------------------------------------------------------------------- + | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn + --------------------------------------------------------------------------- + | 4 | 4+ | see pkttag comments | TXSTATUS + | | 12 | TX status & timestamps | Present only when pkt timestamp is enabled + --------------------------------------------------------------------------- + | 5 | 4 | see pkttag comments | PKKTTAG [host->firmware] + --------------------------------------------------------------------------- + | 6 | 8 | (handle, ifid, MAC) | MAC ADD + --------------------------------------------------------------------------- + | 7 | 8 | (handle, ifid, MAC) | MAC DEL + --------------------------------------------------------------------------- + | 8 | 1 | (rssi) | RSSI - RSSI value for the packet. + --------------------------------------------------------------------------- + | 9 | 1 | (interface ID) | Interface OPEN + --------------------------------------------------------------------------- + | 10 | 1 | (interface ID) | Interface CLOSE + --------------------------------------------------------------------------- + | 11 | 8 | fifo credit returns map | FIFO credits back to the host + | | | | + | | | | -------------------------------------- + | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim | + | | | | -------------------------------------- + | | | | + --------------------------------------------------------------------------- + | 12 | 2 | MAC handle, | Host provides a bitmap of pending + | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn. 
+ | | | | [host->firmware] + --------------------------------------------------------------------------- + | 13 | 3 | (count, handle, prec_bmp)| One time request for packet to a specific + | | | | MAC destination. + --------------------------------------------------------------------------- + | 15 | 12 | (pkttag, timestamps) | Send TX timestamp at reception from host + --------------------------------------------------------------------------- + | 16 | 12 | (pkttag, timestamps) | Send WLAN RX timestamp along with RX frame + --------------------------------------------------------------------------- + | 255 | N/A | N/A | FILLER - This is a special type + | | | | that has no length or value. + | | | | Typically used for padding. + --------------------------------------------------------------------------- + */ + +typedef enum { + WLFC_CTL_TYPE_MAC_OPEN = 1, + WLFC_CTL_TYPE_MAC_CLOSE = 2, + WLFC_CTL_TYPE_MAC_REQUEST_CREDIT = 3, + WLFC_CTL_TYPE_TXSTATUS = 4, + WLFC_CTL_TYPE_PKTTAG = 5, /** host<->dongle */ + + WLFC_CTL_TYPE_MACDESC_ADD = 6, + WLFC_CTL_TYPE_MACDESC_DEL = 7, + WLFC_CTL_TYPE_RSSI = 8, + + WLFC_CTL_TYPE_INTERFACE_OPEN = 9, + WLFC_CTL_TYPE_INTERFACE_CLOSE = 10, + + WLFC_CTL_TYPE_FIFO_CREDITBACK = 11, + + WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP = 12, /** host->dongle */ + WLFC_CTL_TYPE_MAC_REQUEST_PACKET = 13, + WLFC_CTL_TYPE_HOST_REORDER_RXPKTS = 14, + + WLFC_CTL_TYPE_TX_ENTRY_STAMP = 15, + WLFC_CTL_TYPE_RX_STAMP = 16, + WLFC_CTL_TYPE_TX_STATUS_STAMP = 17, /** obsolete */ + + WLFC_CTL_TYPE_TRANS_ID = 18, + WLFC_CTL_TYPE_COMP_TXSTATUS = 19, + + WLFC_CTL_TYPE_TID_OPEN = 20, + WLFC_CTL_TYPE_TID_CLOSE = 21, + WLFC_CTL_TYPE_UPD_FLR_WEIGHT = 22, + WLFC_CTL_TYPE_ENAB_FFSCH = 23, + WLFC_CTL_TYPE_UPDATE_FLAGS = 24, /* clear the flags set in flowring */ + WLFC_CTL_TYPE_CLEAR_SUPPR = 25, /* free the supression info in the flowring */ + + WLFC_CTL_TYPE_FLOWID_OPEN = 26, + WLFC_CTL_TYPE_FLOWID_CLOSE = 27, + + WLFC_CTL_TYPE_FILLER = 255 +} wlfc_ctl_type_t; + +#define 
WLFC_CTL_VALUE_LEN_FLOWID 2 + +#define WLFC_CTL_VALUE_LEN_MACDESC 8 /** handle, interface, MAC */ + +#define WLFC_CTL_VALUE_LEN_MAC 1 /** MAC-handle */ +#define WLFC_CTL_VALUE_LEN_RSSI 1 + +#define WLFC_CTL_VALUE_LEN_INTERFACE 1 +#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2 + +#define WLFC_CTL_VALUE_LEN_TXSTATUS 4 +#define WLFC_CTL_VALUE_LEN_PKTTAG 4 +#define WLFC_CTL_VALUE_LEN_TIMESTAMP 12 /** 4-byte rate info + 2 TSF */ + +#define WLFC_CTL_VALUE_LEN_SEQ 2 + +/* Reset the flags set for the corresponding flowring of the SCB which is de-inited */ +/* FLOW_RING_FLAG_LAST_TIM | FLOW_RING_FLAG_INFORM_PKTPEND | FLOW_RING_FLAG_PKT_REQ */ +#define WLFC_RESET_ALL_FLAGS 0 +#define WLFC_CTL_VALUE_LEN_FLAGS 7 /** flags, MAC */ + +/* free the data stored to be used for suppressed packets in future */ +#define WLFC_CTL_VALUE_LEN_SUPR 7 /** tid, MAC */ + +/* The high bits of ratespec report in timestamp are used for various status */ +#define WLFC_TSFLAGS_RX_RETRY (1 << 31) +#define WLFC_TSFLAGS_PM_ENABLED (1 << 30) +#define WLFC_TSFLAGS_MASK (WLFC_TSFLAGS_RX_RETRY | WLFC_TSFLAGS_PM_ENABLED) + +/* enough space to host all 4 ACs, bc/mc and atim fifo credit */ +#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6 + +#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */ +#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */ + +#define WLFC_PKTFLAG_PKTFROMHOST 0x01 +#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 +#define WLFC_PKTFLAG_PKT_SENDTOHOST 0x04 + +#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */ +#define WL_TXSTATUS_STATUS_SHIFT 24 + +#define WL_TXSTATUS_SET_STATUS(x, status) ((x) = \ + ((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \ + (((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT)) +#define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \ + WL_TXSTATUS_STATUS_MASK) + +/** + * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. 
It is set by the host to the + * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The + * firmware accepts a packet when the generation matches; on reset (startup) both "current" and + * "expected" are set to 0. + */ +#define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */ +#define WL_TXSTATUS_GENERATION_SHIFT 31 + +#define WL_TXSTATUS_SET_GENERATION(x, gen) ((x) = \ + ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \ + (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT)) + +#define WL_TXSTATUS_GET_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \ + WL_TXSTATUS_GENERATION_MASK) + +#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */ +#define WL_TXSTATUS_FLAGS_SHIFT 27 + +#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT)) +#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \ + WL_TXSTATUS_FLAGS_MASK) + +#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */ +#define WL_TXSTATUS_FIFO_SHIFT 24 + +#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT)) +#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK) + +#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */ +#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \ + ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num)) +#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK) + +#define WL_TXSTATUS_HSLOT_MASK 0xffff /* allow 16 bits */ +#define WL_TXSTATUS_HSLOT_SHIFT 8 + +#define WL_TXSTATUS_SET_HSLOT(x, hslot) ((x) = \ + ((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \ + (((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT)) +#define WL_TXSTATUS_GET_HSLOT(x) (((x) >> 
WL_TXSTATUS_HSLOT_SHIFT)& \ + WL_TXSTATUS_HSLOT_MASK) + +#define WL_TXSTATUS_FREERUNCTR_MASK 0xff /* allow 8 bits */ + +#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr) ((x) = \ + ((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \ + ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK)) +#define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK) + +/* AMSDU part of d11 seq number */ +#define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_AMSDU_SHIFT 14 +#define WL_SEQ_SET_AMSDU(x, val) ((x) = \ + ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \ + (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) /**< sets a single AMSDU bit */ +/** returns TRUE if ring item is AMSDU (seq = d11 seq nr) */ +#define WL_SEQ_IS_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \ + WL_SEQ_AMSDU_MASK) + +/* indicates last_suppr_seq is valid */ +#define WL_SEQ_VALIDSUPPR_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_VALIDSUPPR_SHIFT 12 +#define WL_SEQ_SET_VALIDSUPPR(x, val) ((x) = \ + ((x) & ~(WL_SEQ_VALIDSUPPR_MASK << WL_SEQ_VALIDSUPPR_SHIFT)) | \ + (((val) & WL_SEQ_VALIDSUPPR_MASK) << WL_SEQ_VALIDSUPPR_SHIFT)) +#define WL_SEQ_GET_VALIDSUPPR(x) (((x) >> WL_SEQ_VALIDSUPPR_SHIFT) & \ + WL_SEQ_VALIDSUPPR_MASK) + +#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMFW_SHIFT 13 +#define WL_SEQ_SET_FROMFW(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \ + (((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT)) +/** Set when firmware assigns D11 sequence number to packet */ +#define SET_WL_HAS_ASSIGNED_SEQ(x) WL_SEQ_SET_FROMFW((x), 1) + +/** returns TRUE if packet has been assigned a d11 seq number by the WL firmware layer */ +#define GET_WL_HAS_ASSIGNED_SEQ(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & WL_SEQ_FROMFW_MASK) + +/** + * Proptxstatus related. + * + * When a packet is suppressed by WL or the D11 core, the packet has to be retried. 
Assigning + * a new d11 sequence number for the packet when retrying would cause the peer to be unable to + * reorder the packets within an AMPDU. So, suppressed packet from bus layer (DHD for SDIO and + * pciedev for PCIE) is re-using d11 seq number, so FW should not assign a new one. + */ +#define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMDRV_SHIFT 12 + +/** + * Proptxstatus, host or fw PCIe layer requests WL layer to reuse d11 seq no. Bit is reset by WL + * subsystem when it reuses the seq number. + */ +#define WL_SEQ_SET_REUSE(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \ + (((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT)) +#define SET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 1) +#define RESET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 0) + +/** Proptxstatus, related to reuse of d11 seq numbers when retransmitting */ +#define IS_WL_TO_REUSE_SEQ(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \ + WL_SEQ_FROMDRV_MASK) + +#define WL_SEQ_NUM_MASK 0xfff /* allow 12 bit */ +#define WL_SEQ_NUM_SHIFT 0 +/** Proptxstatus, sets d11seq no in pkt tag, related to reuse of d11seq no when retransmitting */ +#define WL_SEQ_SET_NUM(x, val) ((x) = \ + ((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \ + (((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT)) +/** Proptxstatus, gets d11seq no from pkt tag, related to reuse of d11seq no when retransmitting */ +#define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \ + WL_SEQ_NUM_MASK) + +#define WL_SEQ_AMSDU_SUPPR_MASK ((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \ + (WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \ + (WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) + +/* 32 STA should be enough??, 6 bits; Must be power of 2 */ +#define WLFC_MAC_DESC_TABLE_SIZE 32 +#define WLFC_MAX_IFNUM 16 +#define WLFC_MAC_DESC_ID_INVALID 0xff + +/* b[7:5] -reuse guard, b[4:0] -value */ +#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f) + +#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \ + 
(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \ + ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_MAX_PENDING_DATALEN 120 + +/* host is free to discard the packet */ +#define WLFC_CTL_PKTFLAG_DISCARD 0 +/* D11 suppressed a packet */ +#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1 +/* WL firmware suppressed a packet because MAC is + already in PSMode (short time window) +*/ +#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2 +/* Firmware tossed this packet */ +#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3 +/* Firmware tossed after retries */ +#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4 +/* Firmware wrongly reported suppressed previously,now fixing to acked */ +#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5 +/* Firmware send this packet expired, lifetime expiration */ +#define WLFC_CTL_PKTFLAG_EXPIRED 6 +/* Firmware drop this packet for any other reason */ +#define WLFC_CTL_PKTFLAG_DROPPED 7 +/* Firmware free this packet */ +#define WLFC_CTL_PKTFLAG_MKTFREE 8 +#define WLFC_CTL_PKTFLAG_MASK (0x0f) /* For 4-bit mask with one extra bit */ + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_DBGMESG(x) printf x +/* wlfc-breadcrumb */ +#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \ + {printf("WLFC: %s():%d:caller:%p\n", \ + __FUNCTION__, __LINE__, CALL_SITE);}} while (0) +#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s)) +#else +#define WLFC_DBGMESG(x) +#define WLFC_BREADCRUMB(x) +#define WLFC_WHEREIS(s) +#endif /* PROP_TXSTATUS_DEBUG */ + +/* AMPDU host reorder packet flags */ +#define WLHOST_REORDERDATA_MAXFLOWS 256 +#define WLHOST_REORDERDATA_LEN 10 +#define WLHOST_REORDERDATA_TOTLEN (WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */ + +#define WLHOST_REORDERDATA_FLOWID_OFFSET 0 +#define WLHOST_REORDERDATA_MAXIDX_OFFSET 2 +#define WLHOST_REORDERDATA_FLAGS_OFFSET 4 +#define WLHOST_REORDERDATA_CURIDX_OFFSET 6 +#define WLHOST_REORDERDATA_EXPIDX_OFFSET 8 + +#define WLHOST_REORDERDATA_DEL_FLOW 
0x01 +#define WLHOST_REORDERDATA_FLUSH_ALL 0x02 +#define WLHOST_REORDERDATA_CURIDX_VALID 0x04 +#define WLHOST_REORDERDATA_EXPIDX_VALID 0x08 +#define WLHOST_REORDERDATA_NEW_HOLE 0x10 + +/* transaction id data len byte 0: rsvd, byte 1: seqnumber, byte 2-5 will be used for timestampe */ +#define WLFC_CTL_TRANS_ID_LEN 6 +#define WLFC_TYPE_TRANS_ID_LEN 6 + +#define WLFC_MODE_HANGER 1 /* use hanger */ +#define WLFC_MODE_AFQ 2 /* use afq (At Firmware Queue) */ +#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2)) + +#define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */ +#define WLFC_SET_AFQ(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_AFQ_SHIFT)) +/** returns TRUE if firmware supports 'at firmware queue' feature */ +#define WLFC_GET_AFQ(x) (((x) >> WLFC_MODE_AFQ_SHIFT) & 1) + +#define WLFC_MODE_REUSESEQ_SHIFT 3 /* seq reuse bit */ +#define WLFC_SET_REUSESEQ(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT)) + +/** returns TRUE if 'd11 sequence reuse' has been agreed upon between host and dongle */ +#if defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK) +/* GET_REUSESEQ is always TRUE in pciedev */ +#define WLFC_GET_REUSESEQ(x) (TRUE) +#else +#define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1) +#endif /* defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK) */ + +#define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */ +#define WLFC_SET_REORDERSUPP(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT)) +/** returns TRUE if 'reorder suppress' has been agreed upon between host and dongle */ +#define WLFC_GET_REORDERSUPP(x) (((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1) + +#define FLOW_RING_CREATE 1 +#define FLOW_RING_DELETE 2 +#define FLOW_RING_FLUSH 3 +#define FLOW_RING_OPEN 4 +#define FLOW_RING_CLOSED 5 +#define FLOW_RING_FLUSHED 6 +#define FLOW_RING_TIM_SET 7 +#define 
FLOW_RING_TIM_RESET 8 +#define FLOW_RING_FLUSH_TXFIFO 9 +#define FLOW_RING_GET_PKT_MAX 10 +#define FLOW_RING_RESET_WEIGHT 11 +#define FLOW_RING_UPD_PRIOMAP 12 + +/* bit 7, indicating if is TID(1) or AC(0) mapped info in tid field) */ +#define PCIEDEV_IS_AC_TID_MAP_MASK 0x80 + +#endif /* __wlfc_proto_definitions_h__ */ diff --git a/bcmdhd.100.10.315.x/include/wlioctl.h b/bcmdhd.100.10.315.x/include/wlioctl.h new file mode 100644 index 0000000..dbda76b --- /dev/null +++ b/bcmdhd.100.10.315.x/include/wlioctl.h @@ -0,0 +1,18636 @@ +/* + * Custom OID/ioctl definitions for + * + * + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: wlioctl.h 771856 2018-07-12 05:11:56Z $ + */ + +#ifndef _wlioctl_h_ +#define _wlioctl_h_ + +#include +#include +#include +#include +#include +#include +#include +#include <802.11.h> +#include <802.11s.h> +#include <802.1d.h> +#include +#ifdef WL11AX +#include <802.11ax.h> +#endif /* WL11AX */ +#include +#include +#include + +#include +#include + +/* NOTE re: Module specific error codes. + * + * BCME_.. error codes are extended by various features - e.g. FTM, NAN, SAE etc. + * The current process is to allocate a range of 1024 negative 32 bit integers to + * each module that extends the error codes to indicate a module specific status. + * + * The next range to use is below. If that range is used for a new feature, please + * update the range to be used by the next feature. + * + * The error codes -4096 ... -5119 are reserved for firmware signing. + * + * Next available (inclusive) range: [-7*1024 + 1, -6*1024] + * + * End Note + */ + +/* 11ax trigger frame format - versioning info */ +#define TRIG_FRAME_FORMAT_11AX_DRAFT_1P1 0 + +typedef struct { + uint32 num; + chanspec_t list[1]; +} chanspec_list_t; + +#define RSN_KCK_LENGTH 16 +#define RSN_KEK_LENGTH 16 +#define TPK_FTM_LEN 16 +#ifndef INTF_NAME_SIZ +#define INTF_NAME_SIZ 16 +#endif // endif + +#define WL_ASSOC_START_EVT_DATA_VERSION 1 + +typedef struct assoc_event_data { + uint32 version; + uint32 flags; + chanspec_t join_chspec; +} assoc_event_data_t; + +/**Used to send ioctls over the transport pipe */ +typedef struct remote_ioctl { + cdc_ioctl_t msg; + uint32 data_len; + char intf_name[INTF_NAME_SIZ]; +} rem_ioctl_t; +#define REMOTE_SIZE sizeof(rem_ioctl_t) + +#define BCM_IOV_XTLV_VERSION 0 + +#define MAX_NUM_D11CORES 2 + +/**DFS Forced param */ +typedef struct wl_dfs_forced_params { + chanspec_t chspec; + uint16 version; + chanspec_list_t chspec_list; +} wl_dfs_forced_t; + +#define DFS_PREFCHANLIST_VER 0x01 +#define WL_CHSPEC_LIST_FIXED_SIZE OFFSETOF(chanspec_list_t, list) +/* 
size of dfs forced param size given n channels are in the list */ +#define WL_DFS_FORCED_PARAMS_SIZE(n) \ + (sizeof(wl_dfs_forced_t) + (((n) < 1) ? (0) : (((n) - 1)* sizeof(chanspec_t)))) +#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \ + (WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list)) +#define WL_DFS_FORCED_PARAMS_MAX_SIZE \ + WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t)) + +/**association decision information */ +typedef struct { + uint8 assoc_approved; /**< (re)association approved */ + uint8 pad; + uint16 reject_reason; /**< reason code for rejecting association */ + struct ether_addr da; + uint8 pad1[6]; + int64 sys_time; /**< current system time */ +} assoc_decision_t; + +#define DFS_SCAN_S_IDLE -1 +#define DFS_SCAN_S_RADAR_FREE 0 +#define DFS_SCAN_S_RADAR_FOUND 1 +#define DFS_SCAN_S_INPROGESS 2 +#define DFS_SCAN_S_SCAN_ABORTED 3 +#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4 +#define DFS_SCAN_S_MAX 5 + +#define ACTION_FRAME_SIZE 1800 + +typedef struct wl_action_frame { + struct ether_addr da; + uint16 len; + uint32 packetId; + uint8 data[ACTION_FRAME_SIZE]; +} wl_action_frame_t; + +#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame) + +typedef struct ssid_info +{ + uint8 ssid_len; /**< the length of SSID */ + uint8 ssid[32]; /**< SSID string */ +} ssid_info_t; + +typedef struct wl_af_params { + uint32 channel; + int32 dwell_time; + struct ether_addr BSSID; + uint8 PAD[2]; + wl_action_frame_t action_frame; +} wl_af_params_t; + +#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params) + +#define MFP_TEST_FLAG_NORMAL 0 +#define MFP_TEST_FLAG_ANY_KEY 1 +typedef struct wl_sa_query { + uint32 flag; + uint8 action; + uint8 PAD; + uint16 id; + struct ether_addr da; + uint16 PAD; +} wl_sa_query_t; + +/* EXT_STA */ +/**association information */ +typedef struct { + uint32 assoc_req; /**< offset to association request frame */ + uint32 assoc_req_len; /**< association request frame length */ + uint32 assoc_rsp; /**< 
offset to association response frame */ + uint32 assoc_rsp_len; /**< association response frame length */ + uint32 bcn; /**< offset to AP beacon */ + uint32 bcn_len; /**< AP beacon length */ + uint32 wsec; /**< ucast security algo */ + uint32 wpaie; /**< offset to WPA ie */ + uint8 auth_alg; /**< 802.11 authentication mode */ + uint8 WPA_auth; /**< WPA: authenticated key management */ + uint8 ewc_cap; /**< EWC (MIMO) capable */ + uint8 ofdm; /**< OFDM */ +} assoc_info_t; +/* defined(EXT_STA) */ + +/* Flags for OBSS IOVAR Parameters */ +#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD (0x01) +#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD (0x02) +#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04) +#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD (0x08) +#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD (0x10) +#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD (0x20) +#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD (0x40) + +/* OBSS IOVAR Version information */ +#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1 + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 obss_bwsw_activity_cfm_count_cfg; /**< configurable count in + * seconds before we confirm that OBSS is present and + * dynamically activate dynamic bwswitch. + */ + uint8 obss_bwsw_no_activity_cfm_count_cfg; /**< configurable count in + * seconds before we confirm that OBSS is GONE and + * dynamically start pseudo upgrade. If in pseudo sense time, we + * will see OBSS, [means that, we false detected that OBSS-is-gone + * in watchdog] this count will be incremented in steps of + * obss_bwsw_no_activity_cfm_count_incr_cfg for confirming OBSS + * detection again. Note that, at present, max 30seconds is + * allowed like this. [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT] + */ + uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above + */ + uint16 obss_bwsw_pseudo_sense_count_cfg; /**< number of msecs/cnt to be in + * pseudo state. This is used to sense/measure the stats from lq. 
+ */ + uint8 obss_bwsw_rx_crs_threshold_cfg; /**< RX CRS default threshold */ + uint8 obss_bwsw_dur_thres; /**< OBSS dyn bwsw trigger/RX CRS Sec */ + uint8 obss_bwsw_txop_threshold_cfg; /**< TXOP default threshold */ +} BWL_POST_PACKED_STRUCT wlc_obss_dynbwsw_config_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 version; /**< version field */ + uint32 config_mask; + uint32 reset_mask; + wlc_obss_dynbwsw_config_t config_params; +} BWL_POST_PACKED_STRUCT obss_config_params_t; +#include + +/**bsscfg type */ +typedef enum bsscfg_type { + BSSCFG_TYPE_GENERIC = 0, /**< Generic AP/STA/IBSS BSS */ + BSSCFG_TYPE_P2P = 1, /**< P2P BSS */ + /* index 2 earlier used for BTAMP */ + BSSCFG_TYPE_PSTA = 3, + BSSCFG_TYPE_TDLS = 4, + BSSCFG_TYPE_SLOTTED_BSS = 5, + BSSCFG_TYPE_PROXD = 6, + BSSCFG_TYPE_NAN = 7, + BSSCFG_TYPE_MESH = 8, + BSSCFG_TYPE_AIBSS = 9 +} bsscfg_type_t; + +/* bsscfg subtype */ +typedef enum bsscfg_subtype { + BSSCFG_SUBTYPE_NONE = 0, + BSSCFG_GENERIC_STA = 1, /* GENERIC */ + BSSCFG_GENERIC_AP = 2, + BSSCFG_GENERIC_IBSS = 6, + BSSCFG_P2P_GC = 3, /* P2P */ + BSSCFG_P2P_GO = 4, + BSSCFG_P2P_DISC = 5, + /* Index 7 & 8 earlier used for BTAMP */ + BSSCFG_SUBTYPE_AWDL = 9, /* SLOTTED_BSS_TYPE */ + BSSCFG_SUBTYPE_NAN_MGMT = 10, + BSSCFG_SUBTYPE_NAN_DATA = 11, + BSSCFG_SUBTYPE_NAN_MGMT_DATA = 12 +} bsscfg_subtype_t; + +typedef struct wlc_bsscfg_info { + uint32 type; + uint32 subtype; +} wlc_bsscfg_info_t; + +/* ULP SHM Offsets info */ +typedef struct ulp_shm_info { + uint32 m_ulp_ctrl_sdio; + uint32 m_ulp_wakeevt_ind; + uint32 m_ulp_wakeind; +} ulp_shm_info_t; + +/* Legacy structure to help keep backward compatible wl tool and tray app */ + +#define LEGACY_WL_BSS_INFO_VERSION 107 /**< older version of wl_bss_info struct */ + +typedef struct wl_bss_info_107 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 
beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + uint8 PAD; + struct { + uint32 count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + uint8 channel; /**< Channel no. */ + uint8 PAD; + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + uint8 PAD; + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint8 PAD[3]; + uint32 ie_length; /**< byte length of Information Elements */ + /* variable length Information Elements */ +} wl_bss_info_107_t; + +/* + * Per-BSS information structure. + */ + +#define LEGACY2_WL_BSS_INFO_VERSION 108 /**< old version of wl_bss_info struct */ + +/** + * BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info_108 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + uint8 PAD[1]; + struct { + uint32 count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + uint8 PAD; + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint8 PAD[2]; + uint32 nbss_cap; /**< 802.11N BSS Capabilities (based on HT_CAP_*) */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 PAD[3]; + uint32 
reserved32[1]; /**< Reserved for expansion of BSS properties */ + uint8 flags; /**< flags */ + uint8 reserved[3]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint8 PAD[2]; + uint32 ie_length; /**< byte length of Information Elements */ + /* Add new fields here */ + /* variable length Information Elements */ +} wl_bss_info_108_t; + +#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */ + +/** + * BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + uint8 bcnflags; /* additional flags w.r.t. 
beacon */ + struct { + uint32 count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + uint8 accessnet; /* from beacon interwork IE (if bcnflags) */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint16 freespace1; /* make implicit padding explicit */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 padding1[3]; /**< explicit struct alignment padding */ + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 reserved[2]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint16 freespace2; /* making implicit padding explicit */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ +} wl_bss_info_v109_t; + +/** + * BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info_v109_1 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units 
are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + uint8 bcnflags; /* additional flags w.r.t. beacon */ + struct { + uint32 count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + uint8 accessnet; /* from beacon interwork IE (if bcnflags) */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint8 he_cap; /**< BSS is he capable */ + uint8 freespace1; /* make implicit padding explicit */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 padding1[3]; /**< explicit struct alignment padding */ + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 reserved[2]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint16 freespace2; /* making implicit padding explicit */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ + uint32 he_mcsmap; /**< STA's Associated hemcsmap */ + uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ + uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ +} wl_bss_info_v109_1_t; + +/** + * BSS info 
structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info_v109_2 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + uint8 bcnflags; /* additional flags w.r.t. beacon */ + struct { + uint32 count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + uint8 accessnet; /* from beacon interwork IE (if bcnflags) */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint8 he_cap; /**< BSS is he capable */ + uint8 freespace1; /* make implicit padding explicit */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 padding1[3]; /**< explicit struct alignment padding */ + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 reserved[2]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint16 freespace2; /* making implicit padding explicit */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's 
Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ + uint32 he_mcsmap; /**< STA's Associated hemcsmap */ + uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ + uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */ + uint32 timestamp[2]; /* Beacon Timestamp for FAKEAP req */ +} wl_bss_info_v109_2_t; + +#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS +typedef wl_bss_info_v109_t wl_bss_info_t; +#endif // endif + +#define WL_GSCAN_FULL_RESULT_VERSION 2 /* current version of wl_gscan_result_t struct */ + +typedef struct wl_gscan_bss_info { + uint32 timestamp[2]; + wl_bss_info_v109_t info; + /* Do not add any more members below, fixed */ + /* and variable length Information Elements to follow */ +} wl_gscan_bss_info_v2_t; + +typedef struct wl_gscan_bss_info_v3 { + uint32 timestamp[2]; + uint8 info[]; /* var length wl_bss_info_X structures */ + /* Do not add any more members below, fixed */ + /* and variable length Information Elements to follow */ +} wl_gscan_bss_info_v3_t; + +#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS +typedef wl_gscan_bss_info_v2_t wl_gscan_bss_info_t; +#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t)) +#endif // endif + +typedef struct wl_bsscfg { + uint32 bsscfg_idx; + uint32 wsec; + uint32 WPA_auth; + uint32 wsec_index; + uint32 associated; + uint32 BSS; + uint32 phytest_on; + struct ether_addr prev_BSSID; + struct ether_addr BSSID; + uint32 targetbss_wpa2_flags; + uint32 assoc_type; + uint32 assoc_state; +} wl_bsscfg_t; + +typedef struct wl_if_add { + uint32 bsscfg_flags; + uint32 if_flags; + uint32 ap; + struct ether_addr mac_addr; + uint16 PAD; + uint32 wlc_index; +} wl_if_add_t; + +typedef struct wl_bss_config { + uint32 atim_window; + uint32 beacon_period; + uint32 chanspec; +} wl_bss_config_t; + +/* Number of Bsscolor supported per core */ +#ifndef HE_MAX_BSSCOLOR_RES +#define 
HE_MAX_BSSCOLOR_RES 2 +#endif // endif + +#ifndef HE_MAX_STAID_PER_BSSCOLOR +#define HE_MAX_STAID_PER_BSSCOLOR 4 +#endif // endif + +/* BSSColor indices */ +#define BSSCOLOR0_IDX 0 +#define BSSCOLOR1_IDX 1 +#define HE_BSSCOLOR0 0 +#define HE_BSSCOLOR_MAX_VAL 63 + +/* STAID indices */ +#define STAID0_IDX 0 +#define STAID1_IDX 1 +#define STAID2_IDX 2 +#define STAID3_IDX 3 +#define HE_STAID_MAX_VAL 0x07FF + +typedef struct wl_bsscolor_info { + uint16 version; /**< structure version */ + uint16 length; /**< length of the bsscolor info */ + uint8 bsscolor_index; /**< bsscolor index 0-1 */ + uint8 bsscolor; /**c0, B[9:5]=>c1, B[14:10]=>c2, B[19:15]=>c[3-7] + * B[24:20]=>c[8-9], B[29:25]=>c[10-11] + */ + uint32 bfgain_2x1[NUM_BFGAIN_ARRAY_1RX]; /* exp 1ss, imp 1ss */ + uint32 bfgain_2x2[NUM_BFGAIN_ARRAY_2RX]; /* exp [1-2]ss, imp 1ss */ + uint32 bfgain_3x1[NUM_BFGAIN_ARRAY_1RX]; + uint32 bfgain_3x2[NUM_BFGAIN_ARRAY_2RX]; + uint32 bfgain_3x3[NUM_BFGAIN_ARRAY_3RX]; /* exp [1-3]ss, imp 1ss */ + uint32 bfgain_4x1[NUM_BFGAIN_ARRAY_1RX]; + uint32 bfgain_4x2[NUM_BFGAIN_ARRAY_2RX]; + uint32 bfgain_4x3[NUM_BFGAIN_ARRAY_3RX]; + uint32 bfgain_4x4[NUM_BFGAIN_ARRAY_4RX]; /* exp [1-4]ss, imp 1ss */ +} wl_txbf_expgainset_t; + +#define OFDM_RATE_MASK 0x0000007f +typedef uint8 ofdm_rates_t; + +typedef struct wl_rates_info { + wl_rateset_t rs_tgt; + uint32 phy_type; + int32 bandtype; + uint8 cck_only; + uint8 rate_mask; + uint8 mcsallow; + uint8 bw; + uint8 txstreams; + uint8 PAD[3]; +} wl_rates_info_t; + +/**uint32 list */ +typedef struct wl_uint32_list { + /** in - # of elements, out - # of entries */ + uint32 count; + /** variable length uint32 list */ + uint32 element[1]; +} wl_uint32_list_t; + +/* WLC_SET_ALLOW_MODE values */ +#define ALLOW_MODE_ANY_BSSID 0 +#define ALLOW_MODE_ONLY_DESIRED_BSSID 1 +#define ALLOW_MODE_NO_BSSID 2 + +/** used for association with a specific BSSID and chanspec list */ +typedef struct wl_assoc_params { + struct ether_addr bssid; /**< 00:00:00:00:00:00: 
broadcast scan */ + uint16 bssid_cnt; /**< 0: use chanspec_num, and the single bssid, + * otherwise count of chanspecs in chanspec_list + * AND paired bssids following chanspec_list + * also, chanspec_num has to be set to zero + * for bssid list to be used + */ + int32 chanspec_num; /**< 0: all available channels, + * otherwise count of chanspecs in chanspec_list + */ + chanspec_t chanspec_list[1]; /**< list of chanspecs */ +} wl_assoc_params_t; + +#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list) + +/** used for reassociation/roam to a specific BSSID and channel */ +typedef wl_assoc_params_t wl_reassoc_params_t; +#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + +/** used for association to a specific BSSID and channel */ +typedef wl_assoc_params_t wl_join_assoc_params_t; +#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + +/** used for join with or without a specific bssid and channel list */ +typedef struct wl_join_params { + wlc_ssid_t ssid; + wl_assoc_params_t params; /**< optional field, but it must include the fixed portion + * of the wl_assoc_params_t struct when it does present. 
+ */ +} wl_join_params_t; + +#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \ + WL_ASSOC_PARAMS_FIXED_SIZE) + +typedef struct wlc_roam_exp_params { + int8 a_band_boost_threshold; + int8 a_band_penalty_threshold; + int8 a_band_boost_factor; + int8 a_band_penalty_factor; + int8 cur_bssid_boost; + int8 alert_roam_trigger_threshold; + int16 a_band_max_boost; +} wlc_roam_exp_params_t; + +#define ROAM_EXP_CFG_VERSION 1 + +#define ROAM_EXP_ENABLE_FLAG (1 << 0) + +#define ROAM_EXP_CFG_PRESENT (1 << 1) + +typedef struct wl_roam_exp_cfg { + uint16 version; + uint16 flags; + wlc_roam_exp_params_t params; +} wl_roam_exp_cfg_t; + +typedef struct wl_bssid_pref_list { + struct ether_addr bssid; + /* Add this to modify rssi */ + int8 rssi_factor; + int8 flags; +} wl_bssid_pref_list_t; + +#define BSSID_PREF_LIST_VERSION 1 +#define ROAM_EXP_CLEAR_BSSID_PREF (1 << 0) + +typedef struct wl_bssid_pref_cfg { + uint16 version; + uint16 flags; + uint16 count; + uint16 reserved; + wl_bssid_pref_list_t bssids[]; +} wl_bssid_pref_cfg_t; + +#define SSID_WHITELIST_VERSION 1 + +#define ROAM_EXP_CLEAR_SSID_WHITELIST (1 << 0) + +/* Roam SSID whitelist, ssids in this list are ok to */ +/* be considered as targets to join when considering a roam */ + +typedef struct wl_ssid_whitelist { + + uint16 version; + uint16 flags; + + uint8 ssid_count; + uint8 reserved[3]; + wlc_ssid_t ssids[]; +} wl_ssid_whitelist_t; + +#define ROAM_EXP_EVENT_VERSION 1 + +typedef struct wl_roam_exp_event { + + uint16 version; + uint16 flags; + wlc_ssid_t cur_ssid; +} wl_roam_exp_event_t; + +/** scan params for extended join */ +typedef struct wl_join_scan_params { + uint8 scan_type; /**< 0 use default, active or passive scan */ + uint8 PAD[3]; + int32 nprobes; /**< -1 use default, number of probes per channel */ + int32 active_time; /**< -1 use default, dwell time per channel for + * active scanning + */ + int32 passive_time; /**< -1 use default, dwell time per channel + * for passive scanning + */ + 
int32 home_time; /**< -1 use default, dwell time for the home channel + * between channel scans + */ +} wl_join_scan_params_t; + +/** extended join params */ +typedef struct wl_extjoin_params { + wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */ + wl_join_scan_params_t scan; + wl_join_assoc_params_t assoc; /**< optional field, but it must include the fixed portion + * of the wl_join_assoc_params_t struct when it does + * present. + */ +} wl_extjoin_params_t; +#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \ + WL_JOIN_ASSOC_PARAMS_FIXED_SIZE) + +#define ANT_SELCFG_MAX 4 /**< max number of antenna configurations */ +#define MAX_STREAMS_SUPPORTED 4 /**< max number of streams supported */ +typedef struct { + uint8 ant_config[ANT_SELCFG_MAX]; /**< antenna configuration */ + uint8 num_antcfg; /**< number of available antenna configurations */ +} wlc_antselcfg_t; + +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss; /**< traffic not in our bss */ + uint32 interference; /**< millisecs detecting a non 802.11 interferer. */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_t; + +typedef struct { + chanspec_t chanspec; /**< Which channel? */ + uint16 num_secs; /**< How many secs worth of data */ + cca_congest_t secs[1]; /**< Data */ +} cca_congest_channel_req_t; +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest; /**< millisecs detecting busy CCA */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_simple_t; + +typedef struct { + uint16 status; + uint16 id; + chanspec_t chanspec; /**< Which channel? 
*/ + uint16 len; + union { + cca_congest_simple_t cca_busy; /**< CCA busy */ + cca_congest_t cca_busy_ext; /**< Extended CCA report */ + int32 noise; /**< noise floor */ + }; +} cca_chan_qual_event_t; + +typedef struct { + uint32 msrmnt_time; /**< Time for Measurement (msec) */ + uint32 msrmnt_done; /**< flag set when measurement complete */ + char buf[]; +} cca_stats_n_flags; + +typedef struct { + uint32 msrmnt_query; /* host to driver query for measurement done */ + uint32 time_req; /* time required for measurement */ + uint8 report_opt; /* option to print different stats in report */ + uint8 PAD[3]; +} cca_msrmnt_query; + +/* interference sources */ +enum interference_source { + ITFR_NONE = 0, /**< interference */ + ITFR_PHONE, /**< wireless phone */ + ITFR_VIDEO_CAMERA, /**< wireless video camera */ + ITFR_MICROWAVE_OVEN, /**< microwave oven */ + ITFR_BABY_MONITOR, /**< wireless baby monitor */ + ITFR_BLUETOOTH, /**< bluetooth */ + ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /**< wireless camera or baby monitor */ + ITFR_BLUETOOTH_OR_BABY_MONITOR, /**< bluetooth or baby monitor */ + ITFR_VIDEO_CAMERA_OR_PHONE, /**< video camera or phone */ + ITFR_UNIDENTIFIED /**< interference from unidentified source */ +}; + +/** structure for interference source report */ +typedef struct { + uint32 flags; /**< flags. bit definitions below */ + uint32 source; /**< last detected interference source */ + uint32 timestamp; /**< second timestamp on interferenced flag change */ +} interference_source_rep_t; + +#define WLC_CNTRY_BUF_SZ 4 /**< Country string is 3 bytes + NUL */ + +typedef struct wl_country { + char country_abbrev[WLC_CNTRY_BUF_SZ]; /**< nul-terminated country code used in + * the Country IE + */ + int32 rev; /**< revision specifier for ccode + * on set, -1 indicates unspecified. + * on get, rev >= 0 + */ + char ccode[WLC_CNTRY_BUF_SZ]; /**< nul-terminated built-in country code. 
+ * variable length, but fixed size in + * struct allows simple allocation for + * expected country strings <= 3 chars. + */ +} wl_country_t; + +#define CCODE_INFO_VERSION 1 + +typedef enum wl_ccode_role { + WLC_CCODE_ROLE_ACTIVE = 0, + WLC_CCODE_ROLE_HOST, + WLC_CCODE_ROLE_80211D_ASSOC, + WLC_CCODE_ROLE_80211D_SCAN, + WLC_CCODE_ROLE_DEFAULT, + WLC_CCODE_ROLE_DEFAULT_SROM_BKUP, + WLC_CCODE_LAST +} wl_ccode_role_t; +#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST + +typedef struct wl_ccode_entry { + uint16 reserved; + uint8 band; + uint8 role; + char ccode[WLC_CNTRY_BUF_SZ]; +} wl_ccode_entry_t; + +typedef struct wl_ccode_info { + uint16 version; + uint16 count; /**< Number of ccodes entries in the set */ + wl_ccode_entry_t ccodelist[1]; +} wl_ccode_info_t; +#define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist) +typedef struct wl_channels_in_country { + uint32 buflen; + uint32 band; + char country_abbrev[WLC_CNTRY_BUF_SZ]; + uint32 count; + uint32 channel[1]; +} wl_channels_in_country_t; + +typedef struct wl_country_list { + uint32 buflen; + uint32 band_set; + uint32 band; + uint32 count; + char country_abbrev[1]; +} wl_country_list_t; + +typedef struct wl_rm_req_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ +} wl_rm_req_elt_t; + +typedef struct wl_rm_req { + uint32 token; /**< overall measurement set token */ + uint32 count; /**< number of measurement requests */ + void *cb; /**< completion callback function: may be NULL */ + void *cb_arg; /**< arg to completion callback function */ + wl_rm_req_elt_t req[1]; /**< variable length block of requests */ +} wl_rm_req_t; +#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req) + +typedef struct wl_rm_rep_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 
tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ + uint32 len; /**< byte length of data block */ + uint8 data[1]; /**< variable length data block */ +} wl_rm_rep_elt_t; +#define WL_RM_REP_ELT_FIXED_LEN 24 /**< length excluding data block */ + +#define WL_RPI_REP_BIN_NUM 8 +typedef struct wl_rm_rpi_rep { + uint8 rpi[WL_RPI_REP_BIN_NUM]; + int8 rpi_max[WL_RPI_REP_BIN_NUM]; +} wl_rm_rpi_rep_t; + +typedef struct wl_rm_rep { + uint32 token; /**< overall measurement set token */ + uint32 len; /**< length of measurement report block */ + wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */ +} wl_rm_rep_t; +#define WL_RM_REP_FIXED_LEN 8 +#ifdef BCMCCX + +#define LEAP_USER_MAX 32 +#define LEAP_DOMAIN_MAX 32 +#define LEAP_PASSWORD_MAX 32 + +typedef struct wl_leap_info { + wlc_ssid_t ssid; + uint8 user_len; + uint8 user[LEAP_USER_MAX]; + uint8 password_len; + uint8 password[LEAP_PASSWORD_MAX]; + uint8 domain_len; + uint8 domain[LEAP_DOMAIN_MAX]; + uint8 PAD; +} wl_leap_info_t; + +typedef struct wl_leap_list { + uint32 buflen; + uint32 version; + uint32 count; + wl_leap_info_t leap_info[1]; +} wl_leap_list_t; +#endif /* BCMCCX */ +typedef enum sup_auth_status { + /* Basic supplicant authentication states */ + WLC_SUP_DISCONNECTED = 0, + WLC_SUP_CONNECTING, + WLC_SUP_IDREQUIRED, + WLC_SUP_AUTHENTICATING, + WLC_SUP_AUTHENTICATED, + WLC_SUP_KEYXCHANGE, + WLC_SUP_KEYED, + WLC_SUP_TIMEOUT, + WLC_SUP_LAST_BASIC_STATE, + + /* Extended supplicant authentication states */ + /** Waiting to receive handshake msg M1 */ + WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, + /** Preparing to send handshake msg M2 */ + WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, + /* Waiting to receive handshake msg M3 */ + WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, + WLC_SUP_KEYXCHANGE_PREP_M4, /**< Preparing to send handshake msg M4 */ + WLC_SUP_KEYXCHANGE_WAIT_G1, /**< Waiting to receive handshake msg 
G1 */ + WLC_SUP_KEYXCHANGE_PREP_G2 /**< Preparing to send handshake msg G2 */ +} sup_auth_status_t; + +typedef struct wl_wsec_key { + uint32 index; /**< key index */ + uint32 len; /**< key length */ + uint8 data[DOT11_MAX_KEY_SIZE]; /**< key data */ + uint32 pad_1[18]; + uint32 algo; /**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ + uint32 flags; /**< misc flags */ + uint32 pad_2[2]; + int32 pad_3; + int32 iv_initialized; /**< has IV been initialized already? */ + int32 pad_4; + /* Rx IV */ + struct { + uint32 hi; /**< upper 32 bits of IV */ + uint16 lo; /**< lower 16 bits of IV */ + uint16 PAD; + } rxiv; + uint32 pad_5[2]; + struct ether_addr ea; /**< per station */ + uint16 PAD; +} wl_wsec_key_t; + +#define WSEC_MIN_PSK_LEN 8 +#define WSEC_MAX_PSK_LEN 64 + +/** Flag for key material needing passhash'ing */ +#define WSEC_PASSPHRASE (1<<0) + +/**receptacle for WLC_SET_WSEC_PMK parameter */ +typedef struct wsec_pmk { + ushort key_len; /**< octets in key material */ + ushort flags; /**< key handling qualification */ + uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */ +} wsec_pmk_t; + +#define WL_AUTH_EVENT_DATA_V1 0x1 + +/* tlv ids for auth event */ +#define WL_AUTH_PMK_TLV_ID 1 +#define WL_AUTH_PMKID_TLV_ID 2 +/* AUTH event data +* pmk and pmkid in case of SAE auth +* xtlvs will be 32 bit alligned +*/ +typedef struct wl_auth_event { + uint16 version; + uint16 length; + uint8 xtlvs[]; +} wl_auth_event_t; + +#define WL_AUTH_EVENT_FIXED_LEN_V1 OFFSETOF(wl_auth_event_t, xtlvs) + +#define FILS_CACHE_ID_LEN 2 +#define PMK_LEN_MAX 48 + +typedef struct _pmkid_v1 { + struct ether_addr BSSID; + uint8 PMKID[WPA2_PMKID_LEN]; +} pmkid_v1_t; + +#define PMKID_ELEM_V2_LENGTH (sizeof(struct ether_addr) + WPA2_PMKID_LEN + PMK_LEN_MAX + \ + sizeof(ssid_info_t) + FILS_CACHE_ID_LEN) + +typedef struct _pmkid_v2 { + uint16 length; /* Should match PMKID_ELEM_VX_LENGTH */ + struct ether_addr BSSID; + uint8 PMKID[WPA2_PMKID_LEN]; + uint8 pmk[PMK_LEN_MAX]; /* for FILS key deriviation */ 
+ uint16 pmk_len; + ssid_info_t ssid; + uint8 fils_cache_id[FILS_CACHE_ID_LEN]; +} pmkid_v2_t; + +#define PMKID_LIST_VER_2 2 +typedef struct _pmkid_list_v1 { + uint32 npmkid; + pmkid_v1_t pmkid[1]; +} pmkid_list_v1_t; + +typedef struct _pmkid_list_v2 { + uint16 version; + uint16 length; + pmkid_v2_t pmkid[1]; +} pmkid_list_v2_t; + +#ifndef PMKID_VERSION_ENABLED +/* pmkid structure before versioning. legacy. DONOT update anymore here */ +typedef pmkid_v1_t pmkid_t; +typedef pmkid_list_v1_t pmkid_list_t; +#endif /* PMKID_VERSION_ENABLED */ + +typedef struct _pmkid_cand { + struct ether_addr BSSID; + uint8 preauth; +} pmkid_cand_t; + +typedef struct _pmkid_cand_list { + uint32 npmkid_cand; + pmkid_cand_t pmkid_cand[1]; +} pmkid_cand_list_t; + +#define WL_STA_ANT_MAX 4 /**< max possible rx antennas */ + +typedef struct wl_assoc_info { + uint32 req_len; + uint32 resp_len; + uint32 flags; + struct dot11_assoc_req req; + struct ether_addr reassoc_bssid; /**< used in reassoc's */ + struct dot11_assoc_resp resp; + uint32 state; +} wl_assoc_info_t; + +typedef struct wl_led_info { + uint32 index; /**< led index */ + uint32 behavior; + uint8 activehi; + uint8 PAD[3]; +} wl_led_info_t; + +/** srom read/write struct passed through ioctl */ +typedef struct { + uint32 byteoff; /**< byte offset */ + uint32 nbytes; /**< number of bytes */ + uint16 buf[]; +} srom_rw_t; + +#define CISH_FLAG_PCIECIS (1 << 15) /**< write CIS format bit for PCIe CIS */ + +/** similar cis (srom or otp) struct [iovar: may not be aligned] */ +typedef struct { + uint16 source; /**< cis source */ + uint16 flags; /**< flags */ + uint32 byteoff; /**< byte offset */ + uint32 nbytes; /**< number of bytes */ + /* data follows here */ +} cis_rw_t; + +/** R_REG and W_REG struct passed through ioctl */ +typedef struct { + uint32 byteoff; /**< byte offset of the field in d11regs_t */ + uint32 val; /**< read/write value of the field */ + uint32 size; /**< sizeof the field */ + uint32 band; /**< band (optional) */ +} 
rw_reg_t; + +/** + * Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band + * PCL - Power Control Loop + */ +typedef struct { + uint16 auto_ctrl; /**< WL_ATTEN_XX */ + uint16 bb; /**< Baseband attenuation */ + uint16 radio; /**< Radio attenuation */ + uint16 txctl1; /**< Radio TX_CTL1 value */ +} atten_t; + +/** Per-AC retry parameters */ +struct wme_tx_params_s { + uint8 short_retry; + uint8 short_fallback; + uint8 long_retry; + uint8 long_fallback; + uint16 max_rate; /**< In units of 512 Kbps */ +}; + +typedef struct wme_tx_params_s wme_tx_params_t; + +#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT) + +/**Used to get specific link/ac parameters */ +typedef struct { + int32 ac; + uint8 val; + struct ether_addr ea; + uint8 PAD; +} link_val_t; + +#define WL_PM_MUTE_TX_VER 1 + +typedef struct wl_pm_mute_tx { + uint16 version; /**< version */ + uint16 len; /**< length */ + uint16 deadline; /**< deadline timer (in milliseconds) */ + uint8 enable; /**< set to 1 to enable mode; set to 0 to disable it */ + uint8 PAD; +} wl_pm_mute_tx_t; + +/* sta_info_t version 4 */ +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame 
*/ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. 
+ */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + uint32 rx_dur_total; /* total user RX duration (estimated) */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */ + uint32 PAD; +} sta_info_v4_t; + +/* Note: Version 4 is the latest version of sta_info_t. Version 5 is abandoned. + * Please add new fields to version 4, not version 5. + */ +/* sta_info_t version 5 */ +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ 
+ uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. 
+ */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */ +} sta_info_v5_t; + +/* sta_info_t version 6 + changes to wl_rateset_args_t is leading to update this struct version as well. + */ +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + 
uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. 
+ */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + uint32 rx_dur_total; /* total user RX duration (estimated) */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_v2_t rateset_adv; /* rateset along with mcs index bitmap */ +} sta_info_v6_t; + +#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts) + +#define WL_STA_VER_4 4 +#define WL_STA_VER_5 5 +#define WL_STA_VER WL_STA_VER_4 + +#define SWDIV_STATS_VERSION_2 2 +#define SWDIV_STATS_CURRENT_VERSION SWDIV_STATS_VERSION_2 + +struct wlc_swdiv_stats_v1 { + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; + uint32 swap_snrdrop0; + uint32 swap_snrdrop1; + uint32 mws_antsel_ovr_tx; + uint32 mws_antsel_ovr_rx; + uint8 swap_trig_event_id; +}; + +struct wlc_swdiv_stats_v2 { + uint16 version; /* version of the structure + * as defined by SWDIV_STATS_CURRENT_VERSION + */ + uint16 length; /* length of the entire 
structure */ + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; + uint32 swap_snrdrop0; + uint32 swap_snrdrop1; + uint32 mws_antsel_ovr_tx; + uint32 mws_antsel_ovr_rx; + uint32 swap_trig_event_id; +}; + +#define WLC_NUMRATES 16 /**< max # of rates in a rateset */ + +/**Used to get specific STA parameters */ +typedef struct { + uint32 val; + struct ether_addr ea; + uint16 PAD; +} scb_val_t; + +/**Used by iovar versions of some ioctls, i.e. 
WLC_SCB_AUTHORIZE et al */ +typedef struct { + uint32 code; + scb_val_t ioctl_args; +} authops_t; + +/** channel encoding */ +typedef struct channel_info { + int32 hw_channel; + int32 target_channel; + int32 scan_channel; +} channel_info_t; + +/** For ioctls that take a list of MAC addresses */ +typedef struct maclist { + uint32 count; /**< number of MAC addresses */ + struct ether_addr ea[1]; /**< variable length array of MAC addresses */ +} maclist_t; + +typedef struct wds_client_info { + char ifname[INTF_NAME_SIZ]; /* WDS ifname */ + struct ether_addr ea; /* WDS client MAC address */ +} wds_client_info_t; + +#define WDS_MACLIST_MAGIC 0xFFFFFFFF +#define WDS_MACLIST_VERSION 1 + +/* For wds MAC list ioctls */ +typedef struct wds_maclist { + uint32 count; /* Number of WDS clients */ + uint32 magic; /* Magic number */ + uint32 version; /* Version number */ + struct wds_client_info client_list[1]; /* Variable length array of WDS clients */ +} wds_maclist_t; + +/**get pkt count struct passed through ioctl */ +typedef struct get_pktcnt { + uint32 rx_good_pkt; + uint32 rx_bad_pkt; + uint32 tx_good_pkt; + uint32 tx_bad_pkt; + uint32 rx_ocast_good_pkt; /**< unicast packets destined for others */ +} get_pktcnt_t; + +/* NINTENDO2 */ +#define LQ_IDX_MIN 0 +#define LQ_IDX_MAX 1 +#define LQ_IDX_AVG 2 +#define LQ_IDX_SUM 2 +#define LQ_IDX_LAST 3 +#define LQ_STOP_MONITOR 0 +#define LQ_START_MONITOR 1 + +/** Get averages RSSI, Rx PHY rate and SNR values */ +/* Link Quality */ +typedef struct { + int32 rssi[LQ_IDX_LAST]; /**< Array to keep min, max, avg rssi */ + int32 snr[LQ_IDX_LAST]; /**< Array to keep min, max, avg snr */ + int32 isvalid; /**< Flag indicating whether above data is valid */ +} wl_lq_t; + +typedef enum wl_wakeup_reason_type { + LCD_ON = 1, + LCD_OFF, + DRC1_WAKE, + DRC2_WAKE, + REASON_LAST +} wl_wr_type_t; + +typedef struct { + /** Unique filter id */ + uint32 id; + /** stores the reason for the last wake up */ + uint8 reason; + uint8 PAD[3]; +} wl_wr_t; + +/** 
Get MAC specific rate histogram command */ +typedef struct { + struct ether_addr ea; /**< MAC Address */ + uint8 ac_cat; /**< Access Category */ + uint8 num_pkts; /**< Number of packet entries to be averaged */ +} wl_mac_ratehisto_cmd_t; +/** Get MAC rate histogram response */ +typedef struct { + uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */ + uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */ + uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /**< VHT counts */ + uint32 tsf_timer[2][2]; /**< Start and End time for 8bytes value */ + uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /** MCS counts */ +} wl_mac_ratehisto_res_t; + +/* sta_info ecounters */ +typedef struct { + struct ether_addr ea; /* Station MAC addr */ + struct ether_addr BSSID; /* BSSID of the BSS */ + uint32 tx_pkts_fw_total; /* # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /* # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /* # FW generated which + * failed after retry + */ +} sta_info_ecounters_t; + +#define STAMON_MODULE_VER 1 + +/**Linux network driver ioctl encoding */ +typedef struct wl_ioctl { + uint32 cmd; /**< common ioctl definition */ + void *buf; /**< pointer to user buffer */ + uint32 len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint32 used; /**< bytes read or written (optional) */ + uint32 needed; /**< bytes needed (optional) */ +} wl_ioctl_t; + +#ifdef CONFIG_COMPAT +typedef struct compat_wl_ioctl { + uint32 cmd; /**< common ioctl definition */ + uint32 buf; /**< pointer to user buffer */ + uint32 len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint32 used; /**< bytes read or written (optional) */ + uint32 needed; /**< bytes needed (optional) */ +} compat_wl_ioctl_t; +#endif /* CONFIG_COMPAT */ + +#define WL_NUM_RATES_CCK 4 /**< 1, 2, 5.5, 11 Mbps */ +#define WL_NUM_RATES_OFDM 8 /**< 6, 9, 12, 18, 24, 36, 48, 54 
Mbps SISO/CDD */ +#define WL_NUM_RATES_MCS_1STREAM 8 /**< MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */ +#define WL_NUM_RATES_EXTRA_VHT 2 /**< Additional VHT 11AC rates */ +#define WL_NUM_RATES_VHT 10 +#define WL_NUM_RATES_VHT_ALL (WL_NUM_RATES_VHT + WL_NUM_RATES_EXTRA_VHT) +#define WL_NUM_RATES_HE 12 +#define WL_NUM_RATES_MCS32 1 +#define UC_URL_LEN 128u /**< uCode URL length */ + +/* + * Structure for passing hardware and software + * revision info up from the driver. + */ +typedef struct wlc_rev_info { + uint32 vendorid; /**< PCI vendor id */ + uint32 deviceid; /**< device id of chip */ + uint32 radiorev; /**< radio revision */ + uint32 chiprev; /**< chip revision */ + uint32 corerev; /**< core revision */ + uint32 boardid; /**< board identifier (usu. PCI sub-device id) */ + uint32 boardvendor; /**< board vendor (usu. PCI sub-vendor id) */ + uint32 boardrev; /**< board revision */ + uint32 driverrev; /**< driver version */ + uint32 ucoderev; /**< uCode version */ + uint32 bus; /**< bus type */ + uint32 chipnum; /**< chip number */ + uint32 phytype; /**< phy type */ + uint32 phyrev; /**< phy revision */ + uint32 anarev; /**< anacore rev */ + uint32 chippkg; /**< chip package info */ + uint32 nvramrev; /**< nvram revision number */ + uint32 phyminorrev; /**< phy minor rev */ + uint32 coreminorrev; /**< core minor rev */ + uint32 drvrev_major; /**< driver version: major */ + uint32 drvrev_minor; /**< driver version: minor */ + uint32 drvrev_rc; /**< driver version: rc */ + uint32 drvrev_rc_inc; /**< driver version: rc incremental */ + uint16 ucodeprebuilt; /**< uCode prebuilt flag */ + uint16 ucodediffct; /**< uCode diff count */ + uchar ucodeurl[UC_URL_LEN]; /**< uCode repo URL@cmt_id */ +} wlc_rev_info_t; + +#define WL_REV_INFO_LEGACY_LENGTH 48 + +#define WL_BRAND_MAX 10 +typedef struct wl_instance_info { + uint32 instance; + int8 brand[WL_BRAND_MAX]; + int8 PAD[4-(WL_BRAND_MAX%4)]; +} wl_instance_info_t; + +/** structure to change size of tx fifo */ +typedef 
struct wl_txfifo_sz { + uint16 magic; + uint16 fifo; + uint16 size; +} wl_txfifo_sz_t; + +/* Transfer info about an IOVar from the driver */ +/**Max supported IOV name size in bytes, + 1 for nul termination */ +#define WLC_IOV_NAME_LEN (32 + 1) + +typedef struct wlc_iov_trx_s { + uint8 module; + uint8 type; + char name[WLC_IOV_NAME_LEN]; +} wlc_iov_trx_t; + +/** bump this number if you change the ioctl interface */ +#define WLC_IOCTL_VERSION 2 +#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1 +/* ifdef EXT_STA */ +typedef struct _wl_assoc_result { + ulong associated; + ulong NDIS_auth; + ulong NDIS_infra; +} wl_assoc_result_t; +/* EXT_STA */ + +#define WL_PHY_PAVARS_LEN 32 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */ + +#define WL_PHY_PAVAR_VER 1 /**< pavars version */ +#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */ +typedef struct wl_pavars2 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< len of this structure */ + uint16 inuse; /**< driver return 1 for a1,b0,b1 in current band range */ + uint16 phy_type; /**< phy type */ + uint16 bandrange; + uint16 chain; + uint16 inpa[WL_PHY_PAVARS2_NUM]; /**< phy pavars for one band range */ +} wl_pavars2_t; + +typedef struct wl_po { + uint16 phy_type; /**< Phy type */ + uint16 band; + uint16 cckpo; + uint16 PAD; + uint32 ofdmpo; + uint16 mcspo[8]; +} wl_po_t; + +#define WL_NUM_RPCALVARS 5 /**< number of rpcal vars */ + +typedef struct wl_rpcal { + uint16 value; + uint16 update; +} wl_rpcal_t; + +#define WL_NUM_RPCALPHASEVARS 5 /* number of rpcal phase vars */ + +typedef struct wl_rpcal_phase { + uint16 value; + uint16 update; +} wl_rpcal_phase_t; + +typedef struct wl_aci_args { + int32 enter_aci_thresh; /* Trigger level to start detecting ACI */ + int32 exit_aci_thresh; /* Trigger level to exit ACI mode */ + int32 usec_spin; /* microsecs to delay between rssi samples */ + int32 glitch_delay; /* interval between ACI scans when glitch count is consistently high */ + uint16 nphy_adcpwr_enter_thresh; 
/**< ADC power to enter ACI mitigation mode */ + uint16 nphy_adcpwr_exit_thresh; /**< ADC power to exit ACI mitigation mode */ + uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */ + uint16 nphy_num_samples; /**< Number of samples to compute power on one channel */ + uint16 nphy_undetect_window_sz; /**< num of undetects to exit ACI Mitigation mode */ + uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */ + uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */ + uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */ + uint16 nphy_noise_noassoc_glitch_th_up; /**< wl interference 4 */ + uint16 nphy_noise_noassoc_glitch_th_dn; + uint16 nphy_noise_assoc_glitch_th_up; + uint16 nphy_noise_assoc_glitch_th_dn; + uint16 nphy_noise_assoc_aci_glitch_th_up; + uint16 nphy_noise_assoc_aci_glitch_th_dn; + uint16 nphy_noise_assoc_enter_th; + uint16 nphy_noise_noassoc_enter_th; + uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th; + uint16 nphy_noise_noassoc_crsidx_incr; + uint16 nphy_noise_assoc_crsidx_incr; + uint16 nphy_noise_crsidx_decr; +} wl_aci_args_t; + +#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */ +#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t struct */ +typedef struct wl_samplecollect_args { + /* version 0 fields */ + uint8 coll_us; + uint8 PAD[3]; + int32 cores; + /* add'l version 1 fields */ + uint16 version; /**< see definition of WL_SAMPLECOLLECT_T_VERSION */ + uint16 length; /**< length of entire structure */ + int8 trigger; + uint8 PAD; + uint16 timeout; + uint16 mode; + uint16 PAD; + uint32 pre_dur; + uint32 post_dur; + uint8 gpio_sel; + uint8 downsamp; + uint8 be_deaf; + uint8 agc; /**< loop from init gain and going down */ + uint8 filter; /**< override high pass corners to lowest */ + /* add'l version 2 fields */ + uint8 trigger_state; + uint8 module_sel1; + uint8 module_sel2; + uint16 nsamps; + uint16 PAD; + 
int32 bitStart; + uint32 gpioCapMask; + uint8 gpio_collection; + uint8 PAD[3]; +} wl_samplecollect_args_t; + +#define WL_SAMPLEDATA_T_VERSION 1 /**< version of wl_samplecollect_args_t struct */ +/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */ +#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2 + +typedef struct wl_sampledata { + uint16 version; /**< structure version */ + uint16 size; /**< size of structure */ + uint16 tag; /**< Header/Data */ + uint16 length; /**< data length */ + uint32 flag; /**< bit def */ +} wl_sampledata_t; + +/* WL_OTA START */ +/* OTA Test Status */ +enum { + WL_OTA_TEST_IDLE = 0, /**< Default Idle state */ + WL_OTA_TEST_ACTIVE = 1, /**< Test Running */ + WL_OTA_TEST_SUCCESS = 2, /**< Successfully Finished Test */ + WL_OTA_TEST_FAIL = 3 /**< Test Failed in the Middle */ +}; + +/* OTA SYNC Status */ +enum { + WL_OTA_SYNC_IDLE = 0, /**< Idle state */ + WL_OTA_SYNC_ACTIVE = 1, /**< Waiting for Sync */ + WL_OTA_SYNC_FAIL = 2 /**< Sync pkt not recieved */ +}; + +/* Various error states dut can get stuck during test */ +enum { + WL_OTA_SKIP_TEST_CAL_FAIL = 1, /**< Phy calibration failed */ + WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync Packet not recieved */ + WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /**< Cmd flow file download failed */ + WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /**< No test found in Flow file */ + WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /**< WL UP failed */ + WL_OTA_SKIP_TEST_UNKNOWN_CALL /**< Unintentional scheduling on ota test */ +}; + +/* Differentiator for ota_tx and ota_rx */ +enum { + WL_OTA_TEST_TX = 0, /**< ota_tx */ + WL_OTA_TEST_RX = 1, /**< ota_rx */ +}; + +/* Catch 3 modes of operation: 20Mhz, 40Mhz, 20 in 40 Mhz */ +enum { + WL_OTA_TEST_BW_20_IN_40MHZ = 0, /**< 20 in 40 operation */ + WL_OTA_TEST_BW_20MHZ = 1, /**< 20 Mhz operation */ + WL_OTA_TEST_BW_40MHZ = 2, /**< full 40Mhz operation */ + WL_OTA_TEST_BW_80MHZ = 3 /* full 80Mhz operation */ +}; +#define HT_MCS_INUSE 0x00000080 /* HT MCS in use,indicates b0-6 holds an mcs 
*/ +#define VHT_MCS_INUSE 0x00000100 /* VHT MCS in use,indicates b0-6 holds an mcs */ +#define OTA_RATE_MASK 0x0000007f /* rate/mcs value */ +#define OTA_STF_SISO 0 +#define OTA_STF_CDD 1 +#define OTA_STF_STBC 2 +#define OTA_STF_SDM 3 + +typedef struct ota_rate_info { + uint8 rate_cnt; /**< Total number of rates */ + uint8 PAD; + uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /**< array of rates from 1mbps to 130mbps */ + /**< for legacy rates : ratein mbps * 2 */ + /**< for HT rates : mcs index */ +} ota_rate_info_t; + +typedef struct ota_power_info { + int8 pwr_ctrl_on; /**< power control on/off */ + int8 start_pwr; /**< starting power/index */ + int8 delta_pwr; /**< delta power/index */ + int8 end_pwr; /**< end power/index */ +} ota_power_info_t; + +typedef struct ota_packetengine { + uint16 delay; /**< Inter-packet delay */ + /**< for ota_tx, delay is tx ifs in micro seconds */ + /* for ota_rx, delay is wait time in milliseconds */ + uint16 nframes; /**< Number of frames */ + uint16 length; /**< Packet length */ +} ota_packetengine_t; + +/* + * OTA txant/rxant parameter + * bit7-4: 4 bits swdiv_tx/rx_policy bitmask, specify antenna-policy for SW diversity + * bit3-0: 4 bits TxCore bitmask, specify cores used for transmit frames + * (maximum spatial expansion) + */ +#define WL_OTA_TEST_ANT_MASK 0xF0 +#define WL_OTA_TEST_CORE_MASK 0x0F + +/* OTA txant/rxant 'ant_mask' field; map to Tx/Rx antenna policy for SW diversity */ +enum { + WL_OTA_TEST_FORCE_ANT0 = 0x10, /* force antenna to Ant 0 */ + WL_OTA_TEST_FORCE_ANT1 = 0x20, /* force antenna to Ant 1 */ +}; + +/* antenna/core fields access */ +#define WL_OTA_TEST_GET_ANT(_txant) ((_txant) & WL_OTA_TEST_ANT_MASK) +#define WL_OTA_TEST_GET_CORE(_txant) ((_txant) & WL_OTA_TEST_CORE_MASK) + +/** Test info vector */ +typedef struct wl_ota_test_args { + uint8 cur_test; /**< test phase */ + uint8 chan; /**< channel */ + uint8 bw; /**< bandwidth */ + uint8 control_band; /**< control band */ + uint8 stf_mode; /**< stf mode 
*/ + uint8 PAD; + ota_rate_info_t rt_info; /**< Rate info */ + ota_packetengine_t pkteng; /**< packeteng info */ + uint8 txant; /**< tx antenna */ + uint8 rxant; /**< rx antenna */ + ota_power_info_t pwr_info; /**< power sweep info */ + uint8 wait_for_sync; /**< wait for sync or not */ + uint8 ldpc; + uint8 sgi; + uint8 PAD; + /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */ +} wl_ota_test_args_t; + +#define WL_OTA_TESTVEC_T_VERSION 1 /* version of wl_ota_test_vector_t struct */ +typedef struct wl_ota_test_vector { + uint16 version; + wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /**< Test argument struct */ + uint16 test_cnt; /**< Total no of test */ + uint8 file_dwnld_valid; /**< File successfully downloaded */ + uint8 sync_timeout; /**< sync packet timeout */ + int8 sync_fail_action; /**< sync fail action */ + struct ether_addr sync_mac; /**< macaddress for sync pkt */ + struct ether_addr tx_mac; /**< macaddress for tx */ + struct ether_addr rx_mac; /**< macaddress for rx */ + int8 loop_test; /**< dbg feature to loop the test */ + uint16 test_rxcnt; + /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */ +} wl_ota_test_vector_t; + +/** struct copied back form dongle to host to query the status */ +typedef struct wl_ota_test_status { + int16 cur_test_cnt; /**< test phase */ + int8 skip_test_reason; /**< skip test reasoin */ + uint8 PAD; + wl_ota_test_args_t test_arg; /**< cur test arg details */ + uint16 test_cnt; /**< total no of test downloaded */ + uint8 file_dwnld_valid; /**< file successfully downloaded ? 
*/ + uint8 sync_timeout; /**< sync timeout */ + int8 sync_fail_action; /**< sync fail action */ + struct ether_addr sync_mac; /**< macaddress for sync pkt */ + struct ether_addr tx_mac; /**< tx mac address */ + struct ether_addr rx_mac; /**< rx mac address */ + uint8 test_stage; /**< check the test status */ + int8 loop_test; /**< Debug feature to puts test enfine in a loop */ + uint8 sync_status; /**< sync status */ +} wl_ota_test_status_t; + +/* FOR ioctl that take the sta monitor information */ +typedef struct stamon_data { + struct ether_addr ea; + uint8 PAD[2]; + int32 rssi; +} stamon_data_t; + +typedef struct stamon_info { + int32 version; + uint32 count; + stamon_data_t sta_data[1]; +} stamon_info_t; + +typedef struct wl_ota_rx_rssi { + uint16 pktcnt; /* Pkt count used for this rx test */ + chanspec_t chanspec; /* Channel info on which the packets are received */ + int16 rssi; /* Average RSSI of the first 50% packets received */ +} wl_ota_rx_rssi_t; + +#define WL_OTARSSI_T_VERSION 1 /* version of wl_ota_test_rssi_t struct */ +#define WL_OTA_TEST_RSSI_FIXED_SIZE OFFSETOF(wl_ota_test_rssi_t, rx_rssi) + +typedef struct wl_ota_test_rssi { + uint8 version; + uint8 testcnt; /* total measured RSSI values, valid on output only */ + wl_ota_rx_rssi_t rx_rssi[1]; /* Variable length array of wl_ota_rx_rssi_t */ +} wl_ota_test_rssi_t; + +/* WL_OTA END */ + +/**wl_radar_args_t */ +typedef struct { + int32 npulses; /**< required number of pulses at n * t_int */ + int32 ncontig; /**< required number of pulses at t_int */ + int32 min_pw; /**< minimum pulse width (20 MHz clocks) */ + int32 max_pw; /**< maximum pulse width (20 MHz clocks) */ + uint16 thresh0; /**< Radar detection, thresh 0 */ + uint16 thresh1; /**< Radar detection, thresh 1 */ + uint16 blank; /**< Radar detection, blank control */ + uint16 fmdemodcfg; /**< Radar detection, fmdemod config */ + int32 npulses_lp; /**< Radar detection, minimum long pulses */ + int32 min_pw_lp; /**< Minimum pulsewidth for long 
pulses */ + int32 max_pw_lp; /**< Maximum pulsewidth for long pulses */ + int32 min_fm_lp; /**< Minimum fm for long pulses */ + int32 max_span_lp; /**< Maximum deltat for long pulses */ + int32 min_deltat; /**< Minimum spacing between pulses */ + int32 max_deltat; /**< Maximum spacing between pulses */ + uint16 autocorr; /**< Radar detection, autocorr on or off */ + uint16 st_level_time; /**< Radar detection, start_timing level */ + uint16 t2_min; /**< minimum clocks needed to remain in state 2 */ + uint8 PAD[2]; + uint32 version; /**< version */ + uint32 fra_pulse_err; /**< sample error margin for detecting French radar pulsed */ + int32 npulses_fra; /**< Radar detection, minimum French pulses set */ + int32 npulses_stg2; /**< Radar detection, minimum staggered-2 pulses set */ + int32 npulses_stg3; /**< Radar detection, minimum staggered-3 pulses set */ + uint16 percal_mask; /**< defines which period cal is masked from radar detection */ + uint8 PAD[2]; + int32 quant; /**< quantization resolution to pulse positions */ + uint32 min_burst_intv_lp; /**< minimum burst to burst interval for bin3 radar */ + uint32 max_burst_intv_lp; /**< maximum burst to burst interval for bin3 radar */ + int32 nskip_rst_lp; /**< number of skipped pulses before resetting lp buffer */ + int32 max_pw_tol; /* maximum tolerance allowd in detected pulse width for radar detection */ + uint16 feature_mask; /**< 16-bit mask to specify enabled features */ + uint16 thresh0_sc; /**< Radar detection, thresh 0 */ + uint16 thresh1_sc; /**< Radar detection, thresh 1 */ + uint8 PAD[2]; +} wl_radar_args_t; + +#define WL_RADAR_ARGS_VERSION 2 + +typedef struct { + uint32 version; /**< version */ + uint16 thresh0_20_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */ + uint16 thresh1_20_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */ + uint16 thresh0_40_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */ + uint16 thresh1_40_lo; /**< Radar 
detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */ + uint16 thresh0_80_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */ + uint16 thresh1_80_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */ + uint16 thresh0_20_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */ + uint16 thresh1_20_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */ + uint16 thresh0_40_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */ + uint16 thresh1_40_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */ + uint16 thresh0_80_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */ + uint16 thresh1_80_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */ + uint16 thresh0_160_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */ + uint16 thresh1_160_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */ + uint16 thresh0_160_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */ + uint16 thresh1_160_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */ +} wl_radar_thr_t; + +typedef struct { + uint32 version; /* version */ + uint16 thresh0_sc_20_lo; + uint16 thresh1_sc_20_lo; + uint16 thresh0_sc_40_lo; + uint16 thresh1_sc_40_lo; + uint16 thresh0_sc_80_lo; + uint16 thresh1_sc_80_lo; + uint16 thresh0_sc_20_hi; + uint16 thresh1_sc_20_hi; + uint16 thresh0_sc_40_hi; + uint16 thresh1_sc_40_hi; + uint16 thresh0_sc_80_hi; + uint16 thresh1_sc_80_hi; + uint16 fc_varth_sb; + uint16 fc_varth_bin5_sb; + uint16 notradar_enb; + uint16 max_notradar_lp; + uint16 max_notradar; + uint16 max_notradar_lp_sc; + uint16 max_notradar_sc; + uint16 highpow_war_enb; + uint16 highpow_sp_ratio; //unit is 0.5 +} wl_radar_thr2_t; + +#define WL_RADAR_THR_VERSION 2 + +typedef struct { + uint32 ver; + uint32 len; + int32 rssi_th[3]; + uint8 rssi_gain_80[4]; + uint8 rssi_gain_160[4]; +} 
wl_dyn_switch_th_t; + +#define WL_PHY_DYN_SWITCH_TH_VERSION 1 + +/** RSSI per antenna */ +typedef struct { + uint32 version; /**< version field */ + uint32 count; /**< number of valid antenna rssi */ + int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */ + int8 rssi_sum; /**< summed rssi across all antennas */ + int8 PAD[3]; +} wl_rssi_ant_t; + +/* SNR per antenna */ +typedef struct { + uint32 version; /* version field */ + uint32 count; /* number of valid antenna snr */ + int8 snr_ant[WL_RSSI_ANT_MAX]; /* snr per antenna */ +} wl_snr_ant_t; + +/** data structure used in 'dfs_status' wl interface, which is used to query dfs status */ +typedef struct { + uint32 state; /**< noted by WL_DFS_CACSTATE_XX. */ + uint32 duration; /**< time spent in ms in state. */ + /** + * as dfs enters ISM state, it removes the operational channel from quiet channel + * list and notes the channel in channel_cleared. set to 0 if no channel is cleared + */ + chanspec_t chanspec_cleared; + /** chanspec cleared used to be a uint32, add another to uint16 to maintain size */ + uint16 pad; +} wl_dfs_status_t; + +typedef struct { + uint32 state; /* noted by WL_DFS_CACSTATE_XX */ + uint32 duration; /* time spent in ms in state */ + chanspec_t chanspec; /* chanspec of this core */ + chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */ + uint16 sub_type; /* currently just the index of the core or the respective PLL */ + uint16 pad; +} wl_dfs_sub_status_t; + +#define WL_DFS_STATUS_ALL_VERSION (1) +typedef struct { + uint16 version; /* version field; current max version 1 */ + uint16 num_sub_status; + wl_dfs_sub_status_t dfs_sub_status[1]; /* struct array of length num_sub_status */ +} wl_dfs_status_all_t; + +#define WL_DFS_AP_MOVE_VERSION (1) + +struct wl_dfs_ap_move_status_v1 { + int16 dfs_status; /* DFS scan status */ + chanspec_t chanspec; /* New AP Chanspec */ + wl_dfs_status_t cac_status; /* CAC status */ +}; + +typedef struct wl_dfs_ap_move_status_v2 { + 
int8 version; /* version field; current max version 1 */ + int8 move_status; /* DFS move status */ + chanspec_t chanspec; /* New AP Chanspec */ + wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */ +} wl_dfs_ap_move_status_v2_t; + +#define WL_DFS_AP_MOVE_ABORT -1 /* Abort any dfs_ap_move in progress immediately */ +#define WL_DFS_AP_MOVE_STUNT -2 /* Stunt move but continue background CSA if in progress */ + +/** data structure used in 'radar_status' wl interface, which is use to query radar det status */ +typedef struct { + uint8 detected; + uint8 PAD[3]; + int32 count; + uint8 pretended; + uint8 PAD[3]; + uint32 radartype; + uint32 timenow; + uint32 timefromL; + int32 lp_csect_single; + int32 detected_pulse_index; + int32 nconsecq_pulses; + chanspec_t ch; + uint8 PAD[2]; + int32 pw[10]; + int32 intv[10]; + int32 fm[10]; +} wl_radar_status_t; + +#define NUM_PWRCTRL_RATES 12 + +typedef struct { + uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /**< User set target */ + uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /**< reg and local power limit */ + uint8 txpwr_local_max; /**< local max according to the AP */ + uint8 txpwr_local_constraint; /**< local constraint according to the AP */ + uint8 txpwr_chan_reg_max; /**< Regulatory max for this channel */ + uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /**< Latest target for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /**< On G phy, OFDM power offset */ + uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /**< Max CCK power for this band (SROM) */ + uint8 txpwr_bphy_ofdm_max; /**< Max OFDM power for this band (SROM) */ + uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /**< Max power for A band (SROM) */ + int8 txpwr_antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_power_legacy_t; + +#define WL_TX_POWER_RATES_LEGACY 45 +#define WL_TX_POWER_MCS20_FIRST 12 
+#define WL_TX_POWER_MCS20_NUM 16 +#define WL_TX_POWER_MCS40_FIRST 28 +#define WL_TX_POWER_MCS40_NUM 17 + +typedef struct { + uint32 flags; + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint8 local_max; /**< local max according to the AP */ + uint8 local_constraint; /**< local constraint according to the AP */ + int8 antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 rf_cores; /**< count of RF Cores being reported */ + uint8 est_Pout[4]; /**< Latest tx power out estimate per RF + * chain without adjustment + */ + uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */ + uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /**< User limit */ + uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /**< Regulatory power limit */ + uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /**< Max power board can support (SROM) */ + uint8 target[WL_TX_POWER_RATES_LEGACY]; /**< Latest target power */ + uint8 PAD[2]; +} tx_power_legacy2_t; + +#define WL_NUM_2x2_ELEMENTS 4 +#define WL_NUM_3x3_ELEMENTS 6 +#define WL_NUM_4x4_ELEMENTS 10 + +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 flags; + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint32 buflen; /**< ppr buffer length */ + uint8 pprbuf[1]; /**< Latest target power buffer */ +} wl_txppr_t; + +#define WL_TXPPR_VERSION 1 +#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t)) +#define TX_POWER_T_VERSION 45 +/** number of ppr serialization buffers, it should be reg, board and target */ +#define WL_TXPPR_SER_BUF_NUM (3) +/* curpower ppr types */ +enum { + PPRTYPE_TARGETPOWER = 1, + PPRTYPE_BOARDLIMITS = 2, + PPRTYPE_REGLIMITS = 3 +}; + +typedef struct chanspec_txpwr_max { + chanspec_t chanspec; /**< chanspec */ + uint8 txpwr_max; /**< max txpwr in all the rates */ + uint8 padding; +} 
chanspec_txpwr_max_t; + +typedef struct wl_chanspec_txpwr_max { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 count; /**< number of elements of (chanspec, txpwr_max) pair */ + chanspec_txpwr_max_t txpwr[1]; /**< array of (chanspec, max_txpwr) pair */ +} wl_chanspec_txpwr_max_t; + +#define WL_CHANSPEC_TXPWR_MAX_VER 1 +#define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t)) + +typedef struct tx_inst_power { + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_inst_power_t; + +#define WL_NUM_TXCHAIN_MAX 4 +typedef struct wl_txchain_pwr_offsets { + int8 offset[WL_NUM_TXCHAIN_MAX]; /**< quarter dBm signed offset for each chain */ +} wl_txchain_pwr_offsets_t; + +/** maximum channels returned by the get valid channels iovar */ +#define WL_NUMCHANNELS 64 +#define WL_NUMCHANNELS_MANY_CHAN 10 +#define WL_ITER_LIMIT_MANY_CHAN 5 + +#define WL_MIMO_PS_CFG_VERSION_1 1 + +typedef struct wl_mimops_cfg { + uint8 version; + /* active_chains: 0 for all, 1 for 1 chain. */ + uint8 active_chains; + /* static (0) or dynamic (1).or disabled (3) Mode applies only when active_chains = 0. */ + uint8 mode; + /* bandwidth = Full (0), 20M (1), 40M (2), 80M (3). */ + uint8 bandwidth; + uint8 applychangesafterlearning; + uint8 pad[3]; +} wl_mimops_cfg_t; + +/* This event is for tracing MIMO PS metrics snapshot calls. + * It is helpful to debug out-of-sync issue between + * ucode SHM values and FW snapshot calculation. + * It is part of the EVENT_LOG_TAG_MIMO_PS_TRACE. 
+ */ +#define WL_MIMO_PS_METRICS_SNAPSHOT_TRACE_TYPE 0 +typedef struct wl_mimo_ps_metrics_snapshot_trace { + /* type field for this TLV: */ + uint16 type; + /* length field for this TLV */ + uint16 len; + uint32 idle_slotcnt_mimo; /* MIMO idle slotcnt raw SHM value */ + uint32 last_idle_slotcnt_mimo; /* stored value snapshot */ + uint32 idle_slotcnt_siso; /* SISO idle slotcnt raw SHM value */ + uint32 last_idle_slotcnt_siso; /* stored value snapshot */ + uint32 rx_time_mimo; /* Rx MIMO raw SHM value */ + uint32 last_rx_time_mimo; /* stored value snapshot */ + uint32 rx_time_siso; /* RX SISO raw SHM value */ + uint32 last_rx_time_siso; /* stored value snapshot */ + uint32 tx_time_1chain; /* Tx 1-chain raw SHM value */ + uint32 last_tx_time_1chain; /* stored value snapshot */ + uint32 tx_time_2chain; /* Tx 2-chain raw SHM value */ + uint32 last_tx_time_2chain; /* stored value snapshot */ + uint32 tx_time_3chain; /* Tx 3-chain raw SHM value */ + uint32 last_tx_time_3chain; /* stored value snapshot */ + uint16 reason; /* reason for snapshot call, see below */ + /* Does the call reset last values after delta calculation */ + uint16 reset_last; +} wl_mimo_ps_metrics_snapshot_trace_t; +/* reason codes for mimo ps metrics snapshot function calls */ +#define WL_MIMOPS_METRICS_SNAPSHOT_REPORT 1 +#define WL_MIMOPS_METRICS_SNAPSHOT_RXCHAIN_SET 2 +#define WL_MIMOPS_METRICS_SNAPSHOT_ARBI 3 +#define WL_MIMOPS_METRICS_SNAPSHOT_SLOTUPD 4 +#define WL_MIMOPS_METRICS_SNAPSHOT_PMBCNRX 5 +#define WL_MIMOPS_METRICS_SNAPSHOT_BMACINIT 6 +#define WL_MIMOPS_METRICS_SNAPSHOT_HT_COMPLETE 7 +#define WL_MIMOPS_METRICS_SNAPSHOT_OCL 8 + +#define WL_MIMO_PS_STATUS_VERSION_2 2 +typedef struct wl_mimo_ps_status { + uint8 version; + uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */ + uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */ + uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. 
See below for values */ + uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */ + uint8 bss_rxchain; /* bss rxchain bitmask */ + uint8 bss_txchain; /* bss txchain bitmask */ + uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint16 hw_state; /* bitmask of hw state. See below for values */ + uint8 hw_rxchain; /* actual HW rxchain bitmask */ + uint8 hw_txchain; /* actual HW txchain bitmask */ + uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint8 pm_bcnrx_state; /* actual state of ucode flag */ + uint8 basic_rates_present; /* internal flag to trigger siso bcmc rx */ + uint8 siso_bcmc_rx_state; /* actual state of ucode flag */ +} wl_mimo_ps_status_t; + +#define WL_MIMO_PS_STATUS_VERSION_1 1 +typedef struct wl_mimo_ps_status_v1 { + uint8 version; + uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */ + uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */ + uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. See below for values */ + uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */ + uint8 bss_rxchain; /* bss rxchain bitmask */ + uint8 bss_txchain; /* bss txchain bitmask */ + uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint16 hw_state; /* bitmask of hw state. 
See below for values */ + uint8 hw_rxchain; /* actual HW rxchain bitmask */ + uint8 hw_txchain; /* actual HW txchain bitmask */ + uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint8 pad[3]; +} wl_mimo_ps_status_v1_t; + +#define WL_MIMO_PS_STATUS_AP_CAP(ap_cap) (ap_cap & 0x0F) +#define WL_MIMO_PS_STATUS_AP_CAP_BW(ap_cap) (ap_cap >> 4) +#define WL_MIMO_PS_STATUS_ASSOC_BW_SHIFT 4 + +/* version 3: assoc status: low nibble is status enum, high other flags */ +#define WL_MIMO_PS_STATUS_VERSION_3 3 +#define WL_MIMO_PS_STATUS_ASSOC_STATUS_MASK 0x0F +#define WL_MIMO_PS_STATUS_ASSOC_STATUS_VHT_WITHOUT_OMN 0x80 + +/* mimo_ps_status: ap_cap/association status */ +enum { + WL_MIMO_PS_STATUS_ASSOC_NONE = 0, + WL_MIMO_PS_STATUS_ASSOC_SISO = 1, + WL_MIMO_PS_STATUS_ASSOC_MIMO = 2, + WL_MIMO_PS_STATUS_ASSOC_LEGACY = 3 +}; + +/* mimo_ps_status: mimo_ps_cfg states */ +enum { + WL_MIMO_PS_CFG_STATE_NONE = 0, + WL_MIMO_PS_CFG_STATE_INFORM_AP_INPROGRESS = 1, + WL_MIMO_PS_CFG_STATE_INFORM_AP_DONE = 2, + WL_MIMO_PS_CFG_STATE_LEARNING = 3, + WL_MIMO_PS_CFG_STATE_HW_CONFIGURE = 4, + WL_MIMO_PS_CFG_STATE_INFORM_AP_PENDING = 5 +}; + +/* mimo_ps_status: hw_state values */ +#define WL_MIMO_PS_STATUS_HW_STATE_NONE 0 +#define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX (0x1 << 0) +#define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS (0x1 << 1) +#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS (0x1 << 2) +#define WL_MIMO_PS_STATUS_HW_STATE_SCAN (0x1 << 3) +#define WL_MIMO_PS_STATUS_HW_STATE_TXPPR (0x1 << 4) +#define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE (0x1 << 5) +#define WL_MIMO_PS_STATUS_HW_STATE_TMPSENSE (0x1 << 6) +#define WL_MIMO_PS_STATUS_HW_STATE_IOVAR (0x1 << 7) +#define WL_MIMO_PS_STATUS_HW_STATE_AP_BSS (0x1 << 8) + +/* mimo_ps_status: mrc states */ +#define WL_MIMO_PS_STATUS_MRC_NONE 0 +#define WL_MIMO_PS_STATUS_MRC_ACTIVE 1 + +/* mimo_ps_status: core flag states for single-core beacon and siso-bcmc rx */ +#define WL_MIMO_PS_STATUS_MHF_FLAG_NONE 0 +#define 
WL_MIMO_PS_STATUS_MHF_FLAG_ACTIVE 1 +#define WL_MIMO_PS_STATUS_MHF_FLAG_COREDOWN 2 +#define WL_MIMO_PS_STATUS_MHF_FLAG_INVALID 3 + +/* Type values for the REASON */ +#define WL_MIMO_PS_PS_LEARNING_ABORTED (1 << 0) +#define WL_MIMO_PS_PS_LEARNING_COMPLETED (1 << 1) +#define WL_MIMO_PS_PS_LEARNING_ONGOING (1 << 2) + +typedef struct wl_mimo_ps_learning_event_data { + uint32 startTimeStamp; + uint32 endTimeStamp; + uint16 reason; + struct ether_addr BSSID; + uint32 totalSISO_below_rssi_threshold; + uint32 totalMIMO_below_rssi_threshold; + uint32 totalSISO_above_rssi_threshold; + uint32 totalMIMO_above_rssi_threshold; +} wl_mimo_ps_learning_event_data_t; + +#define WL_MIMO_PS_PS_LEARNING_CFG_ABORT (1 << 0) +#define WL_MIMO_PS_PS_LEARNING_CFG_STATUS (1 << 1) +#define WL_MIMO_PS_PS_LEARNING_CFG_CONFIG (1 << 2) +#define WL_MIMO_PS_PS_LEARNING_CFG_MASK (0x7) + +#define WL_MIMO_PS_PS_LEARNING_CFG_V1 1 + +typedef struct wl_mimops_learning_cfg { + /* flag: bit 0 for abort */ + /* flag: bit 1 for status */ + /* flag: bit 2 for configuring no of packets and rssi */ + uint8 flag; + /* mimo ps learning version, compatible version is 0 */ + uint8 version; + /* if version is 0 or rssi is 0, ignored */ + int8 learning_rssi_threshold; + uint8 reserved; + uint32 no_of_packets_for_learning; + wl_mimo_ps_learning_event_data_t mimops_learning_data; +} wl_mimops_learning_cfg_t; + +#define WL_OCL_STATUS_VERSION 1 +typedef struct ocl_status_info { + uint8 version; + uint8 len; + uint16 fw_status; /* Bits representing FW disable reasons */ + uint8 hw_status; /* Bits for actual HW config and SISO/MIMO coremask */ + uint8 coremask; /* The ocl core mask (indicating listening core) */ +} ocl_status_info_t; + +/* MWS OCL map */ +#define WL_MWS_OCL_OVERRIDE_VERSION 1 +typedef struct wl_mws_ocl_override { + uint16 version; /* Structure version */ + uint16 bitmap_2g; /* bitmap for 2.4G channels bits 1-13 */ + uint16 bitmap_5g_lo; /* bitmap for 5G low channels by 2: + *34-48, 52-56, 60-64, 100-102 + 
*/ + uint16 bitmap_5g_mid; /* bitmap for 5G mid channels by 2: + * 104, 108-112, 116-120, 124-128, + * 132-136, 140, 149-151 + */ + uint16 bitmap_5g_high; /* bitmap for 5G high channels by 2 + * 153, 157-161, 165 + */ +} wl_mws_ocl_override_t; + +/* Bits for fw_status */ +#define OCL_DISABLED_HOST 0x01 /* Host has disabled through ocl_enable */ +#define OCL_DISABLED_RSSI 0x02 /* Disabled because of ocl_rssi_threshold */ +#define OCL_DISABLED_LTEC 0x04 /* Disabled due to LTE Coex activity */ +#define OCL_DISABLED_SISO 0x08 /* Disabled while in SISO mode */ +#define OCL_DISABLED_CAL 0x10 /* Disabled during active calibration */ +#define OCL_DISABLED_CHANSWITCH 0x20 /* Disabled during active channel switch */ +#define OCL_DISABLED_ASPEND 0x40 /* Disabled due to assoc pending */ + +/* Bits for hw_status */ +#define OCL_HWCFG 0x01 /* State of OCL config bit in phy HW */ +#define OCL_HWMIMO 0x02 /* Set if current coremask is > 1 bit */ +#define OCL_COREDOWN 0x80 /* Set if core is currently down */ + +#define WL_OPS_CFG_VERSION_1 1 +/* Common IOVAR struct */ +typedef struct wl_ops_cfg_v1 { + uint16 version; + uint16 len; /* total length includes fixed fields and variable data[] */ + uint16 subcmd_id; /* subcommand id */ + uint16 padding; /* reserved / padding for 4 byte align */ + uint8 data[]; /* subcommand data; could be empty */ +} wl_ops_cfg_v1_t; + +/* subcommands ids */ +enum { + WL_OPS_CFG_SUBCMD_ENABLE = 0, /* OPS enable/disable mybss and obss + * for nav and plcp options + */ + WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR = 1, /* Max sleep duration used for OPS */ + WL_OPS_CFG_SUBCMD_RESET_STATS = 2 /* Reset stats part of ops_status + * on both slices + */ +}; + +#define WL_OPS_CFG_MASK 0xffff +#define WL_OPS_CFG_CAP_MASK 0xffff0000 +#define WL_OPS_CFG_CAP_SHIFT 16 /* Shift bits to locate the OPS CAP */ +#define WL_OPS_MAX_SLEEP_DUR 12500 /* max ops duration in us */ +#define WL_OPS_MINOF_MAX_SLEEP_DUR 512 /* minof max ops duration in us */ +#define WL_OPS_SUPPORTED_CFG 
(WL_OPS_MYBSS_PLCP_DUR | WL_OPS_MYBSS_NAV_DUR \ + | WL_OPS_OBSS_PLCP_DUR | WL_OPS_OBSS_NAV_DUR) +#define WL_OPS_DEFAULT_CFG WL_OPS_SUPPORTED_CFG + +/* WL_OPS_CFG_SUBCMD_ENABLE */ +typedef struct wl_ops_cfg_enable { + uint32 bits; /* selectively enable ops for mybss and obss */ +} wl_ops_cfg_enable_t; +/* Bits for WL_OPS_CFG_SUBCMD_ENABLE Parameter */ +#define WL_OPS_MYBSS_PLCP_DUR 0x1 /* OPS based on mybss 11b & 11n mixed HT frames + * PLCP header duration + */ +#define WL_OPS_MYBSS_NAV_DUR 0x2 /* OPS based on mybss RTS-CTS duration */ +#define WL_OPS_OBSS_PLCP_DUR 0x4 /* OPS based on obss 11b & 11n mixed HT frames + * PLCP header duration + */ +#define WL_OPS_OBSS_NAV_DUR 0x8 /* OPS based on obss RTS-CTS duration */ + +/* WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR */ +typedef struct wl_ops_cfg_max_sleep_dur { + uint32 val; /* maximum sleep duration (us) used for OPS */ +} wl_ops_cfg_max_sleep_dur_t; + +/* WL_OPS_CFG_SUBCMD_RESET_STATS */ +typedef struct wl_ops_cfg_reset_stats { + uint32 val; /* bitmap of slices, 0 means all slices */ +} wl_ops_cfg_reset_stats_t; + +#define WL_OPS_STATUS_VERSION_1 1 +#define OPS_DUR_HIST_BINS 5 /* number of bins used, 0-1, 1-2, 2-4, 4-8, >8 msec */ +typedef struct wl_ops_status_v1 { + uint16 version; + uint16 len; /* Total length including all fixed fields */ + uint8 slice_index; /* Slice for which status is reported */ + uint8 disable_obss; /* indicate if obss cfg is disabled */ + uint8 pad[2]; /* 4-byte alignment */ + uint32 disable_reasons; /* FW disable reasons */ + uint32 disable_duration; /* ops disable time(ms) due to disable reasons */ + uint32 applied_ops_config; /* currently applied ops config */ + uint32 partial_ops_dur; /* Total time (in usec) of partial ops duration */ + uint32 full_ops_dur; /* Total time (in usec) of full ops duration */ + uint32 count_dur_hist[OPS_DUR_HIST_BINS]; /* ops occurrence histogram */ + uint32 nav_cnt; /* number of times ops triggered based NAV duration */ + uint32 plcp_cnt; /* number of times ops 
triggered based PLCP duration */ + uint32 mybss_cnt; /* number of times mybss ops trigger */ + uint32 obss_cnt; /* number of times obss ops trigger */ + uint32 miss_dur_cnt; /* number of times ops couldn't happen + * due to insufficient duration + */ + uint32 miss_premt_cnt; /* number of times ops couldn't happen due + * to not meeting Phy preemption thresh + */ + uint32 max_dur_cnt; /* number of times ops did not trigger due to + * frames exceeding max sleep duration + */ + uint32 wake_cnt; /* number of ops miss due to wake reason */ + uint32 bcn_wait_cnt; /* number of ops miss due to waiting for bcn */ +} wl_ops_status_v1_t; +/* Bits for disable_reasons */ +#define OPS_DISABLED_HOST 0x01 /* Host has disabled through ops_cfg */ +#define OPS_DISABLED_UNASSOC 0x02 /* Disabled because the slice is in unassociated state */ +#define OPS_DISABLED_SCAN 0x04 /* Disabled because the slice is in scan state */ +#define OPS_DISABLED_BCN_MISS 0x08 /* Disabled because beacon missed for a duration */ + +#define WL_PSBW_CFG_VERSION_1 1 +/* Common IOVAR struct */ +typedef struct wl_psbw_cfg_v1 { + uint16 version; + uint16 len; /* total length includes fixed fields and variable data[] */ + uint16 subcmd_id; /* subcommand id */ + uint16 pad; /* reserved / padding for 4 byte align */ + uint8 data[]; /* subcommand data */ +} wl_psbw_cfg_v1_t; + +/* subcommands ids */ +enum { + /* PSBW enable/disable */ + WL_PSBW_CFG_SUBCMD_ENABLE = 0, + /* override psbw disable requests */ + WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1, + /* Reset stats part of psbw status */ + WL_PSBW_CFG_SUBCMD_RESET_STATS = 2 +}; + +#define WL_PSBW_OVERRIDE_DISA_CFG_MASK 0x0000ffff +#define WL_PSBW_OVERRIDE_DISA_CAP_MASK 0xffff0000 +#define WL_PSBW_OVERRIDE_DISA_CAP_SHIFT 16 /* shift bits for cap */ + +/* WL_PSBW_CFG_SUBCMD_ENABLE */ +typedef struct wl_psbw_cfg_enable { + bool enable; /* enable or disable */ +} wl_psbw_cfg_enable_t; + +/* WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */ +typedef struct 
wl_psbw_cfg_override_disable_mask { + uint32 mask; /* disable requests to override, cap and current cfg */ +} wl_psbw_cfg_override_disable_mask_t; + +/* WL_PSBW_CFG_SUBCMD_RESET_STATS */ +typedef struct wl_psbw_cfg_reset_stats { + uint32 val; /* infra interface index, 0 */ +} wl_psbw_cfg_reset_stats_t; + +#define WL_PSBW_STATUS_VERSION_1 1 +typedef struct wl_psbw_status_v1 { + uint16 version; + uint16 len; /* total length including all fixed fields */ + uint8 curr_slice_index; /* current slice index of the interface */ + uint8 associated; /* interface associatd */ + chanspec_t chspec; /* radio chspec */ + uint32 state; /* psbw state */ + uint32 disable_reasons; /* FW disable reasons */ + uint32 slice_enable_dur; /* time(ms) psbw remains enabled on this slice */ + uint32 total_enable_dur; /* time(ms) psbw remains enabled total */ + uint32 enter_cnt; /* total cnt entering PSBW active */ + uint32 exit_cnt; /* total cnt exiting PSBW active */ + uint32 exit_imd_cnt; /* total cnt imd exit when waited N tbtts */ + uint32 enter_skip_cnt; /* total cnt entering PSBW active skipped */ +} wl_psbw_status_v1_t; + +/* Bit for state */ +#define PSBW_ACTIVE 0x1 /* active 20MHz */ +#define PSBW_TTTT_PEND 0x2 /* waiting for TTTT intr */ +#define PSBW_WAIT_ENTER 0x4 /* in wait period before entering */ +#define PSBW_CAL_DONE 0x8 /* 20M channel cal done */ + +/* Bits for disable_reasons */ +#define WL_PSBW_DISA_HOST 0x00000001 /* Host has disabled through psbw_cfg */ +#define WL_PSBW_DISA_AP20M 0x00000002 /* AP is operating on 20 MHz */ +#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* AWDL or NAN active */ +#define WL_PSBW_DISA_NOT_PMFAST 0x00000008 /* Not PM_FAST */ +#define WL_PSBW_DISA_BASICRATESET 0x00000010 /* BasicRateSet is empty */ +#define WL_PSBW_DISA_NOT_D3 0x00000020 /* PCIe not in D3 */ +#define WL_PSBW_DISA_CSA 0x00000040 /* CSA IE is present */ +#define WL_PSBW_DISA_ASSOC 0x00000080 /* assoc state is active/or unassoc */ +#define WL_PSBW_DISA_SCAN 0x00000100 /* scan state 
is active */ +#define WL_PSBW_DISA_CAL 0x00000200 /* cal pending or active */ +/* following are not part of disable reasons */ +#define WL_PSBW_EXIT_PM 0x00001000 /* Out of PM */ +#define WL_PSBW_EXIT_TIM 0x00002000 /* unicast TIM bit present */ +#define WL_PSBW_EXIT_DATA 0x00004000 /* Data for transmission */ +#define WL_PSBW_EXIT_MGMTDATA 0x00008000 /* management frame for transmission */ +#define WL_PSBW_EXIT_BW_UPD 0x00010000 /* BW being updated */ +#define WL_PSBW_DISA_NONE 0x80000000 /* reserved for internal use only */ + +/* + * Join preference iovar value is an array of tuples. Each tuple has a one-byte type, + * a one-byte length, and a variable length value. RSSI type tuple must be present + * in the array. + * + * Types are defined in "join preference types" section. + * + * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple + * and must be set to zero. + * + * Values are defined below. + * + * 1. RSSI - 2 octets + * offset 0: reserved + * offset 1: reserved + * + * 2. WPA - 2 + 12 * n octets (n is # tuples defined below) + * offset 0: reserved + * offset 1: # of tuples + * offset 2: tuple 1 + * offset 14: tuple 2 + * ... + * offset 2 + 12 * (n - 1) octets: tuple n + * + * struct wpa_cfg_tuple { + * uint8 akm[DOT11_OUI_LEN+1]; akm suite + * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite + * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite + * }; + * + * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY. + * + * 3. BAND - 2 octets + * offset 0: reserved + * offset 1: see "band preference" and "band types" + * + * 4. 
BAND RSSI - 2 octets + * offset 0: band types + * offset 1: +ve RSSI boost value in dB + */ + +struct tsinfo_arg { + uint8 octets[3]; +}; + +#define RATE_CCK_1MBPS 0 +#define RATE_CCK_2MBPS 1 +#define RATE_CCK_5_5MBPS 2 +#define RATE_CCK_11MBPS 3 + +#define RATE_LEGACY_OFDM_6MBPS 0 +#define RATE_LEGACY_OFDM_9MBPS 1 +#define RATE_LEGACY_OFDM_12MBPS 2 +#define RATE_LEGACY_OFDM_18MBPS 3 +#define RATE_LEGACY_OFDM_24MBPS 4 +#define RATE_LEGACY_OFDM_36MBPS 5 +#define RATE_LEGACY_OFDM_48MBPS 6 +#define RATE_LEGACY_OFDM_54MBPS 7 + +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1 +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V1 1 +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V2 2 + +typedef struct wl_bsstrans_rssi { + int8 rssi_2g; /**< RSSI in dbm for 2.4 G */ + int8 rssi_5g; /**< RSSI in dbm for 5G, unused for cck */ +} wl_bsstrans_rssi_t; + +#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */ + +/** RSSI to rate mapping, all 20Mhz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map_v2 { + uint16 ver; + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT_ALL]; /**< MCS0-11 */ + wl_bsstrans_rssi_t phy_ax[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_HE]; /**< MCS0-11 */ +} wl_bsstrans_rssi_rate_map_v2_t; + +/** RSSI to rate mapping, all 20Mhz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map_v1 { + uint16 ver; + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */ +} 
wl_bsstrans_rssi_rate_map_v1_t; + +/** RSSI to rate mapping, all 20Mhz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map { + uint16 ver; + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */ +} wl_bsstrans_rssi_rate_map_t; + +#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1 + +/** Configure number of scans allowed per throttle period */ +typedef struct wl_bsstrans_roamthrottle { + uint16 ver; + uint16 period; + uint16 scans_allowed; +} wl_bsstrans_roamthrottle_t; + +#define NFIFO 6 /**< # tx/rx fifopairs */ + +#if defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED) +#if defined(WL_MU_TX) && !defined(WL_MU_TX_DISABLED) +#define NFIFO_EXT 32 /* 6 traditional FIFOs + 2 rsvd + 24 MU FIFOs */ +#else +#define NFIFO_EXT 10 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare */ +#endif // endif +#elif defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED) +#define NFIFO_EXT 10 +#else +#define NFIFO_EXT NFIFO +#endif /* BCM_AQM_DMA_DESC && !BCM_AQM_DMA_DESC_DISABLED */ + +/* Reinit reason codes */ +enum { + WL_REINIT_RC_NONE = 0, + WL_REINIT_RC_PS_SYNC = 1, + WL_REINIT_RC_PSM_WD = 2, + WL_REINIT_RC_MAC_WAKE = 3, + WL_REINIT_RC_MAC_SUSPEND = 4, + WL_REINIT_RC_MAC_SPIN_WAIT = 5, + WL_REINIT_RC_AXI_BUS_ERROR = 6, + WL_REINIT_RC_DEVICE_REMOVED = 7, + WL_REINIT_RC_PCIE_FATAL_ERROR = 8, + WL_REINIT_RC_OL_FW_TRAP = 9, + WL_REINIT_RC_FIFO_ERR = 10, + WL_REINIT_RC_INV_TX_STATUS = 11, + WL_REINIT_RC_MQ_ERROR = 12, + WL_REINIT_RC_PHYTXERR_THRESH = 13, + WL_REINIT_RC_USER_FORCED = 14, + WL_REINIT_RC_FULL_RESET = 15, + WL_REINIT_RC_AP_BEACON = 16, + WL_REINIT_RC_PM_EXCESSED = 17, + WL_REINIT_RC_NO_CLK = 18, + WL_REINIT_RC_SW_ASSERT = 19, + WL_REINIT_RC_PSM_JMP0 = 20, + 
WL_REINIT_RC_PSM_RUN = 21, + WL_REINIT_RC_ENABLE_MAC = 22, + WL_REINIT_RC_SCAN_TIMEOUT = 23, + WL_REINIT_RC_JOIN_TIMEOUT = 24, + /* Below error codes are generated during D3 exit validation */ + WL_REINIT_RC_LINK_NOT_ACTIVE = 25, + WL_REINIT_RC_PCI_CFG_RD_FAIL = 26, + WL_REINIT_RC_INV_VEN_ID = 27, + WL_REINIT_RC_INV_DEV_ID = 28, + WL_REINIT_RC_INV_BAR0 = 29, + WL_REINIT_RC_INV_BAR2 = 30, + WL_REINIT_RC_AER_UC_FATAL = 31, + WL_REINIT_RC_AER_UC_NON_FATAL = 32, + WL_REINIT_RC_AER_CORR = 33, + WL_REINIT_RC_AER_DEV_STS = 34, + WL_REINIT_RC_PCIe_STS = 35, + WL_REINIT_RC_MMIO_RD_FAIL = 36, + WL_REINIT_RC_MMIO_RD_INVAL = 37, + WL_REINIT_RC_MMIO_ARM_MEM_RD_FAIL = 38, + WL_REINIT_RC_MMIO_ARM_MEM_INVAL = 39, + WL_REINIT_RC_SROM_LOAD_FAILED = 40, + WL_REINIT_RC_PHY_CRASH = 41, + WL_REINIT_TX_STALL = 42, + WL_REINIT_RC_TX_FLOW_CONTROL_BLOCKED = 43, + WL_REINIT_RC_RX_HC_FAIL = 44, + WL_REINIT_RC_RX_DMA_STALL = 45, + WL_REINIT_UTRACE_BUF_OVERLAP_SR = 46, + WL_REINIT_UTRACE_TPL_OUT_BOUNDS = 47, + WL_REINIT_UTRACE_TPL_OSET_STRT0 = 48, + WL_REINIT_RC_PHYTXERR = 49, + WL_REINIT_RC_PSM_FATAL_SUSP = 50, + WL_REINIT_RC_TX_FIFO_SUSP = 51, + WL_REINIT_RC_MAC_ENABLE = 52, + WL_REINIT_RC_SCAN_STALLED = 53, + WL_REINIT_RC_PHY_HC = 54, + WL_REINIT_RC_LAST /* This must be the last entry */ +}; + +#define NREINITREASONCOUNT 8 + +#define REINITRSNIDX(_x) (((_x) < WL_REINIT_RC_LAST) ? (_x) : 0) + +#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */ +#define WL_CNT_VERSION_6 6 +#define WL_CNT_VERSION_7 7 +#define WL_CNT_VERSION_11 11 +#define WL_CNT_VERSION_XTLV 30 + +#define WL_COUNTERS_IOV_VERSION_1 1 +#define WL_SUBCNTR_IOV_VER WL_COUNTERS_IOV_VERSION_1 +/* First two uint16 are version and lenght fields. 
So offset of the first counter will be 4 */ +#define FIRST_COUNTER_OFFSET 0x04 + +#define WLC_WITH_XTLV_CNT + +/* Number of xtlv info as required to calculate subcounter offsets */ +#define WL_CNT_XTLV_ID_NUM 10 +#define WL_TLV_IOV_VER 1 + +/** + * tlv IDs uniquely identifies counter component + * packed into wl_cmd_t container + */ +enum wl_cnt_xtlv_id { + WL_CNT_XTLV_SLICE_IDX = 0x1, /**< Slice index */ + WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */ + WL_CNT_XTLV_WLC_RINIT_RSN = 0x101, /**< WLC layer reinitreason extension */ + WL_CNT_XTLV_WLC_HE = 0x102, /* he counters */ + WL_CNT_XTLV_WLC_SECVLN = 0x103, /* security vulnerabilities counters */ + WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */ + WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800, /* corerev >= 64 UCODEX MACSTAT */ + WL_CNT_XTLV_GE80_UCODE_V1 = 0x900, /* corerev >= 80 UCODEX MACSTAT */ + WL_CNT_XTLV_GE80_TXFUNFL_UCODE_V1 = 0x1000 /* corerev >= 80 UCODEX MACSTAT */ +}; + +/* tlv IDs uniquely identifies periodic state component */ +enum wl_periodic_slice_state_xtlv_id { + WL_STATE_COMPACT_COUNTERS = 0x1, + WL_STATE_TXBF_COUNTERS = 0x2, + WL_STATE_COMPACT_HE_COUNTERS = 0x3 +}; + +/* tlv IDs uniquely identifies periodic state component */ +enum wl_periodic_if_state_xtlv_id { + WL_STATE_IF_COMPACT_STATE = 0x1, + WL_STATE_IF_ADPS_STATE = 0x02 +}; + +#define TDMTX_CNT_VERSION_V1 1 +#define TDMTX_CNT_VERSION_V2 2 + +/* structure holding tdm counters that interface to iovar */ +typedef struct tdmtx_cnt_v1 { + uint16 ver; + uint16 length; /* length of this structure */ + uint16 wlc_idx; /* index for wlc */ + uint16 enabled; /* tdmtx is enabled on slice */ + uint32 tdmtx_txa_on; /* TXA on requests */ + uint32 tdmtx_txa_tmcnt; /* Total number of TXA timeout */ + uint32 tdmtx_por_on; /* TXA POR requests */ + uint32 tdmtx_txpuen; /* Path enable 
requests */ + uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */ + uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */ + uint32 tdmtx_txdefer; /* Total number of times Tx was deferred on the slice */ + uint32 tdmtx_txmute; /* Total number of times active Tx muted on the slice */ + uint32 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */ + uint32 tdmtx_txa_dur; /* Total time txa on */ + uint32 tdmtx_txpri_dur; /* Total time TXPri */ + uint32 tdmtx_txdefer_dur; /* Total time txdefer */ + /* TDMTX input fields */ + uint32 tdmtx_txpri; + uint32 tdmtx_defer; + uint32 tdmtx_threshold; + uint32 tdmtx_rssi_threshold; + uint32 tdmtx_txpwrboff; + uint32 tdmtx_txpwrboff_dt; +} tdmtx_cnt_v1_t; + +typedef struct { + uint16 ver; + uint16 length; /* length of the data portion */ + uint16 cnt; + uint16 pad; /* pad to align to 32 bit */ + uint8 data[]; /* array of tdmtx_cnt_v1_t */ +} tdmtx_status_t; + +/* structure holding counters that match exactly shm field sizes */ +typedef struct tdmtx_cnt_shm_v1 { + uint16 tdmtx_txa_on; /* TXA on requests */ + uint16 tdmtx_tmcnt; /* TXA on requests */ + uint16 tdmtx_por_on; /* TXA POR requests */ + uint16 tdmtx_txpuen; /* Path enable requests */ + uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */ + uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */ + uint16 tdmtx_txdefer; /* Total number of times Tx was defered by the slice */ + uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */ + uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */ + uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */ + uint16 tdmtx_txa_dur_h; /* Total time (low 16 bits) txa on */ + uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */ + uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */ + uint16 tdmtx_txdefer_dur_l; 
/* Total time (low 16 bits) txdefer */ + uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */ +} tdmtx_cnt_shm_v1_t; + +/* structure holding tdm counters that interface to iovar for version 2 */ +typedef struct tdmtx_cnt_v2 { + uint16 ver; + uint16 length; /* length of this structure */ + uint16 wlc_idx; /* index for wlc */ + uint16 enabled; /* tdmtx is enabled on slice */ + uint32 tdmtx_txa_on; /* TXA on requests */ + uint32 tdmtx_txa_tmcnt; /* Total number of TXA timeout */ + uint32 tdmtx_porhi_on; /* TXA PORHI requests */ + uint32 tdmtx_porlo_on; /* TXA PORLO requests */ + uint32 tdmtx_txpuen; /* Path enable requests */ + uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */ + uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */ + uint32 tdmtx_txdefer; /* Total number of times Tx was deferred on the slice */ + uint32 tdmtx_txmute; /* Total number of times active Tx muted on the slice */ + uint32 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */ + uint32 tdmtx_txa_dur; /* Total time txa on */ + uint32 tdmtx_txpri_dur; /* Total time TXPri */ + uint32 tdmtx_txdefer_dur; /* Total time txdefer */ + /* TDMTX input fields */ + uint32 tdmtx_txpri; + uint32 tdmtx_defer; + uint32 tdmtx_threshold; + uint32 tdmtx_rssi_threshold; + uint32 tdmtx_txpwrboff; + uint32 tdmtx_txpwrboff_dt; +} tdmtx_cnt_v2_t; + +/* structure holding counters that match exactly shm field sizes */ +typedef struct tdmtx_cnt_shm_v2 { + uint16 tdmtx_txa_on; /* TXA on requests */ + uint16 tdmtx_tmcnt; /* TXA on requests */ + uint16 tdmtx_porhi_on; /* TXA PORHI requests */ + uint16 tdmtx_porlo_on; /* TXA PORLO requests */ + uint16 tdmtx_txpuen; /* Path enable requests */ + uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */ + uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */ + uint16 tdmtx_txdefer; /* Total number of times Tx was 
defered by the slice */ + uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */ + uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */ + uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */ + uint16 tdmtx_txa_dur_h; /* Total time (low 16 bits) txa on */ + uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */ + uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */ + uint16 tdmtx_txdefer_dur_l; /* Total time (low 16 bits) txdefer */ + uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */ +} tdmtx_cnt_shm_v2_t; + +typedef struct wl_tdmtx_ioc { + uint16 id; /* ID of the sub-command */ + uint16 len; /* total length of all data[] */ + uint8 data[]; /* var len payload */ +} wl_tdmtx_ioc_t; + +/* + * iovar subcommand ids + */ +enum { + IOV_TDMTX_ENB = 1, + IOV_TDMTX_STATUS = 2, + IOV_TDMTX_TXPRI = 3, + IOV_TDMTX_DEFER = 4, + IOV_TDMTX_TXA = 5, + IOV_TDMTX_CFG = 6, + IOV_TDMTX_LAST +}; + +/** + * The number of variables in wl macstat cnt struct. 
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t) + */ +#define WL_CNT_MCST_VAR_NUM 64 +/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */ +#define WL_CNT_MCST_STRUCT_SZ ((uint32)sizeof(uint32) * WL_CNT_MCST_VAR_NUM) +#define WL_CNT_REV80_MCST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge80mcst_v1_t)) +#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ \ + ((uint32)OFFSETOF(wl_cnt_ge80_txfunfl_v1_t, txfunfl)) +#define WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(fcnt) \ + (WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ + (fcnt * sizeof(uint32))) +#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_SZ (WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(NFIFO_EXT)) + +#define WL_CNT_MCXST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge64mcxst_v1_t)) + +#define WL_CNT_HE_STRUCT_SZ ((uint32)sizeof(wl_he_cnt_wlc_t)) + +#define WL_CNT_SECVLN_STRUCT_SZ ((uint32)sizeof(wl_secvln_cnt_t)) + +#define INVALID_CNT_VAL (uint32)(-1) + +#define WL_XTLV_CNTBUF_MAX_SIZE ((uint32)(OFFSETOF(wl_cnt_info_t, data)) + \ + (uint32)BCM_XTLV_HDR_SIZE + (uint32)sizeof(wl_cnt_wlc_t) + \ + (uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \ + (uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ) + +#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t)) + +/** Top structure of counters IOVar buffer */ +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 datalen; /**< length of data including all paddings. */ + uint8 data []; /**< variable length payload: + * 1 or more bcm_xtlv_t type of tuples. + * each tuple is padded to multiple of 4 bytes. + * 'datalen' field of this structure includes all paddings. + */ +} wl_cnt_info_t; + +/* Top structure of subcounters IOVar buffer + * Whenever we make any change in this structure + * WL_SUBCNTR_IOV_VER should be updated accordingly + * The structure definition should remain consistant b/w + * FW and wl/WLM app. 
+ */ +typedef struct { + uint16 version; /* Version of IOVAR structure. Used for backward + * compatibility in future. Whenever we make any + * changes to this structure then value of WL_SUBCNTR_IOV_VER + * needs to be updated properly. + */ + uint16 length; /* length in bytes of this structure */ + uint16 counters_version; /* see definition of WL_CNT_T_VERSION + * wl app will send the version of counters + * which is used to calculate the offset of counters. + * It must match the version of counters FW is using + * else FW will return error with his version of counters + * set in this field. + */ + uint16 num_subcounters; /* Number of counter offset passed by wl app to FW. */ + uint32 data[1]; /* variable length payload: + * Offsets to the counters will be passed to FW + * throught this data field. FW will return the value of counters + * at the offsets passed by wl app in this fiels itself. + */ +} wl_subcnt_info_t; + +/* Top structure of counters TLV version IOVar buffer + * The structure definition should remain consistant b/w + * FW and wl/WLM app. + */ +typedef struct { + uint16 version; /* Version of IOVAR structure. Added for backward + * compatibility feature. If any changes are done, + * WL_TLV_IOV_VER need to be updated. + */ + uint16 length; /* total len in bytes of this structure + payload */ + uint16 counters_version; /* See definition of WL_CNT_VERSION_XTLV + * wl app will update counter tlv version to be used + * so to calculate offset of supported TLVs. + * If there is a mismatch in the version, FW will update an error + */ + uint16 num_tlv; /* Max number of TLV info passed by FW to WL app. + * and vice-versa + */ + uint32 data[]; /* variable length payload: + * This stores the tlv as supported by F/W to the wl app. + * This table is required to compute subcounter offsets at WLapp end. 
+ */ +} wl_cntr_tlv_info_t; + +/** wlc layer counters */ +typedef struct { + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc 
value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + uint32 rfdisable; /**< count of radio disables */ + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 
fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /**< hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< 
Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + uint32 ampdu_wds; /**< Number of AMPDU watchdogs */ + uint32 txlost; /**< Number of lost packets reported in txs */ + uint32 txdatamcast; /**< Number of TX multicast data packets */ + uint32 txdatabcast; /**< Number of TX broadcast data packets */ + uint32 psmxwds; /**< Number of PSMx watchdogs */ + uint32 rxback; + uint32 txback; + uint32 p2p_tbtt; /**< Number of P2P TBTT Events */ + uint32 p2p_tbtt_miss; /**< Number of P2P TBTT Events Miss */ + uint32 txqueue_start; + uint32 txqueue_end; + uint32 txbcast; /* Broadcast TransmittedFrameCount */ + uint32 txdropped; /* tx dropped pkts */ + uint32 rxbcast; /* BroadcastReceivedFrameCount */ + uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */ + uint32 txq_end_assoccb; /* forced txqueue_end callback fired in assoc */ + uint32 tx_toss_cnt; /* number of tx packets tossed */ + uint32 rx_toss_cnt; /* number of rx packets tossed */ + uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */ + uint32 
last_rx_toss_rsn; /* reason because of which last rx pkt tossed */ + uint32 pmk_badlen_cnt; /* number of invalid pmk len */ + +} wl_cnt_wlc_t; + +/* he counters Version 1 */ +#define HE_COUNTERS_V1 (1) +typedef struct wl_he_cnt_wlc_v1 { + uint32 he_rxtrig_myaid; + uint32 he_rxtrig_rand; + uint32 he_colormiss_cnt; + uint32 he_txmampdu; + uint32 he_txmtid_back; + uint32 he_rxmtid_back; + uint32 he_rxmsta_back; + uint32 he_txfrag; + uint32 he_rxdefrag; + uint32 he_txtrig; + uint32 he_rxtrig_basic; + uint32 he_rxtrig_murts; + uint32 he_rxtrig_bsrp; + uint32 he_rxdlmu; + uint32 he_physu_rx; + uint32 he_phyru_rx; + uint32 he_txtbppdu; +} wl_he_cnt_wlc_v1_t; + +/* he counters Version 2 */ +#define HE_COUNTERS_V2 (2) +typedef struct wl_he_cnt_wlc_v2 { + uint16 version; + uint16 len; + uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */ + uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */ + uint32 he_colormiss_cnt; /**< for bss color mismatch cases */ + uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */ + uint32 he_txmtid_back; /**< for multi-TID BACK transmission */ + uint32 he_rxmtid_back; /**< reception of multi-TID BACK */ + uint32 he_rxmsta_back; /**< reception of multi-STA BACK */ + uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */ + uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */ + uint32 he_txtrig; /**< transmission of trigger frames */ + uint32 he_rxtrig_basic; /**< reception of basic trigger frame */ + uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */ + uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */ + uint32 he_rxdlmu; /**< reception of DL MU PPDU */ + uint32 he_physu_rx; /**< reception of SU frame */ + uint32 he_phyru_rx; /**< reception of RU frame */ + uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */ + uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */ +} wl_he_cnt_wlc_v2_t; + +/* he 
counters Version 3 */ +#define HE_COUNTERS_V3 (3) +typedef struct wl_he_cnt_wlc_v3 { + uint16 version; + uint16 len; + uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */ + uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */ + uint32 he_colormiss_cnt; /**< for bss color mismatch cases */ + uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */ + uint32 he_txmtid_back; /**< for multi-TID BACK transmission */ + uint32 he_rxmtid_back; /**< reception of multi-TID BACK */ + uint32 he_rxmsta_back; /**< reception of multi-STA BACK */ + uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */ + uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */ + uint32 he_txtrig; /**< transmission of trigger frames */ + uint32 he_rxtrig_basic; /**< reception of basic trigger frame */ + uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */ + uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */ + uint32 he_rxhemuppdu_cnt; /**< rxing HE MU PPDU */ + uint32 he_physu_rx; /**< reception of SU frame */ + uint32 he_phyru_rx; /**< reception of RU frame */ + uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */ + uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */ + uint32 he_rxhesuppdu_cnt; /**< rxing SU PPDU */ + uint32 he_rxhesureppdu_cnt; /**< rxing Range Extension(RE) SU PPDU */ + uint32 he_null_zero_agg; /**< null AMPDU's transmitted in response to basic trigger + * because of zero aggregation + */ + uint32 he_null_bsrp_rsp; /**< null AMPDU's txed in response to BSR poll */ + uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger + * because of no frames in fifo's + */ +} wl_he_cnt_wlc_v3_t; + +#ifndef HE_COUNTERS_VERSION_ENABLED +#define HE_COUNTERS_VERSION (HE_COUNTERS_V1) +typedef wl_he_cnt_wlc_v1_t wl_he_cnt_wlc_t; +#endif /* HE_COUNTERS_VERSION_ENABLED */ + +/* security vulnerabilities counters */ +typedef struct { + 
uint32 ie_unknown; /* number of unknown IEs */ + uint32 ie_invalid_length; /* number of IEs with invalid length */ + uint32 ie_invalid_data; /* number of IEs with invalid data */ + uint32 ipv6_invalid_length; /* number of IPv6 packets with invalid payload length */ +} wl_secvln_cnt_t; + +/* Reinit reasons - do not put anything else other than reinit reasons here */ +typedef struct { + uint32 rsn[WL_REINIT_RC_LAST]; +} reinit_rsns_t; + +/* MACXSTAT counters for ucodex (corerev >= 64) */ +typedef struct { + uint32 macxsusp; + uint32 m2vmsg; + uint32 v2mmsg; + uint32 mboxout; + uint32 musnd; + uint32 sfb2v; +} wl_cnt_ge64mcxst_v1_t; + +/** MACSTAT counters for ucode (corerev >= 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. 
*/ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon 
(IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */ + uint32 rxhlovfl; /**< number of length / header fifo overflows */ + uint32 missbcn_dbg; /**< number of beacon missed to receive */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_ge40mcst_v1_t; + +/** MACSTAT counters for ucode (corerev < 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 dbgoff46; + uint32 dbgoff47; + uint32 dbgoff48; /**< Used for counting txstatus queue overflow (corerev <= 4) */ + uint32 pmqovfl; /**< number of PMQ 
overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 phywatch; + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_lt40mcst_v1_t; + +/** MACSTAT counters for ucode (corerev >= 80) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + /* Start of PSM2HOST stats(72) block */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 missbcn_dbg; /**< number of beacon missed to receive */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 
rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 rxtrig_myaid; /* New counters added in corerev 80 */ + uint32 rxtrig_rand; + uint32 goodfcs; + uint32 colormiss; + uint32 txmampdu; + uint32 rxmtidback; + uint32 rxmstaback; + uint32 txfrag; + /* End of PSM2HOST stats block */ + /* start of rxerror overflow counter(24) block which are modified/added in corerev 80 */ + uint32 phyovfl; + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */ + uint32 lenfovfl; + uint32 weppeof; + uint32 badplcp; + uint32 msduthresh; + uint32 strmeof; + uint32 stsfifofull; + uint32 stsfifoerr; + uint32 PAD[6]; + uint32 rxerr_stat; + uint32 ctx_fifo_full; + uint32 PAD[38]; /* PAD added for counter elements to be added soon */ +} wl_cnt_ge80mcst_v1_t; + +typedef struct { + uint32 fifocount; + uint32 txfunfl[]; +} wl_cnt_ge80_txfunfl_v1_t; + +/** MACSTAT counters for "wl counter" version <= 10 */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 
txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 PAD0; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 PAD1; + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows 
(obsolete) */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_v_le10_mcst_t; + +#define MAX_RX_FIFO 3 +#define WL_RXFIFO_CNT_VERSION 1 /* current version of wl_rxfifo_cnt_t */ +typedef struct { + /* Counters for frames received from rx fifos */ + uint16 version; + uint16 length; /* length of entire structure */ + uint32 rxf_data[MAX_RX_FIFO]; /* data frames from rx fifo */ + uint32 rxf_mgmtctl[MAX_RX_FIFO]; /* mgmt/ctl frames from rx fifo */ +} wl_rxfifo_cnt_t; + +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 
txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's 
macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 
pmqovfl; /**< Number of PMQ overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + 
uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /**< hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; /**< num of received packets with retry bit on */ + uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */ + uint32 rxnodelim; /**< macstat cnt only valid in ver 11. 
+ * number of occasions that no valid delimiter is detected + * by ampdu parser. + */ + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + uint32 ampdu_wds; /**< Number of AMPDU watchdogs */ + uint32 txlost; /**< Number of lost packets reported in txs */ + uint32 txdatamcast; /**< Number of TX multicast data packets */ + uint32 txdatabcast; /**< Number of TX broadcast data packets */ + uint32 txbcast; /* Broadcast TransmittedFrameCount */ + uint32 txdropped; /* tx dropped pkts */ 
+ uint32 rxbcast; /* BroadcastReceivedFrameCount */ + uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */ + +} wl_cnt_ver_11_t; + +typedef struct { + uint16 version; /* see definition of WL_CNT_T_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /* tx data frames */ + uint32 txbyte; /* tx data bytes */ + uint32 txretrans; /* tx mac retransmits */ + uint32 txerror; /* tx data errors (derived: sum of others) */ + uint32 txctl; /* tx management frames */ + uint32 txprshort; /* tx short preamble frames */ + uint32 txserr; /* tx status errors */ + uint32 txnobuf; /* tx out of buffers errors */ + uint32 txnoassoc; /* tx discard because we're not associated */ + uint32 txrunt; /* tx runt frames */ + uint32 txchit; /* tx header cache hit (fastpath) */ + uint32 txcmiss; /* tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /* tx fifo underflows */ + uint32 txphyerr; /* tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /* rx data frames */ + uint32 rxbyte; /* rx data bytes */ + uint32 rxerror; /* rx data errors (derived: sum of others) */ + uint32 rxctl; /* rx management frames */ + uint32 rxnobuf; /* rx out of buffers errors */ + uint32 rxnondata; /* rx non data frames in the data channel errors */ + uint32 rxbadds; /* rx bad DS errors */ + uint32 rxbadcm; /* rx bad control or management frames */ + uint32 rxfragerr; /* rx fragmentation errors */ + uint32 rxrunt; /* rx runt frames */ + uint32 rxgiant; /* rx giant frames */ + uint32 rxnoscb; /* rx no scb error */ + uint32 rxbadproto; /* rx invalid frames */ + uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ + uint32 rxbadda; /* rx frames tossed for invalid da */ + uint32 rxfilter; /* rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /* rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /* rx dma descriptor 
underflow errors */ + + uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /* tx/rx dma descriptor errors */ + uint32 dmada; /* tx/rx dma data errors */ + uint32 dmape; /* tx/rx dma descriptor protocol errors */ + uint32 reset; /* reset count */ + uint32 tbtt; /* cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /* callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /* number of RTS sent out by the MAC */ + uint32 txctsfrm; /* number of CTS sent out by the MAC */ + uint32 txackfrm; /* number of ACK frames sent out */ + uint32 txdnlfrm; /* Not used */ + uint32 txbcnfrm; /* beacons transmitted */ + uint32 txfunfl[8]; /* per-fifo tx underflows */ + uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /* parity check of the PLCP header failed */ + uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /* Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /* beacons received from member of BSS */ + uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /* beacons received from other BSS */ + uint32 rxrsptmout; /* Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /* Number of PMQ overflows */ + 
uint32 rxcgprqfrm; /* Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /* obsolete */ + uint32 frmscons; /* obsolete */ + uint32 txnack; /* obsolete */ + uint32 txglitch_nack; /* obsolete */ + uint32 txburst; /* obsolete */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /* TKIPReplays */ + uint32 ccmpfmterr; /* CCMPFormatErrors */ + uint32 ccmpreplay; /* CCMPReplays */ + uint32 ccmpundec; /* CCMPDecryptErrors */ + uint32 fourwayfail; /* FourWayHandshakeFailures */ + uint32 wepundec; /* dot11WEPUndecryptableCount */ + uint32 wepicverr; /* dot11WEPICVErrorCount */ + uint32 decsuccess; /* DecryptSuccessCount */ + uint32 tkipicverr; /* TKIPICVErrorCount */ + uint32 wepexcluded; /* dot11WEPExcludedCount */ + + uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ + uint32 psmwds; /* Count PSM watchdogs */ + uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /* PRQ entries read in */ + uint32 prq_undirected_entries; /* which were bcast bss & ssid */ + uint32 prq_bad_entries; /* which could not be translated to info */ + uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /* count of radio disables */ + uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ + + uint32 txexptime; /* Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /* count for sgi transmit */ + uint32 rxmpdu_sgi; /* count for sgi received */ + uint32 txmpdu_stbc; /* count for stbc transmit */ + uint32 rxmpdu_stbc; /* count for stbc received */ + + uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /* 
TKIPReplays */ + uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /* CCMPReplays */ + uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ + uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /* DecryptSuccessCount */ + uint32 tkipicverr_mcst; /* TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ + + uint32 dma_hang; /* count for stbc received */ + uint32 rxrtry; /* number of packets with retry bit set to 1 */ +} wl_cnt_ver_7_t; + +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management 
frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /**< # of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /**< # of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< # of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< # of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< # of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< # of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< Number of PMQ 
overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; + uint32 frmscons; + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 
atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxdrop20s; /**< drop secondary cnt */ +} wl_cnt_ver_6_t; + +#define WL_DELTA_STATS_T_VERSION 2 /**< current 
version of wl_delta_stats_t struct */ + +typedef struct { + uint16 version; /**< see definition of WL_DELTA_STATS_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txfail; /**< tx failures */ + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ + + /* phy stats */ + uint32 rxbadplcp; + uint32 rxcrsglitch; + uint32 bphy_rxcrsglitch; + uint32 bphy_badplcp; + + uint32 slice_index; /**< Slice for which stats are reported */ + +} wl_delta_stats_t; + +/* Partial statistics counter report */ +#define WL_CNT_CTL_MGT_FRAMES 0 + +typedef struct { + uint16 type; + uint16 len; + + /* detailed control/management frames */ + uint32 txnull; + uint32 rxnull; + uint32 txqosnull; + uint32 
rxqosnull; + uint32 txassocreq; + uint32 rxassocreq; + uint32 txreassocreq; + uint32 rxreassocreq; + uint32 txdisassoc; + uint32 rxdisassoc; + uint32 txassocrsp; + uint32 rxassocrsp; + uint32 txreassocrsp; + uint32 rxreassocrsp; + uint32 txauth; + uint32 rxauth; + uint32 txdeauth; + uint32 rxdeauth; + uint32 txprobereq; + uint32 rxprobereq; + uint32 txprobersp; + uint32 rxprobersp; + uint32 txaction; + uint32 rxaction; + uint32 txrts; + uint32 rxrts; + uint32 txcts; + uint32 rxcts; + uint32 txack; + uint32 rxack; + uint32 txbar; + uint32 rxbar; + uint32 txback; + uint32 rxback; + uint32 txpspoll; + uint32 rxpspoll; +} wl_ctl_mgt_cnt_t; + +typedef struct { + uint32 packets; + uint32 bytes; +} wl_traffic_stats_t; + +typedef struct { + uint16 version; /**< see definition of WL_WME_CNT_VERSION */ + uint16 length; /**< length of entire structure */ + + wl_traffic_stats_t tx[AC_COUNT]; /**< Packets transmitted */ + wl_traffic_stats_t tx_failed[AC_COUNT]; /**< Packets dropped or failed to transmit */ + wl_traffic_stats_t rx[AC_COUNT]; /**< Packets received */ + wl_traffic_stats_t rx_failed[AC_COUNT]; /**< Packets failed to receive */ + + wl_traffic_stats_t forward[AC_COUNT]; /**< Packets forwarded by AP */ + + wl_traffic_stats_t tx_expired[AC_COUNT]; /**< packets dropped due to lifetime expiry */ + +} wl_wme_cnt_t; + +struct wl_msglevel2 { + uint32 low; + uint32 high; +}; + +/* A versioned structure for setting and retrieving debug message levels. 
*/ +#define WL_MSGLEVEL_STRUCT_VERSION_1 1 + +typedef struct wl_msglevel_v1 { + uint16 version; + uint16 length; + uint32 msglevel1; + uint32 msglevel2; + uint32 msglevel3; + /* add another uint32 when full */ +} wl_msglevel_v1_t; + +#define WL_ICMP_IPV6_CFG_VERSION 1 +#define WL_ICMP_IPV6_CLEAR_ALL (1 << 0) + +typedef struct wl_icmp_ipv6_cfg { + uint16 version; + uint16 length; + uint16 fixed_length; + uint16 flags; + uint32 num_ipv6; + /* num_ipv6 to follow */ + struct ipv6_addr host_ipv6[]; +} wl_icmp_ipv6_cfg_t; + +#define WL_ICMP_CFG_IPV6_FIXED_LEN OFFSETOF(wl_icmp_ipv6_cfg_t, host_ipv6) +#define WL_ICMP_CFG_IPV6_LEN(count) (WL_ICMP_CFG_IPV6_FIXED_LEN + \ + ((count) * sizeof(struct ipv6_addr))) + +typedef struct wl_mkeep_alive_pkt { + uint16 version; /* Version for mkeep_alive */ + uint16 length; /* length of fixed parameters in the structure */ + uint32 period_msec; /* high bit on means immediate send */ + uint16 len_bytes; + uint8 keep_alive_id; /* 0 - 3 for N = 4 */ + uint8 data[1]; +} wl_mkeep_alive_pkt_t; + +#define WL_MKEEP_ALIVE_VERSION 1 +#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) +#define WL_MKEEP_ALIVE_PRECISION 500 +#define WL_MKEEP_ALIVE_PERIOD_MASK 0x7FFFFFFF +#define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000 + +/** TCP Keep-Alive conn struct */ +typedef struct wl_mtcpkeep_alive_conn_pkt { + struct ether_addr saddr; /**< src mac address */ + struct ether_addr daddr; /**< dst mac address */ + struct ipv4_addr sipaddr; /**< source IP addr */ + struct ipv4_addr dipaddr; /**< dest IP addr */ + uint16 sport; /**< src port */ + uint16 dport; /**< dest port */ + uint32 seq; /**< seq number */ + uint32 ack; /**< ACK number */ + uint16 tcpwin; /**< TCP window */ + uint16 PAD; +} wl_mtcpkeep_alive_conn_pkt_t; + +/** TCP Keep-Alive interval struct */ +typedef struct wl_mtcpkeep_alive_timers_pkt { + uint16 interval; /**< interval timer */ + uint16 retry_interval; /**< retry_interval timer */ + uint16 retry_count; /**< retry_count */ +} 
wl_mtcpkeep_alive_timers_pkt_t; + +typedef struct wake_info { + uint32 wake_reason; + uint32 wake_info_len; /**< size of packet */ + uint8 packet[]; +} wake_info_t; + +typedef struct wake_pkt { + uint32 wake_pkt_len; /**< size of packet */ + uint8 packet[]; +} wake_pkt_t; + +#define WL_MTCPKEEP_ALIVE_VERSION 1 + +/* #ifdef WLBA */ + +#define WLC_BA_CNT_VERSION 1 /**< current version of wlc_ba_cnt_t */ + +/** block ack related stats */ +typedef struct wlc_ba_cnt { + uint16 version; /**< WLC_BA_CNT_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txpdu; /**< pdus sent */ + uint32 txsdu; /**< sdus sent */ + uint32 txfc; /**< tx side flow controlled packets */ + uint32 txfci; /**< tx side flow control initiated */ + uint32 txretrans; /**< retransmitted pdus */ + uint32 txbatimer; /**< ba resend due to timer */ + uint32 txdrop; /**< dropped packets */ + uint32 txaddbareq; /**< addba req sent */ + uint32 txaddbaresp; /**< addba resp sent */ + uint32 txdelba; /**< delba sent */ + uint32 txba; /**< ba sent */ + uint32 txbar; /**< bar sent */ + uint32 txpad[4]; /**< future */ + + /* receive side counters */ + uint32 rxpdu; /**< pdus recd */ + uint32 rxqed; /**< pdus buffered before sending up */ + uint32 rxdup; /**< duplicate pdus */ + uint32 rxnobuf; /**< pdus discarded due to no buf */ + uint32 rxaddbareq; /**< addba req recd */ + uint32 rxaddbaresp; /**< addba resp recd */ + uint32 rxdelba; /**< delba recd */ + uint32 rxba; /**< ba recd */ + uint32 rxbar; /**< bar recd */ + uint32 rxinvba; /**< invalid ba recd */ + uint32 rxbaholes; /**< ba recd with holes */ + uint32 rxunexp; /**< unexpected packets */ + uint32 rxpad[4]; /**< future */ +} wlc_ba_cnt_t; +/* #endif WLBA */ + +/** structure for per-tid ampdu control */ +struct ampdu_tid_control { + uint8 tid; /* tid */ + uint8 enable; /* enable/disable */ +}; + +/** struct for ampdu tx/rx aggregation control */ +struct ampdu_aggr { + int8 aggr_override; /**< aggr 
overrided by dongle. Not to be set by host. */ + uint16 conf_TID_bmap; /**< bitmap of TIDs to configure */ + uint16 enab_TID_bmap; /**< enable/disable per TID */ +}; + +/** structure for identifying ea/tid for sending addba/delba */ +struct ampdu_ea_tid { + struct ether_addr ea; /**< Station address */ + uint8 tid; /**< tid */ + uint8 initiator; /**< 0 is recipient, 1 is originator */ +}; + +/** structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */ +struct ampdu_retry_tid { + uint8 tid; /**< tid */ + uint8 retry; /**< retry value */ +}; + +#define BDD_FNAME_LEN 32 /**< Max length of friendly name */ +typedef struct bdd_fname { + uint8 len; /**< length of friendly name */ + uchar name[BDD_FNAME_LEN]; /**< friendly name */ +} bdd_fname_t; + +/* structure for addts arguments */ +/** For ioctls that take a list of TSPEC */ +struct tslist { + int32 count; /**< number of tspecs */ + struct tsinfo_arg tsinfo[]; /**< variable length array of tsinfo */ +}; + +/* WLTDLS */ +/**structure for tdls iovars */ +typedef struct tdls_iovar { + struct ether_addr ea; /**< Station address */ + uint8 mode; /**< mode: depends on iovar */ + uint8 PAD; + chanspec_t chanspec; + uint8 PAD[6]; +} tdls_iovar_t; + +#define TDLS_WFD_IE_SIZE 512 +/**structure for tdls wfd ie */ +typedef struct tdls_wfd_ie_iovar { + struct ether_addr ea; /**< Station address */ + uint8 mode; + uint8 PAD; + uint16 length; + uint8 data[TDLS_WFD_IE_SIZE]; +} tdls_wfd_ie_iovar_t; +/* #endif WLTDLS */ + +/** structure for addts/delts arguments */ +typedef struct tspec_arg { + uint16 version; /**< see definition of TSPEC_ARG_VERSION */ + uint16 length; /**< length of entire structure */ + uint32 flag; /**< bit field */ + /* TSPEC Arguments */ + struct tsinfo_arg tsinfo; /**< TS Info bit field */ + uint8 PAD; + uint16 nom_msdu_size; /**< (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /**< Maximum MSDU Size (bytes) */ + uint32 min_srv_interval; /**< Minimum Service Interval (us) */ 
+ uint32 max_srv_interval; /**< Maximum Service Interval (us) */ + uint32 inactivity_interval; /**< Inactivity Interval (us) */ + uint32 suspension_interval; /**< Suspension Interval (us) */ + uint32 srv_start_time; /**< Service Start Time (us) */ + uint32 min_data_rate; /**< Minimum Data Rate (bps) */ + uint32 mean_data_rate; /**< Mean Data Rate (bps) */ + uint32 peak_data_rate; /**< Peak Data Rate (bps) */ + uint32 max_burst_size; /**< Maximum Burst Size (bytes) */ + uint32 delay_bound; /**< Delay Bound (us) */ + uint32 min_phy_rate; /**< Minimum PHY Rate (bps) */ + uint16 surplus_bw; /**< Surplus Bandwidth Allowance (range 1.0 to 8.0) */ + uint16 medium_time; /**< Medium Time (32 us/s periods) */ + uint8 dialog_token; /**< dialog token */ + uint8 PAD[3]; +} tspec_arg_t; + +/** tspec arg for desired station */ +typedef struct tspec_per_sta_arg { + struct ether_addr ea; + uint8 PAD[2]; + struct tspec_arg ts; +} tspec_per_sta_arg_t; + +/** structure for max bandwidth for each access category */ +typedef struct wme_max_bandwidth { + uint32 ac[AC_COUNT]; /**< max bandwidth for each access category */ +} wme_max_bandwidth_t; + +#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t)) + +/* current version of wl_tspec_arg_t struct */ +#define TSPEC_ARG_VERSION 2 /**< current version of wl_tspec_arg_t struct */ +#define TSPEC_ARG_LENGTH 55 /**< argument length from tsinfo to medium_time */ +#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /**< default dialog token */ +#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /**< default surplus bw */ + +#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80 +#define WLC_WOWL_MAX_KEEPALIVE 2 + +/** Packet lifetime configuration per ac */ +typedef struct wl_lifetime { + uint32 ac; /**< access class */ + uint32 lifetime; /**< Packet lifetime value in ms */ +} wl_lifetime_t; + +/** Management time configuration */ +typedef struct wl_lifetime_mg { + uint32 mgmt_bitmap; /**< Mgmt subtype */ + uint32 lifetime; /**< Packet lifetime value in us */ +} 
wl_lifetime_mg_t; + +/* MAC Sample Capture related */ +#define WL_MACCAPTR_DEFSTART_PTR 0xA00 +#define WL_MACCAPTR_DEFSTOP_PTR 0xA3F +#define WL_MACCAPTR_DEFSZ 0x3F + +#define WL_MACCAPTR_DEF_MASK 0xFFFFFFFF + +typedef enum { + WL_MACCAPT_TRIG = 0, + WL_MACCAPT_STORE = 1, + WL_MACCAPT_TRANS = 2, + WL_MACCAPT_MATCH = 3 +} maccaptr_optn; + +typedef enum { + WL_MACCAPT_STRT = 1, + WL_MACCAPT_STOP = 2, + WL_MACCAPT_RST = 3 +} maccaptr_cmd_t; + +/* MAC Sample Capture Set-up Paramters */ +typedef struct wl_maccapture_params { + uint8 gpio_sel; + uint8 la_mode; /* TRUE: GPIO Out Enabled */ + uint8 PAD[2]; + uint32 start_ptr; /* Start address to store */ + uint32 stop_ptr; /* Stop address to store */ + uint8 optn_bmp; /* Options */ + uint8 PAD[3]; + uint32 tr_mask; /* Trigger Mask */ + uint32 tr_val; /* Trigger Value */ + uint32 s_mask; /* Store Mode Mask */ + uint32 x_mask; /* Trans. Mode Mask */ + uint32 m_mask; /* Match Mode Mask */ + uint32 m_val; /* Match Value */ + maccaptr_cmd_t cmd; /* Start / Stop */ +} wl_maccapture_params_t; + +/** Channel Switch Announcement param */ +typedef struct wl_chan_switch { + uint8 mode; /**< value 0 or 1 */ + uint8 count; /**< count # of beacons before switching */ + chanspec_t chspec; /**< chanspec */ + uint8 reg; /**< regulatory class */ + uint8 frame_type; /**< csa frame type, unicast or broadcast */ +} wl_chan_switch_t; + +enum { + PFN_LIST_ORDER, + PFN_RSSI +}; + +enum { + DISABLE, + ENABLE +}; + +enum { + OFF_ADAPT, + SMART_ADAPT, + STRICT_ADAPT, + SLOW_ADAPT +}; + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 +#define IMMEDIATE_EVENT_BIT 8 +#define SUPPRESS_SSID_BIT 9 +#define ENABLE_NET_OFFLOAD_BIT 10 +/** report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_BIT 11 + +#define SORT_CRITERIA_MASK 0x0001 +#define 
AUTO_NET_SWITCH_MASK 0x0002 +#define ENABLE_BKGRD_SCAN_MASK 0x0004 +#define IMMEDIATE_SCAN_MASK 0x0008 +#define AUTO_CONNECT_MASK 0x0010 + +#define ENABLE_BD_SCAN_MASK 0x0020 +#define ENABLE_ADAPTSCAN_MASK 0x00c0 +#define IMMEDIATE_EVENT_MASK 0x0100 +#define SUPPRESS_SSID_MASK 0x0200 +#define ENABLE_NET_OFFLOAD_MASK 0x0400 +/** report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_MASK 0x0800 + +#define PFN_VERSION 2 + +#define PFN_COMPLETE 1 +#define PFN_INCOMPLETE 0 + +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 + +#define PFN_PARTIAL_SCAN_BIT 0 +#define PFN_PARTIAL_SCAN_MASK 1 + +#define PFN_SWC_RSSI_WINDOW_MAX 8 +#define PFN_SWC_MAX_NUM_APS 16 +#define PFN_HOTLIST_MAX_NUM_APS 64 + +#define MAX_EPNO_HIDDEN_SSID 8 +#define MAX_WHITELIST_SSID 2 + +/* Version 1 and 2 for various scan results structures defined below */ +#define PFN_SCANRESULTS_VERSION_V1 1 +#define PFN_SCANRESULTS_VERSION_V2 2 + +/** PFN network info structure */ +typedef struct wl_pfn_subnet_info_v1 { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + uint8 SSID_len; + uint8 SSID[32]; +} wl_pfn_subnet_info_v1_t; + +typedef struct wl_pfn_subnet_info_v2 { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + uint8 SSID_len; + union { + uint8 SSID[32]; + uint16 index; + } u; +} wl_pfn_subnet_info_v2_t; + +typedef struct wl_pfn_net_info_v1 { + wl_pfn_subnet_info_v1_t pfnsubnet; + int16 RSSI; /**< receive signal strength (in dBm) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_v1_t; + +typedef struct wl_pfn_net_info_v2 { + wl_pfn_subnet_info_v2_t pfnsubnet; + int16 RSSI; /**< receive signal strength (in dBm) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_v2_t; + +/* Version 1 and 2 for various lbest scan results structures below */ +#define PFN_LBEST_SCAN_RESULT_VERSION_V1 1 +#define PFN_LBEST_SCAN_RESULT_VERSION_V2 2 + +#define 
MAX_CHBKT_PER_RESULT 4 + +typedef struct wl_pfn_lnet_info_v1 { + wl_pfn_subnet_info_v1_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */ + uint16 flags; /**< partial scan, etc */ + int16 RSSI; /**< receive signal strength (in dBm) */ + uint32 timestamp; /**< age in miliseconds */ + uint16 rtt0; /**< estimated distance to this AP in centimeters */ + uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_v1_t; + +typedef struct wl_pfn_lnet_info_v2 { + wl_pfn_subnet_info_v2_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */ + uint16 flags; /**< partial scan, etc */ + int16 RSSI; /**< receive signal strength (in dBm) */ + uint32 timestamp; /**< age in miliseconds */ + uint16 rtt0; /**< estimated distance to this AP in centimeters */ + uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_v2_t; + +typedef struct wl_pfn_lscanresults_v1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_lnet_info_v1_t netinfo[1]; +} wl_pfn_lscanresults_v1_t; + +typedef struct wl_pfn_lscanresults_v2 { + uint32 version; + uint16 status; + uint16 count; + uint32 scan_ch_buckets[MAX_CHBKT_PER_RESULT]; + wl_pfn_lnet_info_v2_t netinfo[1]; +} wl_pfn_lscanresults_v2_t; + +/**this is used to report on 1-* pfn scan results */ +typedef struct wl_pfn_scanresults_v1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_v1_t netinfo[1]; +} wl_pfn_scanresults_v1_t; + +typedef struct wl_pfn_scanresults_v2 { + uint32 version; + uint32 status; + uint32 count; + uint32 scan_ch_bucket; + wl_pfn_net_info_v2_t netinfo[1]; +} wl_pfn_scanresults_v2_t; + +typedef struct wl_pfn_significant_net { + uint16 flags; + uint16 channel; + struct ether_addr BSSID; + int8 rssi[PFN_SWC_RSSI_WINDOW_MAX]; +} wl_pfn_significant_net_t; + +#define PFN_SWC_SCANRESULT_VERSION 1 + +typedef struct wl_pfn_swc_results { + uint32 version; + uint32 pkt_count; /**< No. 
of results in current frame */ + uint32 total_count; /**< Total expected results */ + wl_pfn_significant_net_t list[]; +} wl_pfn_swc_results_t; +typedef struct wl_pfn_net_info_bssid { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + int8 RSSI; /**< receive signal strength (in dBm) */ + uint16 flags; /**< (e.g. partial scan, off channel) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_bssid_t; + +typedef struct wl_pfn_scanhist_bssid { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_bssid_t netinfo[1]; +} wl_pfn_scanhist_bssid_t; + +/* Version 1 and 2 for various single scan result */ +#define PFN_SCANRESULT_VERSION_V1 1 +#define PFN_SCANRESULT_VERSION_V2 2 + +/* used to report exactly one scan result */ +/* plus reports detailed scan info in bss_info */ +typedef struct wl_pfn_scanresult_v1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_v1_t netinfo; + wl_bss_info_v109_t bss_info; +} wl_pfn_scanresult_v1_t; + +typedef struct wl_pfn_scanresult_v2 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_v2_t netinfo; + wl_bss_info_v109_t bss_info; +} wl_pfn_scanresult_v2_t; + +typedef struct wl_pfn_scanresult_v2_1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_v2_t netinfo; + uint8 bss_info[]; /* var length wl_bss_info_X structures */ +} wl_pfn_scanresult_v2_1_t; + +/**PFN data structure */ +typedef struct wl_pfn_param { + int32 version; /**< PNO parameters version */ + int32 scan_freq; /**< Scan frequency */ + int32 lost_network_timeout; /**< Timeout in sec. 
to declare + * discovered network as lost + */ + int16 flags; /**< Bit field to control features + * of PFN such as sort criteria auto + * enable switch and background scan + */ + int16 rssi_margin; /**< Margin to avoid jitter for choosing a + * PFN based on RSSI sort criteria + */ + uint8 bestn; /**< number of best networks in each scan */ + uint8 mscan; /**< number of scans recorded */ + uint8 repeat; /**< Minimum number of scan intervals + *before scan frequency changes in adaptive scan + */ + uint8 exp; /**< Exponent of 2 for maximum scan interval */ + int32 slow_freq; /**< slow scan period */ +} wl_pfn_param_t; + +typedef struct wl_pfn_bssid { + struct ether_addr macaddr; + /* Bit4: suppress_lost, Bit3: suppress_found */ + uint16 flags; +} wl_pfn_bssid_t; +typedef struct wl_pfn_significant_bssid { + struct ether_addr macaddr; + int8 rssi_low_threshold; + int8 rssi_high_threshold; +} wl_pfn_significant_bssid_t; +#define WL_PFN_SUPPRESSFOUND_MASK 0x08 +#define WL_PFN_SUPPRESSLOST_MASK 0x10 +#define WL_PFN_SSID_IMPRECISE_MATCH 0x80 +#define WL_PFN_SSID_SAME_NETWORK 0x10000 +#define WL_PFN_SUPPRESS_AGING_MASK 0x20000 +#define WL_PFN_FLUSH_ALL_SSIDS 0x40000 + +#define WL_PFN_IOVAR_FLAG_MASK 0xFFFF00FF +#define WL_PFN_RSSI_MASK 0xff00 +#define WL_PFN_RSSI_SHIFT 8 + +typedef struct wl_pfn_cfg { + uint32 reporttype; + int32 channel_num; + uint16 channel_list[WL_NUMCHANNELS]; + uint32 flags; +} wl_pfn_cfg_t; + +#define WL_PFN_SSID_CFG_VERSION 1 +#define WL_PFN_SSID_CFG_CLEAR 0x1 + +typedef struct wl_pfn_ssid_params { + int8 min5G_rssi; /* minimum 5GHz RSSI for a BSSID to be considered */ + int8 min2G_rssi; /* minimum 2.4GHz RSSI for a BSSID to be considered */ + int16 init_score_max; /* The maximum score that a network can have before bonuses */ + + int16 cur_bssid_bonus; /* Add to current bssid */ + int16 same_ssid_bonus; /* score bonus for all networks with the same network flag */ + int16 secure_bonus; /* score bonus for networks that are not open */ + int16 
band_5g_bonus; +} wl_pfn_ssid_params_t; + +typedef struct wl_ssid_ext_params { + int8 min5G_rssi; /* minimum 5GHz RSSI for a BSSID to be considered */ + int8 min2G_rssi; /* minimum 2.4GHz RSSI for a BSSID to be considered */ + int16 init_score_max; /* The maximum score that a network can have before bonuses */ + int16 cur_bssid_bonus; /* Add to current bssid */ + int16 same_ssid_bonus; /* score bonus for all networks with the same network flag */ + int16 secure_bonus; /* score bonus for networks that are not open */ + int16 band_5g_bonus; +} wl_ssid_ext_params_t; + +typedef struct wl_pfn_ssid_cfg { + uint16 version; + uint16 flags; + wl_ssid_ext_params_t params; +} wl_pfn_ssid_cfg_t; + +#define CH_BUCKET_REPORT_NONE 0 +#define CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY 1 +#define CH_BUCKET_REPORT_FULL_RESULT 2 +#define CH_BUCKET_REPORT_SCAN_COMPLETE (CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY | \ + CH_BUCKET_REPORT_FULL_RESULT) +#define CH_BUCKET_REPORT_REGULAR 0 +#define CH_BUCKET_GSCAN 4 + +typedef struct wl_pfn_gscan_ch_bucket_cfg { + uint8 bucket_end_index; + uint8 bucket_freq_multiple; + uint8 flag; + uint8 reserved; + uint16 repeat; + uint16 max_freq_multiple; +} wl_pfn_gscan_ch_bucket_cfg_t; + +typedef struct wl_pfn_capabilities { + uint16 max_mscan; + uint16 max_bestn; + uint16 max_swc_bssid; + uint16 max_hotlist_bssid; +} wl_pfn_capabilities_t; + +#define GSCAN_SEND_ALL_RESULTS_MASK (1 << 0) +#define GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK (1 << 3) +#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7) +#define WL_GSCAN_CFG_VERSION 1 +typedef struct wl_pfn_gscan_cfg { + uint16 version; + /** + * BIT0 1 = send probes/beacons to HOST + * BIT1 Reserved + * BIT2 Reserved + * Add any future flags here + * BIT7 1 = no other useful cfg sent + */ + uint8 flags; + /** Buffer filled threshold in % to generate an event */ + uint8 buffer_threshold; + /** + * No. 
of BSSIDs with "change" to generate an evt + * change - crosses rssi threshold/lost + */ + uint8 swc_nbssid_threshold; + /* Max=8 (for now) Size of rssi cache buffer */ + uint8 swc_rssi_window_size; + uint8 count_of_channel_buckets; + uint8 retry_threshold; + uint16 lost_ap_window; + wl_pfn_gscan_ch_bucket_cfg_t channel_bucket[1]; +} wl_pfn_gscan_cfg_t; + +#define WL_PFN_REPORT_ALLNET 0 +#define WL_PFN_REPORT_SSIDNET 1 +#define WL_PFN_REPORT_BSSIDNET 2 + +#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */ +#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /**< Remaining reserved for future use */ + +typedef struct wl_pfn { + wlc_ssid_t ssid; /**< ssid name and its length */ + int32 flags; /**< bit2: hidden */ + int32 infra; /**< BSS Vs IBSS */ + int32 auth; /**< Open Vs Closed */ + int32 wpa_auth; /**< WPA type */ + int32 wsec; /**< wsec value */ +} wl_pfn_t; + +typedef struct wl_pfn_list { + uint32 version; + uint32 enabled; + uint32 count; + wl_pfn_t pfn[1]; +} wl_pfn_list_t; + +#define PFN_SSID_EXT_VERSION 1 + +typedef struct wl_pfn_ext { + uint8 flags; + int8 rssi_thresh; /* RSSI threshold, track only if RSSI > threshold */ + uint16 wpa_auth; /* Match the wpa auth type defined in wlioctl_defs.h */ + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + uint8 pad; +} wl_pfn_ext_t; +typedef struct wl_pfn_ext_list { + uint16 version; + uint16 count; + wl_pfn_ext_t pfn_ext[1]; +} wl_pfn_ext_list_t; + +#define WL_PFN_SSID_EXT_FOUND 0x1 +#define WL_PFN_SSID_EXT_LOST 0x2 +typedef struct wl_pfn_result_ssid { + uint8 flags; + int8 rssi; + /* channel number */ + uint16 channel; + /* Assume idx in order of cfg */ + uint32 index; +} wl_pfn_result_ssid_crc32_t; + +typedef struct wl_pfn_ssid_ext_result { + uint16 version; + uint16 count; + wl_pfn_result_ssid_crc32_t net[1]; +} wl_pfn_ssid_ext_result_t; + +#define PFN_EXT_AUTH_CODE_OPEN 1 /* open */ +#define PFN_EXT_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */ +#define PFN_EXT_AUTH_CODE_EAPOL 4 /* 
any EAPOL */ + +#define WL_PFN_HIDDEN_BIT 2 +#define WL_PFN_HIDDEN_MASK 0x4 + +#ifndef BESTN_MAX +#define BESTN_MAX 10 +#endif // endif + +#ifndef MSCAN_MAX +#define MSCAN_MAX 90 +#endif // endif + +/* Dynamic scan configuration for motion profiles */ + +#define WL_PFN_MPF_VERSION 1 + +/* Valid group IDs, may be expanded in the future */ +#define WL_PFN_MPF_GROUP_SSID 0 +#define WL_PFN_MPF_GROUP_BSSID 1 +#define WL_PFN_MPF_MAX_GROUPS 2 + +/* Max number of MPF states supported in this time */ +#define WL_PFN_MPF_STATES_MAX 4 + +/* Flags for the mpf-specific stuff */ +#define WL_PFN_MPF_ADAPT_ON_BIT 0 +#define WL_PFN_MPF_ADAPTSCAN_BIT 1 + +#define WL_PFN_MPF_ADAPT_ON_MASK 0x0001 +#define WL_PFN_MPF_ADAPTSCAN_MASK 0x0006 + +/* Per-state timing values */ +typedef struct wl_pfn_mpf_state_params { + int32 scan_freq; /* Scan frequency (secs) */ + int32 lost_network_timeout; /* Timeout to declare net lost (secs) */ + int16 flags; /* Space for flags: ADAPT etc */ + uint8 exp; /* Exponent of 2 for max interval for SMART/STRICT_ADAPT */ + uint8 repeat; /* Number of scans before changing adaptation level */ + int32 slow_freq; /* Slow scan period for SLOW_ADAPT */ +} wl_pfn_mpf_state_params_t; + +typedef struct wl_pfn_mpf_param { + uint16 version; /* Structure version */ + uint16 groupid; /* Group ID: 0 (SSID), 1 (BSSID), other: reserved */ + wl_pfn_mpf_state_params_t state[WL_PFN_MPF_STATES_MAX]; +} wl_pfn_mpf_param_t; + +/* Structure for setting pfn_override iovar */ +typedef struct wl_pfn_override_param { + uint16 version; /* Structure version */ + uint16 start_offset; /* Seconds from now to apply new params */ + uint16 duration; /* Seconds to keep new params applied */ + uint16 reserved; + wl_pfn_mpf_state_params_t override; +} wl_pfn_override_param_t; +#define WL_PFN_OVERRIDE_VERSION 1 + +/* + * Definitions for base MPF configuration + */ + +#define WL_MPF_VERSION 1 +#define WL_MPF_MAX_BITS 3 +#define WL_MPF_MAX_STATES (1 << WL_MPF_MAX_BITS) + +#define 
WL_MPF_STATE_NAME_MAX 12 + +typedef struct wl_mpf_val { + uint16 val; /* Value of GPIO bits */ + uint16 state; /* State identifier */ + char name[WL_MPF_STATE_NAME_MAX]; /* Optional name */ +} wl_mpf_val_t; + +typedef struct wl_mpf_map { + uint16 version; + uint16 type; + uint16 mask; /* Which GPIO bits to use */ + uint8 count; /* Count of state/value mappings */ + uint8 PAD; + wl_mpf_val_t vals[WL_MPF_MAX_STATES]; +} wl_mpf_map_t; + +#define WL_MPF_STATE_AUTO (0xFFFF) /* (uint16)-1) */ + +typedef struct wl_mpf_state { + uint16 version; + uint16 type; + uint16 state; /* Get/Set */ + uint8 force; /* 0 - auto (HW) state, 1 - forced state */ + char name[WL_MPF_STATE_NAME_MAX]; /* Get/Set: Optional/actual name */ + uint8 PAD; +} wl_mpf_state_t; +/* + * WLFCTS definition + */ +typedef struct wl_txstatus_additional_info { + uint32 rspec; + uint32 enq_ts; + uint32 last_ts; + uint32 entry_ts; + uint16 seq; + uint8 rts_cnt; + uint8 tx_cnt; +} wl_txstatus_additional_info_t; + +/** Service discovery */ +typedef struct { + uint8 transaction_id; /**< Transaction id */ + uint8 protocol; /**< Service protocol type */ + uint16 query_len; /**< Length of query */ + uint16 response_len; /**< Length of response */ + uint8 qrbuf[]; +} wl_p2po_qr_t; + +typedef struct { + uint16 period; /**< extended listen period */ + uint16 interval; /**< extended listen interval */ + uint16 count; /* count to repeat */ + uint16 pad; /* pad for 32bit align */ +} wl_p2po_listen_t; + +/** GAS state machine tunable parameters. Structure field values of 0 means use the default. */ +typedef struct wl_gas_config { + uint16 max_retransmit; /**< Max # of firmware/driver retransmits on no Ack + * from peer (on top of the ucode retries). + */ + uint16 response_timeout; /**< Max time to wait for a GAS-level response + * after sending a packet. + */ + uint16 max_comeback_delay; /**< Max GAS response comeback delay. + * Exceeding this fails the GAS exchange. 
+ */ + uint16 max_retries; /**< Max # of GAS state machine retries on failure + * of a GAS frame exchange. + */ +} wl_gas_config_t; + +/** P2P Find Offload parameters */ +typedef struct wl_p2po_find_config { + uint16 version; /**< Version of this struct */ + uint16 length; /**< sizeof(wl_p2po_find_config_t) */ + int32 search_home_time; /**< P2P search state home time when concurrent + * connection exists. -1 for default. + */ + uint8 num_social_channels; + /**< Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX. + * 0 means use default social channels. + */ + uint8 flags; + uint16 social_channels[1]; /**< Variable length array of social channels */ +} wl_p2po_find_config_t; +#define WL_P2PO_FIND_CONFIG_VERSION 2 /**< value for version field */ + +/** wl_p2po_find_config_t flags */ +#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01 /**< Whether to scan for all APs in the p2po_find + * periodic scans of all channels. + * 0 means scan for only P2P devices. + * 1 means scan for P2P devices plus non-P2P APs. + */ + +/** For adding a WFDS service to seek */ +typedef struct { + uint32 seek_hdl; /**< unique id chosen by host */ + uint8 addr[6]; /**< Seek service from a specific device with this + * MAC address, all 1's for any device. + */ + uint8 service_hash[P2P_WFDS_HASH_LEN]; + uint8 service_name_len; + uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN]; + /**< Service name to seek, not null terminated */ + uint8 service_info_req_len; + uint8 service_info_req[1]; /**< Service info request, not null terminated. + * Variable length specified by service_info_req_len. + * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN. 
+ */ +} wl_p2po_wfds_seek_add_t; + +/** For deleting a WFDS service to seek */ +typedef struct { + uint32 seek_hdl; /**< delete service specified by id */ +} wl_p2po_wfds_seek_del_t; + +/** For adding a WFDS service to advertise */ +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 advertise_hdl; /**< unique id chosen by host */ + uint8 service_hash[P2P_WFDS_HASH_LEN]; + uint32 advertisement_id; + uint16 service_config_method; + uint8 service_name_len; + uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; + /**< Service name , not null terminated */ + uint8 service_status; + uint16 service_info_len; + uint8 service_info[1]; /**< Service info, not null terminated. + * Variable length specified by service_info_len. + * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN. + */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t; +#include + +/** For deleting a WFDS service to advertise */ +typedef struct { + uint32 advertise_hdl; /**< delete service specified by hdl */ +} wl_p2po_wfds_advertise_del_t; + +/** P2P Offload discovery mode for the p2po_state iovar */ +typedef enum { + WL_P2PO_DISC_STOP, + WL_P2PO_DISC_LISTEN, + WL_P2PO_DISC_DISCOVERY +} disc_mode_t; + +/* ANQP offload */ + +#define ANQPO_MAX_QUERY_SIZE 256 +typedef struct { + uint16 max_retransmit; /**< ~0 use default, max retransmit on no ACK from peer */ + uint16 response_timeout; /**< ~0 use default, msec to wait for resp after tx packet */ + uint16 max_comeback_delay; /**< ~0 use default, max comeback delay in resp else fail */ + uint16 max_retries; /**< ~0 use default, max retries on failure */ + uint16 query_len; /**< length of ANQP query */ + uint8 query_data[1]; /**< ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */ +} wl_anqpo_set_t; + +#define WL_ANQPO_FLAGS_BSSID_WILDCARD 0x0001 +#define WL_ANQPO_PEER_LIST_VERSION_2 2 +typedef struct { + uint16 channel; /**< channel of the peer */ + struct ether_addr addr; /**< addr of the peer */ +} wl_anqpo_peer_v1_t; +typedef struct { + uint16 channel; /**< channel 
of the peer */ + struct ether_addr addr; /**< addr of the peer */ + uint32 flags; /**< 0x01-Peer is MBO Capable */ +} wl_anqpo_peer_v2_t; + +#define ANQPO_MAX_PEER_LIST 64 +typedef struct { + uint16 count; /**< number of peers in list */ + wl_anqpo_peer_v1_t peer[1]; /**< max ANQPO_MAX_PEER_LIST */ +} wl_anqpo_peer_list_v1_t; + +typedef struct { + uint16 version; /**instr_len * sizeof((apf_program)->instrs[0])) +#define WL_APF_PROGRAM_TOTAL_LEN(apf_program) \ + (WL_APF_PROGRAM_FIXED_LEN + WL_APF_PROGRAM_LEN(apf_program)) + +/** IOVAR "pkt_filter_enable" parameter. */ +typedef struct wl_pkt_filter_enable { + uint32 id; /**< Unique filter id */ + uint32 enable; /**< Enable/disable bool */ +} wl_pkt_filter_enable_t; + +/** IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */ +typedef struct wl_pkt_filter_list { + uint32 num; /**< Number of installed packet filters */ + uint8 filter[]; /**< Variable array of packet filters. */ +} wl_pkt_filter_list_t; + +#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter) + +/** IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */ +typedef struct wl_pkt_filter_stats { + uint32 num_pkts_matched; /**< # filter matches for specified filter id */ + uint32 num_pkts_forwarded; /**< # packets fwded from dongle to host for all filters */ + uint32 num_pkts_discarded; /**< # packets discarded by dongle for all filters */ +} wl_pkt_filter_stats_t; + +/** IOVAR "pkt_filter_ports" parameter. Configure TCP/UDP port filters. 
*/ +typedef struct wl_pkt_filter_ports { + uint8 version; /**< Be proper */ + uint8 reserved; /**< Be really proper */ + uint16 count; /**< Number of ports following */ + /* End of fixed data */ + uint16 ports[1]; /**< Placeholder for ports[] */ +} wl_pkt_filter_ports_t; + +#define WL_PKT_FILTER_PORTS_FIXED_LEN OFFSETOF(wl_pkt_filter_ports_t, ports) + +#define WL_PKT_FILTER_PORTS_VERSION 0 +#if defined(WL_PKT_FLTR_EXT) && !defined(WL_PKT_FLTR_EXT_DISABLED) +#define WL_PKT_FILTER_PORTS_MAX 256 +#else +#define WL_PKT_FILTER_PORTS_MAX 128 +#endif /* WL_PKT_FLTR_EXT && !WL_PKT_FLTR_EXT_DISABLED */ + +#define RSN_REPLAY_LEN 8 +typedef struct _gtkrefresh { + uint8 KCK[RSN_KCK_LENGTH]; + uint8 KEK[RSN_KEK_LENGTH]; + uint8 ReplayCounter[RSN_REPLAY_LEN]; +} gtk_keyinfo_t, *pgtk_keyinfo_t; + +/** Sequential Commands ioctl */ +typedef struct wl_seq_cmd_ioctl { + uint32 cmd; /**< common ioctl definition */ + uint32 len; /**< length of user buffer */ +} wl_seq_cmd_ioctl_t; + +#define WL_SEQ_CMD_ALIGN_BYTES 4 + +/** + * These are the set of get IOCTLs that should be allowed when using + * IOCTL sequence commands. These are issued implicitly by wl.exe each time + * it is invoked. We never want to buffer these, or else wl.exe will stop working. + */ +#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \ + (((cmd) == WLC_GET_MAGIC) || \ + ((cmd) == WLC_GET_VERSION) || \ + ((cmd) == WLC_GET_AP) || \ + ((cmd) == WLC_GET_INSTANCE)) + +#define MAX_PKTENG_SWEEP_STEPS 40 +typedef struct wl_pkteng { + uint32 flags; + uint32 delay; /**< Inter-packet delay */ + uint32 nframes; /**< Number of frames */ + uint32 length; /**< Packet length */ + uint8 seqno; /**< Enable/disable sequence no. 
*/ + struct ether_addr dest; /**< Destination address */ + struct ether_addr src; /**< Source address */ + uint8 sweep_steps; /**< Number of sweep power */ + uint8 PAD[2]; +} wl_pkteng_t; + +/* IOVAR pkteng_sweep_counters response structure */ +#define WL_PKTENG_SWEEP_COUNTERS_VERSION 1 +typedef struct wl_pkteng_sweep_ctrs { + uint16 version; /**< Version - 1 */ + uint16 size; /**< Complete Size including sweep_counters */ + uint16 sweep_steps; /**< Number of steps */ + uint16 PAD; + uint16 sweep_counter[]; /**< Array of frame counters */ +} wl_pkteng_sweep_ctrs_t; + +/* IOVAR pkteng_rx_pkt response structure */ +#define WL_PKTENG_RX_PKT_VERSION 1 +typedef struct wl_pkteng_rx_pkt { + uint16 version; /**< Version - 1 */ + uint16 size; /**< Complete Size including the packet */ + uint8 payload[]; /**< Packet payload */ +} wl_pkteng_rx_pkt_t; + +#define WL_PKTENG_RU_FILL_VER_1 1u +#define WL_PKTENG_RU_FILL_VER_2 2u +// struct for ru packet engine +typedef struct wl_pkteng_ru_v1 { + uint16 version; /* ver is 1 */ + uint16 length; /* size of complete structure */ + uint8 bw; /* bandwidth info */ + uint8 ru_alloc_val; /* ru allocation index number */ + uint8 mcs_val; /* mcs allocated value */ + uint8 nss_val; /* num of spatial streams */ + uint32 num_bytes; /* approx num of bytes to calculate other required params */ + uint8 cp_ltf_val ; /* GI and LTF symbol size */ + uint8 he_ltf_symb ; /* num of HE-LTF symbols */ + uint8 stbc; /* STBC support */ + uint8 coding_val; /* BCC/LDPC coding support */ + uint8 pe_category; /* PE duration 0/8/16usecs */ + uint8 dcm; /* dual carrier modulation */ + uint8 mumimo_ltfmode; /* ltf mode */ + uint8 trig_tx; /* form and transmit the trigger frame */ + uint8 trig_type; /* type of trigger frame */ + uint8 trig_period; /* trigger tx periodicity TBD */ + struct ether_addr dest; /* destination address for un-associated mode */ +} wl_pkteng_ru_v1_t; + +typedef struct wl_pkteng_ru_v2 { + uint16 version; /* ver is 1 */ + uint16 length; /* size 
of complete structure */ + uint8 bw; /* bandwidth info */ + uint8 ru_alloc_val; /* ru allocation index number */ + uint8 mcs_val; /* mcs allocated value */ + uint8 nss_val; /* num of spatial streams */ + uint32 num_bytes; /* approx num of bytes to calculate other required params */ + struct ether_addr dest; /* destination address for un-associated mode */ + uint8 cp_ltf_val ; /* GI and LTF symbol size */ + uint8 he_ltf_symb ; /* num of HE-LTF symbols */ + uint8 stbc; /* STBC support */ + uint8 coding_val; /* BCC/LDPC coding support */ + uint8 pe_category; /* PE duration 0/8/16usecs */ + uint8 dcm; /* dual carrier modulation */ + uint8 mumimo_ltfmode; /* ltf mode */ + uint8 trig_tx; /* form and transmit the trigger frame */ + uint8 trig_type; /* type of trigger frame */ + uint8 trig_period; /* trigger tx periodicity TBD */ + uint8 tgt_rssi; /* target rssi value in encoded format */ + uint8 pad[3]; /* 3 byte padding to make structure size a multiple of 32bits */ +} wl_pkteng_ru_v2_t; + +#ifndef WL_PKTENG_RU_VER +/* App uses the latest version - source picks it up from wlc_types.h */ +typedef wl_pkteng_ru_v2_t wl_pkteng_ru_fill_t; +#endif // endif + +typedef struct wl_trig_frame_info { + /* Structure versioning and structure length params */ + uint16 version; + uint16 length; + /* Below params are the fields related to trigger frame contents */ + /* Common Info Params Figure 9-52d - 11ax Draft 1.1 */ + uint16 lsig_len; + uint16 trigger_type; + uint16 cascade_indication; + uint16 cs_req; + uint16 bw; + uint16 cp_ltf_type; + uint16 mu_mimo_ltf_mode; + uint16 num_he_ltf_syms; + uint16 stbc; + uint16 ldpc_extra_symb; + uint16 ap_tx_pwr; + uint16 afactor; + uint16 pe_disambiguity; + uint16 spatial_resuse; + uint16 doppler; + uint16 he_siga_rsvd; + uint16 cmn_info_rsvd; + /* User Info Params Figure 9-52e - 11ax Draft 1.1 */ + uint16 aid12; + uint16 ru_alloc; + uint16 coding_type; + uint16 mcs; + uint16 dcm; + uint16 ss_alloc; + uint16 tgt_rssi; + uint16 usr_info_rsvd; +} 
wl_trig_frame_info_t; + +/* wl pkteng_stats related definitions */ +#define WL_PKTENG_STATS_V1 (1) +#define WL_PKTENG_STATS_V2 (2) + +typedef struct wl_pkteng_stats_v1 { + uint32 lostfrmcnt; /**< RX PER test: no of frames lost (skip seqno) */ + int32 rssi; /**< RSSI */ + int32 snr; /**< signal to noise ratio */ + uint16 rxpktcnt[NUM_80211_RATES+1]; + uint8 rssi_qdb; /**< qdB portion of the computed rssi */ + uint8 version; +} wl_pkteng_stats_v1_t; + +typedef struct wl_pkteng_stats_v2 { + uint32 lostfrmcnt; /**< RX PER test: no of frames lost (skip seqno) */ + int32 rssi; /**< RSSI */ + int32 snr; /**< signal to noise ratio */ + uint16 rxpktcnt[NUM_80211_RATES+1]; + uint8 rssi_qdb; /**< qdB portion of the computed rssi */ + uint8 version; + uint16 length; + uint16 pad; + int32 rssi_per_core[WL_RSSI_ANT_MAX]; + int32 rssi_per_core_qdb[WL_RSSI_ANT_MAX]; +} wl_pkteng_stats_v2_t; + +#ifndef WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS +typedef wl_pkteng_stats_v1_t wl_pkteng_stats_t; +#endif /* WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS */ + +typedef struct wl_txcal_params { + wl_pkteng_t pkteng; + uint8 gidx_start; + int8 gidx_step; + uint8 gidx_stop; + uint8 PAD; +} wl_txcal_params_t; + +typedef struct wl_txcal_gainidx { + uint8 num_actv_cores; + uint8 gidx_start_percore[WL_STA_ANT_MAX]; + uint8 gidx_stop_percore[WL_STA_ANT_MAX]; + uint8 PAD[3]; +} wl_txcal_gainidx_t; + +typedef struct wl_txcal_params_v2 { + wl_pkteng_t pkteng; + int8 gidx_step; + uint8 pwr_start[WL_STA_ANT_MAX]; + uint8 pwr_stop[WL_STA_ANT_MAX]; + uint8 init_start_idx; + uint8 gidx_start_percore[WL_STA_ANT_MAX]; + uint8 gidx_stop_percore[WL_STA_ANT_MAX]; + uint16 version; +} wl_txcal_params_v2_t; + +typedef wl_txcal_params_t wl_txcal_params_v1_t; + +typedef struct wl_rssilog_params { + uint8 enable; + uint8 rssi_threshold; + uint8 time_threshold; + uint8 pad; +} wl_rssilog_params_t; + +typedef struct wl_sslpnphy_papd_debug_data { + uint8 psat_pwr; + uint8 psat_indx; + uint8 final_idx; + uint8 start_idx; + int32 
min_phase; + int32 voltage; + int8 temperature; + uint8 PAD[3]; +} wl_sslpnphy_papd_debug_data_t; +typedef struct wl_sslpnphy_debug_data { + int16 papdcompRe [64]; + int16 papdcompIm [64]; +} wl_sslpnphy_debug_data_t; +typedef struct wl_sslpnphy_spbdump_data { + uint16 tbl_length; + int16 spbreal[256]; + int16 spbimg[256]; +} wl_sslpnphy_spbdump_data_t; +typedef struct wl_sslpnphy_percal_debug_data { + uint32 cur_idx; + uint32 tx_drift; + uint8 prev_cal_idx; + uint8 PAD[3]; + uint32 percal_ctr; + int32 nxt_cal_idx; + uint32 force_1idxcal; + uint32 onedxacl_req; + int32 last_cal_volt; + int8 last_cal_temp; + uint8 PAD[3]; + uint32 vbat_ripple; + uint32 exit_route; + int32 volt_winner; +} wl_sslpnphy_percal_debug_data_t; + +typedef enum { + wowl_pattern_type_bitmap = 0, + wowl_pattern_type_arp, + wowl_pattern_type_na +} wowl_pattern_type_t; + +typedef struct wl_wowl_pattern { + uint32 masksize; /**< Size of the mask in #of bytes */ + uint32 offset; /**< Pattern byte offset in packet */ + uint32 patternoffset; /**< Offset of start of pattern in the structure */ + uint32 patternsize; /**< Size of the pattern itself in #of bytes */ + uint32 id; /**< id */ + uint32 reasonsize; /**< Size of the wakeup reason code */ + wowl_pattern_type_t type; /**< Type of pattern */ + /* Mask follows the structure above */ + /* Pattern follows the mask is at 'patternoffset' from the start */ +} wl_wowl_pattern_t; + +typedef struct wl_wowl_pattern_list { + uint32 count; + wl_wowl_pattern_t pattern[1]; +} wl_wowl_pattern_list_t; + +typedef struct wl_wowl_wakeind { + uint8 pci_wakeind; /**< Whether PCI PMECSR PMEStatus bit was set */ + uint32 ucode_wakeind; /**< What wakeup-event indication was set by ucode */ +} wl_wowl_wakeind_t; + +/** per AC rate control related data structure */ +typedef struct wl_txrate_class { + uint8 init_rate; + uint8 min_rate; + uint8 max_rate; +} wl_txrate_class_t; + +/** structure for Overlap BSS scan arguments */ +typedef struct wl_obss_scan_arg { + int16 
passive_dwell; + int16 active_dwell; + int16 bss_widthscan_interval; + int16 passive_total; + int16 active_total; + int16 chanwidth_transition_delay; + int16 activity_threshold; +} wl_obss_scan_arg_t; + +#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t) + +/** RSSI event notification configuration. */ +typedef struct wl_rssi_event { + uint32 rate_limit_msec; /**< # of events posted to application will be limited to + * one per specified period (0 to disable rate limit). + */ + uint8 num_rssi_levels; /**< Number of entries in rssi_levels[] below */ + int8 rssi_levels[MAX_RSSI_LEVELS]; /**< Variable number of RSSI levels. An event + * will be posted each time the RSSI of received + * beacons/packets crosses a level. + */ + int8 pad[3]; +} wl_rssi_event_t; + +#define RSSI_MONITOR_VERSION 1 +#define RSSI_MONITOR_STOP (1 << 0) +typedef struct wl_rssi_monitor_cfg { + uint8 version; + uint8 flags; + int8 max_rssi; + int8 min_rssi; +} wl_rssi_monitor_cfg_t; + +typedef struct wl_rssi_monitor_evt { + uint8 version; + int8 cur_rssi; + uint16 pad; +} wl_rssi_monitor_evt_t; + +/* CCA based channel quality event configuration (ID values for both config and report) */ +#define WL_CHAN_QUAL_CCA 0 +#define WL_CHAN_QUAL_NF 1 +#define WL_CHAN_QUAL_NF_LTE 2 +#define WL_CHAN_QUAL_TOTAL 3 /* The total IDs supported in both config and report */ +/* Additional channel quality event support in report only (>= 0x100) + * Notice that uint8 is used in configuration struct wl_chan_qual_metric_t, but uint16 is + * used for report in struct cca_chan_qual_event_t. So the ID values beyond 8-bit are used + * for reporting purpose only. 
+ */ +#define WL_CHAN_QUAL_FULL_CCA (0x100 | WL_CHAN_QUAL_CCA) + +#define MAX_CHAN_QUAL_LEVELS 8 + +typedef struct wl_chan_qual_metric { + uint8 id; /**< metric ID */ + uint8 num_levels; /**< Number of entries in rssi_levels[] below */ + uint16 flags; + int16 htol[MAX_CHAN_QUAL_LEVELS]; /**< threshold level array: hi-to-lo */ + int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /**< threshold level array: lo-to-hi */ +} wl_chan_qual_metric_t; + +typedef struct wl_chan_qual_event { + uint32 rate_limit_msec; /**< # of events posted to application will be limited to + * one per specified period (0 to disable rate limit). + */ + uint16 flags; + uint16 num_metrics; + wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /**< metric array */ +} wl_chan_qual_event_t; +typedef struct wl_action_obss_coex_req { + uint8 info; + uint8 num; + uint8 ch_list[1]; +} wl_action_obss_coex_req_t; + +/** IOVar parameter block for small MAC address array with type indicator */ +#define WL_IOV_MAC_PARAM_LEN 4 + +#define WL_IOV_PKTQ_LOG_PRECS 16 + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 num_addrs; + uint8 addr_type[WL_IOV_MAC_PARAM_LEN]; + struct ether_addr ea[WL_IOV_MAC_PARAM_LEN]; +} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t; +#include + +/** This is extra info that follows wl_iov_mac_params_t */ +typedef struct { + uint32 addr_info[WL_IOV_MAC_PARAM_LEN]; +} wl_iov_mac_extra_params_t; + +/** Combined structure */ +typedef struct { + wl_iov_mac_params_t params; + wl_iov_mac_extra_params_t extra_params; +} wl_iov_mac_full_params_t; + +/** Parameter block for PKTQ_LOG statistics */ +#define PKTQ_LOG_COUNTERS_V4 \ + /* packets requested to be stored */ \ + uint32 requested; \ + /* packets stored */ \ + uint32 stored; \ + /* packets saved, because a lowest priority queue has given away one packet */ \ + uint32 saved; \ + /* packets saved, because an older packet from the same queue has been dropped */ \ + uint32 selfsaved; \ + /* packets dropped, because pktq is full with higher precedence 
packets */ \ + uint32 full_dropped; \ + /* packets dropped because pktq per that precedence is full */ \ + uint32 dropped; \ + /* packets dropped, in order to save one from a queue of a highest priority */ \ + uint32 sacrificed; \ + /* packets droped because of hardware/transmission error */ \ + uint32 busy; \ + /* packets re-sent because they were not received */ \ + uint32 retry; \ + /* packets retried again (ps pretend) prior to moving power save mode */ \ + uint32 ps_retry; \ + /* suppressed packet count */ \ + uint32 suppress; \ + /* packets finally dropped after retry limit */ \ + uint32 retry_drop; \ + /* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \ + uint32 max_avail; \ + /* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \ + uint32 max_used; \ + /* the maximum capacity of the queue */ \ + uint32 queue_capacity; \ + /* count of rts attempts that failed to receive cts */ \ + uint32 rtsfail; \ + /* count of packets sent (acked) successfully */ \ + uint32 acked; \ + /* running total of phy rate of packets sent successfully */ \ + uint32 txrate_succ; \ + /* running total of phy 'main' rate */ \ + uint32 txrate_main; \ + /* actual data transferred successfully */ \ + uint32 throughput; \ + /* time difference since last pktq_stats */ \ + uint32 time_delta; + +typedef struct { + PKTQ_LOG_COUNTERS_V4 +} pktq_log_counters_v04_t; + +/** v5 is the same as V4 with extra parameter */ +typedef struct { + PKTQ_LOG_COUNTERS_V4 + /** cumulative time to transmit */ + uint32 airtime; +} pktq_log_counters_v05_t; + +typedef struct { + uint8 num_prec[WL_IOV_MAC_PARAM_LEN]; + pktq_log_counters_v04_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; + uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; + uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; + char headings[]; +} pktq_log_format_v04_t; + +typedef struct { + uint8 num_prec[WL_IOV_MAC_PARAM_LEN]; + pktq_log_counters_v05_t 
counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; + uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; + uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; + char headings[]; +} pktq_log_format_v05_t; + +typedef struct { + uint32 version; + wl_iov_mac_params_t params; + union { + pktq_log_format_v04_t v04; + pktq_log_format_v05_t v05; + } pktq_log; +} wl_iov_pktq_log_t; + +/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */ +#define PKTQ_LOG_AUTO (1 << 31) +#define PKTQ_LOG_DEF_PREC (1 << 30) + +typedef struct wl_pfn_macaddr_cfg_0 { + uint8 version; + uint8 reserved; + struct ether_addr macaddr; +} wl_pfn_macaddr_cfg_0_t; +#define LEGACY1_WL_PFN_MACADDR_CFG_VER 0 +#define WL_PFN_MAC_OUI_ONLY_MASK 1 +#define WL_PFN_SET_MAC_UNASSOC_MASK 2 +#define WL_PFN_RESTRICT_LA_MAC_MASK 4 +#define WL_PFN_MACADDR_FLAG_MASK 0x7 +/** To configure pfn_macaddr */ +typedef struct wl_pfn_macaddr_cfg { + uint8 version; + uint8 flags; + struct ether_addr macaddr; +} wl_pfn_macaddr_cfg_t; +#define WL_PFN_MACADDR_CFG_VER 1 + +/* + * SCB_BS_DATA iovar definitions start. + */ +#define SCB_BS_DATA_STRUCT_VERSION 1 + +/** The actual counters maintained for each station */ +typedef struct { + /* The following counters are a subset of what pktq_stats provides per precedence. 
*/ + uint32 retry; /**< packets re-sent because they were not received */ + uint32 retry_drop; /**< packets finally dropped after retry limit */ + uint32 rtsfail; /**< count of rts attempts that failed to receive cts */ + uint32 acked; /**< count of packets sent (acked) successfully */ + uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */ + uint32 txrate_main; /**< running total of phy 'main' rate */ + uint32 throughput; /**< actual data transferred successfully */ + uint32 time_delta; /**< time difference since last pktq_stats */ + uint32 airtime; /**< cumulative total medium access delay in useconds */ +} iov_bs_data_counters_t; + +/** The structure for individual station information. */ +#include +typedef BWL_PRE_PACKED_STRUCT struct { + struct ether_addr station_address; /**< The station MAC address */ + uint16 station_flags; /**< Bit mask of flags, for future use. */ + iov_bs_data_counters_t station_counters; /**< The actual counter values */ +} BWL_POST_PACKED_STRUCT iov_bs_data_record_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 structure_version; /**< Structure version number (for wl/wlu matching) */ + uint16 structure_count; /**< Number of iov_bs_data_record_t records following */ + iov_bs_data_record_t structure_record[1]; /**< 0 - structure_count records */ +} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t; +#include + +/* Bitmask of options that can be passed in to the iovar. */ +enum { + SCB_BS_DATA_FLAG_NO_RESET = (1<<0) /**< Do not clear the counters after reading */ +}; +/* + * SCB_BS_DATA iovar definitions end. 
+ */ + +typedef struct wlc_extlog_cfg { + int32 max_number; + uint16 module; /**< bitmap */ + uint8 level; + uint8 flag; + uint16 version; + uint16 PAD; +} wlc_extlog_cfg_t; + +typedef struct log_record { + uint32 time; + uint16 module; + uint16 id; + uint8 level; + uint8 sub_unit; + uint8 seq_num; + uint8 pad; + int32 arg; + char str[MAX_ARGSTR_LEN]; + char PAD[4-MAX_ARGSTR_LEN%4]; +} log_record_t; + +typedef struct wlc_extlog_req { + uint32 from_last; + uint32 num; +} wlc_extlog_req_t; + +typedef struct wlc_extlog_results { + uint16 version; + uint16 record_len; + uint32 num; + log_record_t logs[1]; +} wlc_extlog_results_t; + +typedef struct log_idstr { + uint16 id; + uint16 flag; + uint8 arg_type; + const char *fmt_str; +} log_idstr_t; + +#define FMTSTRF_USER 1 + +/* flat ID definitions + * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will + * affect backward compatibility with pre-existing apps + */ +typedef enum { + FMTSTR_DRIVER_UP_ID = 0, + FMTSTR_DRIVER_DOWN_ID = 1, + FMTSTR_SUSPEND_MAC_FAIL_ID = 2, + FMTSTR_NO_PROGRESS_ID = 3, + FMTSTR_RFDISABLE_ID = 4, + FMTSTR_REG_PRINT_ID = 5, + FMTSTR_EXPTIME_ID = 6, + FMTSTR_JOIN_START_ID = 7, + FMTSTR_JOIN_COMPLETE_ID = 8, + FMTSTR_NO_NETWORKS_ID = 9, + FMTSTR_SECURITY_MISMATCH_ID = 10, + FMTSTR_RATE_MISMATCH_ID = 11, + FMTSTR_AP_PRUNED_ID = 12, + FMTSTR_KEY_INSERTED_ID = 13, + FMTSTR_DEAUTH_ID = 14, + FMTSTR_DISASSOC_ID = 15, + FMTSTR_LINK_UP_ID = 16, + FMTSTR_LINK_DOWN_ID = 17, + FMTSTR_RADIO_HW_OFF_ID = 18, + FMTSTR_RADIO_HW_ON_ID = 19, + FMTSTR_EVENT_DESC_ID = 20, + FMTSTR_PNP_SET_POWER_ID = 21, + FMTSTR_RADIO_SW_OFF_ID = 22, + FMTSTR_RADIO_SW_ON_ID = 23, + FMTSTR_PWD_MISMATCH_ID = 24, + FMTSTR_FATAL_ERROR_ID = 25, + FMTSTR_AUTH_FAIL_ID = 26, + FMTSTR_ASSOC_FAIL_ID = 27, + FMTSTR_IBSS_FAIL_ID = 28, + FMTSTR_EXTAP_FAIL_ID = 29, + FMTSTR_MAX_ID +} log_fmtstr_id_t; + +/** 11k Neighbor Report element (unversioned, deprecated) */ +typedef struct nbr_element { + uint8 id; + uint8 len; + 
struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uint8 pad; +} nbr_element_t; +#define NBR_ADD_STATIC 0 +#define NBR_ADD_DYNAMIC 1 + +#define WL_RRM_NBR_RPT_VER 1 +/** 11k Neighbor Report element */ +typedef struct nbr_rpt_elem { + uint8 version; + uint8 id; + uint8 len; + uint8 pad; + struct ether_addr bssid; + uint8 pad_1[2]; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uint8 addtype; /* static for manual add or dynamic if auto-learning of neighbors */ + wlc_ssid_t ssid; + chanspec_t chanspec; + uint8 bss_trans_preference; + uint8 flags; +} nbr_rpt_elem_t; + +typedef enum event_msgs_ext_command { + EVENTMSGS_NONE = 0, + EVENTMSGS_SET_BIT = 1, + EVENTMSGS_RESET_BIT = 2, + EVENTMSGS_SET_MASK = 3 +} event_msgs_ext_command_t; + +#define EVENTMSGS_VER 1 +#define EVENTMSGS_EXT_STRUCT_SIZE OFFSETOF(eventmsgs_ext_t, mask[0]) + +/* len- for SET it would be mask size from the application to the firmware */ +/* for GET it would be actual firmware mask size */ +/* maxgetsize - is only used for GET. 
indicate max mask size that the */ +/* application can read from the firmware */ +typedef struct eventmsgs_ext +{ + uint8 ver; + uint8 command; + uint8 len; + uint8 maxgetsize; + uint8 mask[1]; +} eventmsgs_ext_t; + +#include +typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params { + /** no of host dma descriptors programmed by the firmware before a commit */ + uint16 max_dma_descriptors; + + uint16 host_buf_len; /**< length of host buffer */ + dmaaddr_t host_buf_addr; /**< physical address for bus_throughput_buf */ +} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t; +#include + +typedef struct pcie_bus_tput_stats { + uint16 time_taken; /**< no of secs the test is run */ + uint16 nbytes_per_descriptor; /**< no of bytes of data dma ed per descriptor */ + + /** no of desciptors for which dma is sucessfully completed within the test time */ + uint32 count; +} pcie_bus_tput_stats_t; + +#define HOST_WAKEUP_DATA_VER 1 +#include +/* Bus interface host wakeup data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_host_wakeup_data { + uint16 ver; + uint16 len; + uchar data[1]; /* wakeup data */ +} BWL_POST_PACKED_STRUCT wl_host_wakeup_data_t; +#include + +#define HOST_WAKEUP_DATA_VER_2 2 +#include +/* Bus interface host wakeup data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_host_wakeup_data_v2 { + uint16 ver; + uint16 len; + uint32 gpio_toggle_time; /* gpio toggle time in ms */ + uchar data[1]; /* wakeup data */ +} BWL_POST_PACKED_STRUCT wl_host_wakeup_data_v2_t; +#include + +typedef struct keepalives_max_idle { + uint16 keepalive_count; /**< nmbr of keepalives per bss_max_idle period */ + uint8 mkeepalive_index; /**< mkeepalive_index for keepalive frame to be used */ + uint8 PAD; /**< to align next field */ + uint16 max_interval; /**< seconds */ +} keepalives_max_idle_t; + +#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0) +#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1) + +/* ##### HMAP section ##### */ +#define PCIE_MAX_HMAP_WINDOWS 8 +#define PCIE_HMAPTEST_VERSION 2 +#define 
HMAPTEST_INVALID_OFFSET 0xFFFFFFFFu +#define HMAPTEST_DEFAULT_WRITE_PATTERN 0xBABECAFEu +#define HMAPTEST_ACCESS_ARM 0 +#define HMAPTEST_ACCESS_M2M 1 +#define HMAPTEST_ACCESS_D11 2 +#define HMAPTEST_ACCESS_NONE 3 + +typedef struct pcie_hmaptest { + uint16 version; /* Version */ + uint16 length; /* Length of entire structure */ + uint32 xfer_len; + uint32 accesstype; + uint32 is_write; + uint32 is_invalid; + uint32 host_addr_hi; + uint32 host_addr_lo; + uint32 host_offset; + uint32 value; /* 4 byte value to be filled in case of write access test */ + uint32 delay; /* wait time in seconds before initiating access from dongle */ +} pcie_hmaptest_t; + +/* HMAP window register set */ +typedef struct hmapwindow { + uint32 baseaddr_lo; /* BaseAddrLower */ + uint32 baseaddr_hi; /* BaseAddrUpper */ + uint32 windowlength; /* Window Length */ +} hmapwindow_t; + +#define PCIE_HMAP_VERSION 1 +typedef struct pcie_hmap { + uint16 version; /**< Version */ + uint16 length; /**< Length of entire structure */ + uint32 enable; /**< status of HMAP enabled/disabled */ + uint32 nwindows; /* no. of HMAP windows enabled */ + uint32 window_config; /* HMAP window_config register */ + uint32 hmap_violationaddr_lo; /* violating address lo */ + uint32 hmap_violationaddr_hi; /* violating addr hi */ + uint32 hmap_violation_info; /* violation info */ + hmapwindow_t hwindows[]; /* Multiple hwindows */ +} pcie_hmap_t; + +/* ##### Power Stats section ##### */ + +#define WL_PWRSTATS_VERSION 2 + +/** Input structure for pwrstats IOVAR */ +typedef struct wl_pwrstats_query { + uint16 length; /**< Number of entries in type array. */ + uint16 type[1]; /**< Types (tags) to retrieve. + * Length 0 (no types) means get all. 
+ */ +} wl_pwrstats_query_t; + +/** This structure is for version 2; version 1 will be deprecated in by FW */ +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { + uint16 version; /**< Version = 2 is TLV format */ + uint16 length; /**< Length of entire structure */ + uint8 data[1]; /**< TLV data, a series of structures, + * each starting with type and length. + * + * Padded as necessary so each section + * starts on a 4-byte boundary. + * + * Both type and len are uint16, but the + * upper nibble of length is reserved so + * valid len values are 0-4095. + */ +} BWL_POST_PACKED_STRUCT wl_pwrstats_t; +#include +#define WL_PWR_STATS_HDRLEN OFFSETOF(wl_pwrstats_t, data) + +/* Bits for wake reasons */ +#define WLC_PMD_WAKE_SET 0x1 +#define WLC_PMD_PM_AWAKE_BCN 0x2 +/* BIT:3 is no longer being used */ +#define WLC_PMD_SCAN_IN_PROGRESS 0x8 +#define WLC_PMD_RM_IN_PROGRESS 0x10 +#define WLC_PMD_AS_IN_PROGRESS 0x20 +#define WLC_PMD_PM_PEND 0x40 +#define WLC_PMD_PS_POLL 0x80 +#define WLC_PMD_CHK_UNALIGN_TBTT 0x100 +#define WLC_PMD_APSD_STA_UP 0x200 +#define WLC_PMD_TX_PEND_WAR 0x400 /* obsolete, can be reused */ +#define WLC_PMD_GPTIMER_STAY_AWAKE 0x800 +#define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000 +#define WLC_PMD_NON_PRIM_STA_UP 0x4000 +#define WLC_PMD_AP_UP 0x8000 + +typedef struct wlc_pm_debug { + uint32 timestamp; /**< timestamp in millisecond */ + uint32 reason; /**< reason(s) for staying awake */ +} wlc_pm_debug_t; + +/** WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */ +#define WLC_STA_AWAKE_STATES_MAX_V1 30 +#define WLC_PMD_EVENT_MAX_V1 32 +/** Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */ +#include +typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 { + uint32 curr_time; /**< ms */ + uint32 hw_macc; /**< HW maccontrol */ + uint32 sw_macc; /**< SW maccontrol */ + uint32 pm_dur; /**< Total sleep time in PM, msecs */ + uint32 mpc_dur; /**< Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => 
local-clk slow */ + int32 last_drift; /**< Most recent TSF drift from beacon */ + int32 min_drift; /**< Min TSF drift from beacon in magnitude */ + int32 max_drift; /**< Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /**< Avg TSF drift from beacon */ + + /* Wake history tracking */ + uint8 pmwake_idx; /**< for stepping through pm_state */ + wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /**< timestamped wake bits */ + uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /**< cumulative usecs per wake reason */ + uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */ +} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 { + uint16 type; /**< WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v1_t awake_data; + uint32 frts_time; /**< Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /**< No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t; +#include + +/** WL_PWRSTATS_TYPE_PM_AWAKE2 structures. 
Data sent as part of pwrstats IOVAR */
+typedef struct pm_awake_data_v2 {
+ uint32 curr_time; /**< ms */
+ uint32 hw_macc; /**< HW maccontrol */
+ uint32 sw_macc; /**< SW maccontrol */
+ uint32 pm_dur; /**< Total sleep time in PM, msecs */
+ uint32 mpc_dur; /**< Total sleep time in MPC, msecs */
+
+ /* int32 drifts = remote - local; +ve drift => local-clk slow */
+ int32 last_drift; /**< Most recent TSF drift from beacon */
+ int32 min_drift; /**< Min TSF drift from beacon in magnitude */
+ int32 max_drift; /**< Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /**< Avg TSF drift from beacon */
+
+ /* Wake history tracking */
+
+ /* pmstate array (type wlc_pm_debug_t) start offset */
+ uint16 pm_state_offset;
+ /** pmstate number of array entries */
+ uint16 pm_state_len;
+
+ /** array (type uint32) start offset */
+ uint16 pmd_event_wake_dur_offset;
+ /** pmd_event_wake_dur number of array entries */
+ uint16 pmd_event_wake_dur_len;
+
+ uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */
+ uint8 pmwake_idx; /**< for stepping through pm_state */
+ uint8 flags; /**< bit0: 1-sleep, 0- wake. bit1: 0-bit0 invalid, 1-bit0 valid */
+ uint8 pad[2];
+ uint32 frts_time; /**< Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /**< No of times frts ended since driver load */
+} pm_awake_data_v2_t;
+
+typedef struct wl_pwr_pm_awake_stats_v2 {
+ uint16 type; /**< WL_PWRSTATS_TYPE_PM_AWAKE */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+
+ pm_awake_data_v2_t awake_data;
+} wl_pwr_pm_awake_stats_v2_t;
+
+/* bit0: 1-sleep, 0- wake.
bit1: 0-bit0 invlid, 1-bit0 valid */ +#define WL_PWR_PM_AWAKE_STATS_WAKE 0x02 +#define WL_PWR_PM_AWAKE_STATS_ASLEEP 0x03 +#define WL_PWR_PM_AWAKE_STATS_WAKE_MASK 0x03 + +/* WL_PWRSTATS_TYPE_PM_AWAKE Version 2 structures taken from 4324/43342 */ +/* These structures are only to be used with 4324/43342 devices */ + +#define WL_STA_AWAKE_STATES_MAX_V2 30 +#define WL_PMD_EVENT_MAX_V2 32 +#define MAX_P2P_BSS_DTIM_PRD 4 + +#include +typedef BWL_PRE_PACKED_STRUCT struct ucode_dbg_v2 { + uint32 macctrl; + uint16 m_p2p_hps; + uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; + uint32 psmdebug[20]; + uint32 phydebug[20]; + uint32 psm_brc; + uint32 ifsstat; +} BWL_POST_PACKED_STRUCT ucode_dbg_v2_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct pmalert_awake_data_v2 { + uint32 curr_time; /* ms */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + + /* Wake history tracking */ + uint8 pmwake_idx; /* for stepping through pm_state */ + wlc_pm_debug_t pm_state[WL_STA_AWAKE_STATES_MAX_V2]; /* timestamped wake bits */ + uint32 pmd_event_wake_dur[WL_PMD_EVENT_MAX_V2]; /* cumulative usecs per wake reason */ + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ + uint32 start_event_dur[WL_PMD_EVENT_MAX_V2]; /* start event-duration */ + ucode_dbg_v2_t ud; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT pmalert_awake_data_v2_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct 
pm_alert_data_v2 { + uint32 version; + uint32 length; /* Length of entire structure */ + uint32 reasons; /* reason(s) for pm_alert */ + /* Following fields are present only for reasons + * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED + */ + uint32 prev_stats_time; /* msecs */ + uint32 prev_pm_dur; /* msecs */ + uint32 prev_mpc_dur; /* msecs */ + pmalert_awake_data_v2_t awake_data; +} BWL_POST_PACKED_STRUCT pm_alert_data_v2_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_status_v2 { + uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + pmalert_awake_data_v2_t awake_data; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_status_v2_t; +#include + +/* Below are latest definitions from PHO25178RC100_BRANCH_6_50 */ +/* wl_pwr_pm_awake_stats_v1_t is used for WL_PWRSTATS_TYPE_PM_AWAKE */ +/* (at least) the chip independent registers */ +typedef struct ucode_dbg_ext { + uint32 x120; + uint32 x124; + uint32 x154; + uint32 x158; + uint32 x15c; + uint32 x180; + uint32 x184; + uint32 x188; + uint32 x18c; + uint32 x1a0; + uint32 x1a8; + uint32 x1e0; + uint32 scr_x14; + uint32 scr_x2b; + uint32 scr_x2c; + uint32 scr_x2d; + uint32 scr_x2e; + + uint16 x40a; + uint16 x480; + uint16 x490; + uint16 x492; + uint16 x4d8; + uint16 x4b8; + uint16 x4ba; + uint16 x4bc; + uint16 x4be; + uint16 x500; + uint16 x50e; + uint16 x522; + uint16 x546; + uint16 x578; + uint16 x602; + uint16 x646; + uint16 x648; + uint16 x666; + uint16 x670; + uint16 x690; + uint16 x692; + uint16 x6a0; + uint16 x6a2; + uint16 x6a4; + uint16 x6b2; + uint16 x7c0; + + uint16 shm_x20; + uint16 shm_x4a; + uint16 shm_x5e; + uint16 shm_x5f; + uint16 shm_xaab; + uint16 shm_x74a; + uint16 shm_x74b; + uint16 shm_x74c; + uint16 shm_x74e; + uint16 shm_x756; + uint16 shm_x75b; + uint16 shm_x7b9; + 
uint16 shm_x7d4; + + uint16 shm_P2P_HPS; + uint16 shm_P2P_intr[16]; + uint16 shm_P2P_perbss[48]; +} ucode_dbg_ext_t; + +#include +typedef BWL_PRE_PACKED_STRUCT struct pm_alert_data_v1 { + uint32 version; + uint32 length; /**< Length of entire structure */ + uint32 reasons; /**< reason(s) for pm_alert */ + /* Following fields are present only for reasons + * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED + */ + uint32 prev_stats_time; /**< msecs */ + uint32 prev_pm_dur; /**< msecs */ + uint32 prev_mpc_dur; /**< msecs */ + pm_awake_data_v1_t awake_data; + uint32 start_event_dur[WLC_PMD_EVENT_MAX_V1]; /**< start event-duration */ + ucode_dbg_v2_t ud; + uint32 frts_time; /**< Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /**< No of times frts ended since driver load */ + ucode_dbg_ext_t ud_ext; + uint32 prev_frts_dur; /**< ms */ +} BWL_POST_PACKED_STRUCT pm_alert_data_v1_t; +#include + +/* End of 43342/4324 v2 structure definitions */ + +/* Original bus structure is for HSIC */ + +typedef struct bus_metrics { + uint32 suspend_ct; /**< suspend count */ + uint32 resume_ct; /**< resume count */ + uint32 disconnect_ct; /**< disconnect count */ + uint32 reconnect_ct; /**< reconnect count */ + uint32 active_dur; /**< msecs in bus, usecs for user */ + uint32 suspend_dur; /**< msecs in bus, usecs for user */ + uint32 disconnect_dur; /**< msecs in bus, usecs for user */ +} bus_metrics_t; + +/** Bus interface info for USB/HSIC */ +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_USB_HSIC */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + bus_metrics_t hsic; /**< stats from hsic bus driver */ +} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t; +#include + +/* PCIe Event counter tlv IDs */ +enum pcie_cnt_xtlv_id { + PCIE_CNT_XTLV_METRICS = 0x1, /**< PCIe Bus Metrics */ + PCIE_CNT_XTLV_BUS_CNT = 0x2 /**< PCIe Bus counters */ +}; + +typedef struct pcie_bus_metrics { + 
uint32 d3_suspend_ct; /**< suspend count */ + uint32 d0_resume_ct; /**< resume count */ + uint32 perst_assrt_ct; /**< PERST# assert count */ + uint32 perst_deassrt_ct; /**< PERST# de-assert count */ + uint32 active_dur; /**< msecs */ + uint32 d3_suspend_dur; /**< msecs */ + uint32 perst_dur; /**< msecs */ + uint32 l0_cnt; /**< L0 entry count */ + uint32 l0_usecs; /**< L0 duration in usecs */ + uint32 l1_cnt; /**< L1 entry count */ + uint32 l1_usecs; /**< L1 duration in usecs */ + uint32 l1_1_cnt; /**< L1_1ss entry count */ + uint32 l1_1_usecs; /**< L1_1ss duration in usecs */ + uint32 l1_2_cnt; /**< L1_2ss entry count */ + uint32 l1_2_usecs; /**< L1_2ss duration in usecs */ + uint32 l2_cnt; /**< L2 entry count */ + uint32 l2_usecs; /**< L2 duration in usecs */ + uint32 timestamp; /**< Timestamp on when stats are collected */ + uint32 num_h2d_doorbell; /**< # of doorbell interrupts - h2d */ + uint32 num_d2h_doorbell; /**< # of doorbell interrupts - d2h */ + uint32 num_submissions; /**< # of submissions */ + uint32 num_completions; /**< # of completions */ + uint32 num_rxcmplt; /**< # of rx completions */ + uint32 num_rxcmplt_drbl; /**< of drbl interrupts for rx complt. */ + uint32 num_txstatus; /**< # of tx completions */ + uint32 num_txstatus_drbl; /**< of drbl interrupts for tx complt. 
*/
+ uint32 deepsleep_count; /**< # of times chip went to deepsleep */
+ uint32 deepsleep_dur; /**< # of msecs chip was in deepsleep */
+ uint32 ltr_active_ct; /**< # of times chip went to LTR ACTIVE */
+ uint32 ltr_active_dur; /**< # of msecs chip was in LTR ACTIVE */
+ uint32 ltr_sleep_ct; /**< # of times chip went to LTR SLEEP */
+ uint32 ltr_sleep_dur; /**< # of msecs chip was in LTR SLEEP */
+} pcie_bus_metrics_t;
+
+typedef struct pcie_cnt {
+ uint32 ltr_state; /**< Current LTR state */
+ uint32 l0_sr_cnt; /**< SR count during L0 */
+ uint32 l2l3_sr_cnt; /**< SR count during L2L3 */
+ uint32 d3_ack_sr_cnt; /**< SR count during last D3-ACK */
+ uint32 d3_sr_cnt; /**< SR count during D3 */
+ uint32 d3_info_start; /**< D3 INFORM received time */
+ uint32 d3_info_enter_cnt; /**< # of D3 INFORM received */
+ uint32 d3_cnt; /**< # of real D3 */
+ uint32 d3_ack_sent_cnt; /**< # of D3 ACK sent count */
+ uint32 d3_drop_cnt_event; /**< # of events dropped during D3 */
+ uint32 d2h_req_q_len; /**< # of Packet pending in D2H request queue */
+ uint32 hw_reason; /**< Last Host wake assert reason */
+ uint32 hw_assert_cnt; /**< # of times Host wake Asserted */
+ uint32 host_ready_cnt; /**< # of Host ready interrupts */
+ uint32 hw_assert_reason_0; /**< timestamp when hw_reason is TRAP */
+ uint32 hw_assert_reason_1; /**< timestamp when hw_reason is WL_EVENT */
+ uint32 hw_assert_reason_2; /**< timestamp when hw_reason is DATA */
+ uint32 hw_assert_reason_3; /**< timestamp when hw_reason is DELAYED_WAKE */
+ uint32 last_host_ready; /**< Timestamp of last Host ready */
+ bool hw_asserted; /**< Flag to indicate if Host wake is Asserted */
+ bool event_delivery_pend; /**< No resources to send event */
+ uint16 pad; /**< Word alignment for scripts */
+} pcie_cnt_t;
+
+/** Bus interface info for PCIE */
+typedef struct wl_pwr_pcie_stats {
+ uint16 type; /**< WL_PWRSTATS_TYPE_PCIE */
+ uint16 len; /**< Up to 4K-1, top 4 bits are reserved */
+ pcie_bus_metrics_t pcie; /**< stats
from pcie bus driver */ +} wl_pwr_pcie_stats_t; + +/** Scan information history per category */ +typedef struct scan_data { + uint32 count; /**< Number of scans performed */ + uint32 dur; /**< Total time (in us) used */ +} scan_data_t; + +typedef struct wl_pwr_scan_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_SCAN */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + /* Scan history */ + scan_data_t user_scans; /**< User-requested scans: (i/e/p)scan */ + scan_data_t assoc_scans; /**< Scans initiated by association requests */ + scan_data_t roam_scans; /**< Scans initiated by the roam engine */ + scan_data_t pno_scans[8]; /**< For future PNO bucketing (BSSID, SSID, etc) */ + scan_data_t other_scans; /**< Scan engine usage not assigned to the above */ +} wl_pwr_scan_stats_t; + +typedef struct wl_pwr_connect_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_SCAN */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + /* Connection (Association + Key exchange) data */ + uint32 count; /**< Number of connections performed */ + uint32 dur; /**< Total time (in ms) used */ +} wl_pwr_connect_stats_t; + +typedef struct wl_pwr_phy_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_PHY */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + uint32 tx_dur; /**< TX Active duration in us */ + uint32 rx_dur; /**< RX Active duration in us */ +} wl_pwr_phy_stats_t; + +typedef struct wl_mimo_meas_metrics_v1 { + uint16 type; + uint16 len; + /* Total time(us) idle in MIMO RX chain configuration */ + uint32 total_idle_time_mimo; + /* Total time(us) idle in SISO RX chain configuration */ + uint32 total_idle_time_siso; + /* Total receive time (us) in SISO RX chain configuration */ + uint32 total_rx_time_siso; + /* Total receive time (us) in MIMO RX chain configuration */ + uint32 total_rx_time_mimo; + /* Total 1-chain transmit time(us) */ + uint32 total_tx_time_1chain; + /* Total 2-chain transmit time(us) */ + uint32 total_tx_time_2chain; + /* Total 3-chain transmit 
time(us) */ + uint32 total_tx_time_3chain; +} wl_mimo_meas_metrics_v1_t; + +typedef struct wl_mimo_meas_metrics { + uint16 type; + uint16 len; + /* Total time(us) idle in MIMO RX chain configuration */ + uint32 total_idle_time_mimo; + /* Total time(us) idle in SISO RX chain configuration */ + uint32 total_idle_time_siso; + /* Total receive time (us) in SISO RX chain configuration */ + uint32 total_rx_time_siso; + /* Total receive time (us) in MIMO RX chain configuration */ + uint32 total_rx_time_mimo; + /* Total 1-chain transmit time(us) */ + uint32 total_tx_time_1chain; + /* Total 2-chain transmit time(us) */ + uint32 total_tx_time_2chain; + /* Total 3-chain transmit time(us) */ + uint32 total_tx_time_3chain; + /* End of original, OCL fields start here */ + /* Total time(us) idle in ocl mode */ + uint32 total_idle_time_ocl; + /* Total receive time (us) in ocl mode */ + uint32 total_rx_time_ocl; + /* End of OCL fields, internal adjustment fields here */ + /* Total SIFS idle time in MIMO mode */ + uint32 total_sifs_time_mimo; + /* Total SIFS idle time in SISO mode */ + uint32 total_sifs_time_siso; +} wl_mimo_meas_metrics_t; + +typedef struct wl_pwr_slice_index { + uint16 type; /* WL_PWRSTATS_TYPE_SLICE_INDEX */ + uint16 len; + + uint32 slice_index; /* Slice index for which stats are meant for */ +} wl_pwr_slice_index_t; + +typedef struct wl_pwr_tsync_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_TSYNC */ + uint16 len; + uint32 avb_uptime; /**< AVB uptime in msec */ +} wl_pwr_tsync_stats_t; + +typedef struct wl_pwr_ops_stats { + uint16 type; /* WL_PWRSTATS_TYPE_OPS_STATS */ + uint16 len; /* total length includes fixed fields */ + uint32 partial_ops_dur; /* Total time(in usec) partial ops duration */ + uint32 full_ops_dur; /* Total time(in usec) full ops duration */ +} wl_pwr_ops_stats_t; + +typedef struct wl_pwr_bcntrim_stats { + uint16 type; /* WL_PWRSTATS_TYPE_BCNTRIM_STATS */ + uint16 len; /* total length includes fixed fields */ + uint8 associated; /* STA is 
associated ? */ + uint8 slice_idx; /* on which slice STA is associated */ + uint16 pad; /* padding */ + uint32 slice_beacon_seen; /* number of beacons seen on the Infra + * interface on this slice + */ + uint32 slice_beacon_trimmed; /* number beacons actually trimmed on this slice */ + uint32 total_beacon_seen; /* total number of beacons seen on the Infra interface */ + uint32 total_beacon_trimmed; /* total beacons actually trimmed */ +} wl_pwr_bcntrim_stats_t; + +typedef struct wl_pwr_slice_index_band { + uint16 type; /* WL_PWRSTATS_TYPE_SLICE_INDEX_BAND_INFO */ + uint16 len; /* Total length includes fixed fields */ + uint16 index; /* Slice Index */ + int16 bandtype; /* Slice Bandtype */ +} wl_pwr_slice_index_band_t; + +typedef struct wl_pwr_psbw_stats { + uint16 type; /* WL_PWRSTATS_TYPE_PSBW_STATS */ + uint16 len; /* total length includes fixed fields */ + uint8 slice_idx; /* on which slice STA is associated */ + uint8 pad[3]; + uint32 slice_enable_dur; /* time(ms) psbw remains enabled on this slice */ + uint32 total_enable_dur; /* time(ms) psbw remains enabled total */ +} wl_pwr_psbw_stats_t; + +/* ##### End of Power Stats section ##### */ + +/** IPV4 Arp offloads for ndis context */ +#include +BWL_PRE_PACKED_STRUCT struct hostip_id { + struct ipv4_addr ipa; + uint8 id; +} BWL_POST_PACKED_STRUCT; +#include + +/* Return values */ +#define ND_REPLY_PEER 0x1 /**< Reply was sent to service NS request from peer */ +#define ND_REQ_SINK 0x2 /**< Input packet should be discarded */ +#define ND_FORCE_FORWARD 0X3 /**< For the dongle to forward req to HOST */ + +/** Neighbor Solicitation Response Offload IOVAR param */ +#include +typedef BWL_PRE_PACKED_STRUCT struct nd_param { + struct ipv6_addr host_ip[2]; + struct ipv6_addr solicit_ip; + struct ipv6_addr remote_ip; + uint8 host_mac[ETHER_ADDR_LEN]; + uint32 offload_id; +} BWL_POST_PACKED_STRUCT nd_param_t; +#include + +typedef struct wl_pfn_roam_thresh { + uint32 pfn_alert_thresh; /**< time in ms */ + uint32 
roam_alert_thresh; /**< time in ms */
+} wl_pfn_roam_thresh_t;
+
+/* Reasons for wl_pmalert_t */
+#define PM_DUR_EXCEEDED (1<<0)
+#define MPC_DUR_EXCEEDED (1<<1)
+#define ROAM_ALERT_THRESH_EXCEEDED (1<<2)
+#define PFN_ALERT_THRESH_EXCEEDED (1<<3)
+#define CONST_AWAKE_DUR_ALERT (1<<4)
+#define CONST_AWAKE_DUR_RECOVERY (1<<5)
+
+#define MIN_PM_ALERT_LEN 9
+
+/** Data sent in EXCESS_PM_WAKE event */
+#define WL_PM_ALERT_VERSION 3
+
+/** This structure is for version 3; version 2 will be deprecated by FW */
+#include
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert {
+ uint16 version; /**< Version = 3 is TLV format */
+ uint16 length; /**< Length of entire structure */
+ uint32 reasons; /**< reason(s) for pm_alert */
+ uint8 data[1]; /**< TLV data, a series of structures,
+ * each starting with type and length.
+ *
+ * Padded as necessary so each section
+ * starts on a 4-byte boundary.
+ *
+ * Both type and len are uint16, but the
+ * upper nibble of length is reserved so
+ * valid len values are 0-4095.
+ */ +} BWL_POST_PACKED_STRUCT wl_pmalert_t; +#include + +/* Type values for the data section */ +#define WL_PMALERT_FIXED 0 /**< struct wl_pmalert_fixed_t, fixed fields */ +#define WL_PMALERT_PMSTATE 1 /**< struct wl_pmalert_pmstate_t, variable */ +#define WL_PMALERT_EVENT_DUR 2 /**< struct wl_pmalert_event_dur_t, variable */ +#define WL_PMALERT_UCODE_DBG 3 /**< struct wl_pmalert_ucode_dbg_v1, variable */ +#define WL_PMALERT_PS_ALLOWED_HIST 4 /**< struct wl_pmalert_ps_allowed_history, variable */ +#define WL_PMALERT_EXT_UCODE_DBG 5 /**< struct wl_pmalert_ext_ucode_dbg_t, variable */ +#define WL_PMALERT_EPM_START_EVENT_DUR 6 /**< struct wl_pmalert_event_dur_t, variable */ +#define WL_PMALERT_UCODE_DBG_V2 7 /**< struct wl_pmalert_ucode_dbg_v2, variable */ + +typedef struct wl_pmalert_fixed { + uint16 type; /**< WL_PMALERT_FIXED */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + uint32 prev_stats_time; /**< msecs */ + uint32 curr_time; /**< ms */ + uint32 prev_pm_dur; /**< msecs */ + uint32 pm_dur; /**< Total sleep time in PM, msecs */ + uint32 prev_mpc_dur; /**< msecs */ + uint32 mpc_dur; /**< Total sleep time in MPC, msecs */ + uint32 hw_macc; /**< HW maccontrol */ + uint32 sw_macc; /**< SW maccontrol */ + + /* int32 drifts = remote - local; +ve drift -> local-clk slow */ + int32 last_drift; /**< Most recent TSF drift from beacon */ + int32 min_drift; /**< Min TSF drift from beacon in magnitude */ + int32 max_drift; /**< Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /**< Avg TSF drift from beacon */ + uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */ + uint32 frts_time; /**< Cumulative ms spent in data frts since driver load */ + uint32 frts_end_cnt; /**< No of times frts ended since driver load */ + uint32 prev_frts_dur; /**< Data frts duration at start of pm-period */ + uint32 cal_dur; /**< Cumulative ms spent in calibration */ + uint32 prev_cal_dur; /**< cal duration at start of pm-period */ +} 
wl_pmalert_fixed_t; + +typedef struct wl_pmalert_pmstate { + uint16 type; /**< WL_PMALERT_PMSTATE */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + uint8 pmwake_idx; /**< for stepping through pm_state */ + uint8 pad[3]; + /* Array of pmstate; len of array is based on tlv len */ + wlc_pm_debug_t pmstate[1]; +} wl_pmalert_pmstate_t; + +typedef struct wl_pmalert_event_dur { + uint16 type; /**< WL_PMALERT_EVENT_DUR */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + /* Array of event_dur, len of array is based on tlv len */ + uint32 event_dur[1]; +} wl_pmalert_event_dur_t; + +#include +BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v1 { + uint16 type; /* WL_PMALERT_UCODE_DBG */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint32 macctrl; + uint16 m_p2p_hps; + uint32 psm_brc; + uint32 ifsstat; + uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; + uint32 psmdebug[20]; + uint32 phydebug[20]; + uint16 M_P2P_BSS[3][12]; + uint16 M_P2P_PRE_TBTT[3]; + + /* Following is valid only for corerevs<40 */ + uint16 xmtfifordy; + + /* Following 3 are valid only for 11ac corerevs (>=40) */ + uint16 psm_maccommand; + uint16 txe_status1; + uint16 AQMFifoReady; +} BWL_POST_PACKED_STRUCT; +#include + +#include +BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v2 { + uint16 type; /**< WL_PMALERT_UCODE_DBG_V2 */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + uint32 macctrl; + uint16 m_p2p_hps; + uint32 psm_brc; + uint32 ifsstat; + uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; + uint32 psmdebug[20]; + uint32 phydebug[20]; + uint16 M_P2P_BSS[3][12]; + uint16 M_P2P_PRE_TBTT[3]; + + /* Following is valid only for corerevs<40 */ + uint16 xmtfifordy; + + /* Following 3 are valid only for 11ac corerevs (>=40) */ + uint16 psm_maccommand; + uint16 txe_status1; + uint32 AQMFifoReady; +} BWL_POST_PACKED_STRUCT; +#include + +typedef struct wlc_ps_debug { + uint32 timestamp; /**< timestamp in millisecond */ + uint32 ps_mask; /**< reason(s) for 
disallowing ps */ +} wlc_ps_debug_t; + +typedef struct wl_pmalert_ps_allowed_hist { + uint16 type; /**< WL_PMALERT_PS_ALLOWED_HIST */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + uint32 ps_allowed_start_idx; + /* Array of ps_debug, len of array is based on tlv len */ + wlc_ps_debug_t ps_debug[1]; +} wl_pmalert_ps_allowed_hist_t; + +/* Structures and constants used for "vndr_ie" IOVar interface */ +#define VNDR_IE_CMD_LEN 4 /**< length of the set command string: + * "add", "del" (+ NUL) + */ + +#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32)) + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */ + vndr_ie_t vndr_ie_data; /**< vendor IE data */ +} BWL_POST_PACKED_STRUCT vndr_ie_info_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + int32 iecount; /**< number of entries in the vndr_ie_list[] array */ + vndr_ie_info_t vndr_ie_list[1]; /**< variable size list of vndr_ie_info_t structs */ +} BWL_POST_PACKED_STRUCT vndr_ie_buf_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + char cmd[VNDR_IE_CMD_LEN]; /**< vndr_ie IOVar set command : "add", "del" + NUL */ + vndr_ie_buf_t vndr_ie_buffer; /**< buffer containing Vendor IE list information */ +} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t; +#include + +/** tag_ID/length/value_buffer tuple */ +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 id; + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT tlv_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */ + tlv_t ie_data; /**< IE data */ +} BWL_POST_PACKED_STRUCT ie_info_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + int32 iecount; /**< number of entries in the ie_list[] array */ + ie_info_t ie_list[1]; /**< variable size list of ie_info_t structs */ +} BWL_POST_PACKED_STRUCT ie_buf_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct 
{ + char cmd[VNDR_IE_CMD_LEN]; /**< ie IOVar set command : "add" + NUL */ + ie_buf_t ie_buffer; /**< buffer containing IE list information */ +} BWL_POST_PACKED_STRUCT ie_setbuf_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */ + uint8 id; /**< IE type */ +} BWL_POST_PACKED_STRUCT ie_getbuf_t; +#include + +/* structures used to define format of wps ie data from probe requests */ +/* passed up to applications via iovar "prbreq_wpsie" */ +typedef struct sta_prbreq_wps_ie_hdr { + struct ether_addr staAddr; + uint16 ieLen; +} sta_prbreq_wps_ie_hdr_t; + +#include +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data { + sta_prbreq_wps_ie_hdr_t hdr; + uint8 ieData[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list { + uint32 totLen; + uint8 ieDataList[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 flags; + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint8 local_max; /**< local max according to the AP */ + uint8 local_constraint; /**< local constraint according to the AP */ + int8 antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 rf_cores; /**< count of RF Cores being reported */ + uint8 est_Pout[4]; /**< Latest tx power out estimate per RF chain */ + uint8 est_Pout_act[4]; /**< Latest tx power out estimate per RF chain w/o adjustment */ + uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */ + uint8 tx_power_max[4]; /**< Maximum target power among all rates */ + uint32 tx_power_max_rate_ind[4]; /**< Index of the rate with the max target power */ + int8 sar; /**< SAR limit for display by wl executable */ + int8 channel_bandwidth; /**< 20, 40 or 80 MHz bandwidth? 
*/
+ uint8 version; /**< Version of the data format wlu <--> driver */
+ uint8 display_core; /**< Displayed curpower core */
+ int8 target_offsets[4]; /**< Target power offsets for current rate per core */
+ uint32 last_tx_ratespec; /**< Ratespec for last transmission */
+ uint32 user_target; /**< user limit */
+ uint32 ppr_len; /**< length of each ppr serialization buffer */
+ int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+ uint8 pprdata[1]; /**< ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+#include
+
+#include
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ipv4_addr ipv4_addr;
+ struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
+#include
+
+#include
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 num_entry;
+ ibss_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
+#include
+
+#define MAX_IBSS_ROUTE_TBL_ENTRY 64
+
+#define TXPWR_TARGET_VERSION 0
+#include
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int32 version; /**< version number */
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ int8 txpwr[WL_STA_ANT_MAX]; /**< Max tx target power, in qdb */
+ uint8 rf_cores; /**< count of RF Cores being reported */
+} BWL_POST_PACKED_STRUCT txpwr_target_max_t;
+#include
+
+#define BSS_PEER_INFO_PARAM_CUR_VER 0
+/** Input structure for IOV_BSS_PEER_INFO */
+#include
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ struct ether_addr ea; /**< peer MAC address */
+} BWL_POST_PACKED_STRUCT bss_peer_info_param_t;
+#include
+
+#define BSS_PEER_INFO_CUR_VER 0
+
+#include
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ struct ether_addr ea;
+ int32 rssi;
+ uint32 tx_rate; /**< current tx rate */
+ uint32 rx_rate; /**< current rx rate */
+ wl_rateset_t rateset; /**< rateset in use */
+ uint32 age; /**< age in seconds */
+} BWL_POST_PACKED_STRUCT bss_peer_info_t;
+#include
+
+#define BSS_PEER_LIST_INFO_CUR_VER 0
+
+#include
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+
uint16 bss_peer_info_len; /**< length of bss_peer_info_t */ + uint32 count; /**< number of peer info */ + bss_peer_info_t peer_info[1]; /**< peer info */ +} BWL_POST_PACKED_STRUCT bss_peer_list_info_t; +#include + +#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info) + +#define AIBSS_BCN_FORCE_CONFIG_VER_0 0 + +/** structure used to configure AIBSS beacon force xmit */ +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 len; + uint32 initial_min_bcn_dur; /**< dur in ms to check a bcn in bcn_flood period */ + uint32 min_bcn_dur; /**< dur in ms to check a bcn after bcn_flood period */ + uint32 bcn_flood_dur; /**< Initial bcn xmit period in ms */ +} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t; +#include + +#define AIBSS_TXFAIL_CONFIG_VER_0 0 +#define AIBSS_TXFAIL_CONFIG_VER_1 1 +#define AIBSS_TXFAIL_CONFIG_CUR_VER AIBSS_TXFAIL_CONFIG_VER_1 + +/** structure used to configure aibss tx fail event */ +#include +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 len; + uint32 bcn_timeout; /**< dur in seconds to receive 1 bcn */ + uint32 max_tx_retry; /**< no of consecutive no acks to send txfail event */ + uint32 max_atim_failure; /**< no of consecutive atim failure */ +} BWL_POST_PACKED_STRUCT aibss_txfail_config_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if { + uint16 version; + uint16 len; + uint32 flags; + struct ether_addr addr; + chanspec_t chspec; +} BWL_POST_PACKED_STRUCT wl_aibss_if_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry { + struct ipv4_addr ip_addr; + struct ether_addr nexthop; +} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { + uint32 num_entry; + wlc_ipfo_route_entry_t route_entry[1]; +} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t; +#include + +/* Version of wlc_btc_stats_t structure. 
+ * Increment whenever a change is made to wlc_btc_stats_t + */ +#define BTCX_STATS_VER_4 4 +typedef struct wlc_btc_stats_v4 { + uint16 version; /* version number of struct */ + uint16 valid; /* Size of this struct */ + uint32 stats_update_timestamp; /* tStamp when data is updated. */ + uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */ + uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */ + uint32 bt_req_cnt; /* #BT antenna requests since last stats sampl */ + uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */ + uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */ + uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */ + uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */ + uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */ + uint16 bt_succ_pm_protect_cnt; /* successful PM protection */ + uint16 bt_succ_cts_cnt; /* successful CTS2A protection */ + uint16 bt_wlan_tx_preempt_cnt; /* WLAN TX Preemption */ + uint16 bt_wlan_rx_preempt_cnt; /* WLAN RX Preemption */ + uint16 bt_ap_tx_after_pm_cnt; /* AP TX even after PM protection */ + uint16 bt_peraud_cumu_gnt_cnt; /* Grant cnt for periodic audio */ + uint16 bt_peraud_cumu_deny_cnt; /* Deny cnt for periodic audio */ + uint16 bt_a2dp_cumu_gnt_cnt; /* Grant cnt for A2DP */ + uint16 bt_a2dp_cumu_deny_cnt; /* Deny cnt for A2DP */ + uint16 bt_sniff_cumu_gnt_cnt; /* Grant cnt for Sniff */ + uint16 bt_sniff_cumu_deny_cnt; /* Deny cnt for Sniff */ + uint16 bt_dcsn_map; /* Accumulated decision bitmap once Ant grant */ + uint16 bt_dcsn_cnt; /* Accumulated decision bitmap counters once Ant grant */ + uint16 bt_a2dp_hiwat_cnt; /* Ant grant by a2dp high watermark */ + uint16 bt_datadelay_cnt; /* Ant grant by acl/a2dp datadelay */ + uint16 bt_crtpri_cnt; /* Ant grant by critical BT task */ + uint16 bt_pri_cnt; /* Ant grant by high BT task */ + uint16 a2dpbuf1cnt; /* Ant request 
with a2dp buffercnt 1 */ + uint16 a2dpbuf2cnt; /* Ant request with a2dp buffercnt 2 */ + uint16 a2dpbuf3cnt; /* Ant request with a2dp buffercnt 3 */ + uint16 a2dpbuf4cnt; /* Ant request with a2dp buffercnt 4 */ + uint16 a2dpbuf5cnt; /* Ant request with a2dp buffercnt 5 */ + uint16 a2dpbuf6cnt; /* Ant request with a2dp buffercnt 6 */ + uint16 a2dpbuf7cnt; /* Ant request with a2dp buffercnt 7 */ + uint16 a2dpbuf8cnt; /* Ant request with a2dp buffercnt 8 */ + uint16 antgrant_lt10ms; /* Ant grant duration cnt 0~10ms */ + uint16 antgrant_lt30ms; /* Ant grant duration cnt 10~30ms */ + uint16 antgrant_lt60ms; /* Ant grant duration cnt 30~60ms */ + uint16 antgrant_ge60ms; /* Ant grant duration cnt 60~ms */ +} wlc_btc_stats_v4_t; + +#define BTCX_STATS_VER_3 3 + +typedef struct wlc_btc_stats_v3 { + uint16 version; /* version number of struct */ + uint16 valid; /* Size of this struct */ + uint32 stats_update_timestamp; /* tStamp when data is updated. */ + uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */ + uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */ + uint32 bt_req_cnt; /* #BT antenna requests since last stats sampl */ + uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */ + uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */ + uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */ + uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */ + uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */ + uint16 rsvd; /* pad to align struct to 32bit bndry */ + uint16 bt_succ_pm_protect_cnt; /* successful PM protection */ + uint16 bt_succ_cts_cnt; /* successful CTS2A protection */ + uint16 bt_wlan_tx_preempt_cnt; /* WLAN TX Preemption */ + uint16 bt_wlan_rx_preempt_cnt; /* WLAN RX Preemption */ + uint16 bt_ap_tx_after_pm_cnt; /* AP TX even after PM protection */ + uint16 bt_peraud_cumu_gnt_cnt; /* Grant cnt for periodic audio */ + 
uint16 bt_peraud_cumu_deny_cnt; /* Deny cnt for periodic audio */ + uint16 bt_a2dp_cumu_gnt_cnt; /* Grant cnt for A2DP */ + uint16 bt_a2dp_cumu_deny_cnt; /* Deny cnt for A2DP */ + uint16 bt_sniff_cumu_gnt_cnt; /* Grant cnt for Sniff */ + uint16 bt_sniff_cumu_deny_cnt; /* Deny cnt for Sniff */ + uint8 pad; /* Padding */ + uint8 slice_index; /* Slice to report */ +} wlc_btc_stats_v3_t; + +#define BTCX_STATS_VER_2 2 + +typedef struct wlc_btc_stats_v2 { + uint16 version; /* version number of struct */ + uint16 valid; /* Size of this struct */ + uint32 stats_update_timestamp; /* tStamp when data is updated. */ + uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */ + uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */ + uint32 bt_req_cnt; /* #BT antenna requests since last stats sampl */ + uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */ + uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */ + uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */ + uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */ + uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */ + uint16 rsvd; /* pad to align struct to 32bit bndry */ +} wlc_btc_stats_v2_t; + +#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4 +#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64 + + /* Global ASSERT Logging */ +#define ASSERTLOG_CUR_VER 0x0100 +#define MAX_ASSRTSTR_LEN 64 + + typedef struct assert_record { + uint32 time; + uint8 seq_num; + int8 str[MAX_ASSRTSTR_LEN]; + } assert_record_t; + + typedef struct assertlog_results { + uint16 version; + uint16 record_len; + uint32 num; + assert_record_t logs[1]; + } assertlog_results_t; + +#define LOGRRC_FIX_LEN 8 +#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) +/* BCMWAPI_WAI */ +#define IV_LEN 16 + struct wapi_sta_msg_t + { + uint16 msg_type; + uint16 datalen; + uint8 vap_mac[6]; + uint8 
reserve_data1[2]; + uint8 sta_mac[6]; + uint8 reserve_data2[2]; + uint8 gsn[IV_LEN]; + uint8 wie[TLV_BODY_LEN_MAX + TLV_HDR_LEN]; /* 257 */ + uint8 pad[3]; /* padding for alignment */ + }; +/* #endif BCMWAPI_WAI */ + /* chanim acs record */ + typedef struct { + uint8 valid; + uint8 trigger; + chanspec_t selected_chspc; + int8 bgnoise; + uint32 glitch_cnt; + uint8 ccastats; + uint8 chan_idle; + uint32 timestamp; + } chanim_acs_record_t; + + typedef struct { + chanim_acs_record_t acs_record[CHANIM_ACS_RECORD]; + uint8 count; + uint32 timestamp; + } wl_acs_record_t; + +#define WL_CHANIM_STATS_V2 2 +#define CCASTATS_V2_MAX 9 +typedef struct chanim_stats_v2 { + uint32 glitchcnt; /**< normalized as per second count */ + uint32 badplcp; /**< normalized as per second count */ + uint8 ccastats[CCASTATS_V2_MAX]; /**< normalized as 0-255 */ + int8 bgnoise; /**< background noise level (in dBm) */ + chanspec_t chanspec; /**< ctrl chanspec of the interface */ + uint32 timestamp; /**< time stamp at which the stats are collected */ + uint32 bphy_glitchcnt; /**< normalized as per second count */ + uint32 bphy_badplcp; /**< normalized as per second count */ + uint8 chan_idle; /**< normalized as 0~255 */ + uint8 PAD[3]; +} chanim_stats_v2_t; + +typedef struct chanim_stats { + uint32 glitchcnt; /**< normalized as per second count */ + uint32 badplcp; /**< normalized as per second count */ + uint8 ccastats[CCASTATS_MAX]; /**< normalized as 0-255 */ + int8 bgnoise; /**< background noise level (in dBm) */ + uint8 pad_1[11 - CCASTATS_MAX]; + chanspec_t chanspec; /**< ctrl chanspec of the interface */ + uint8 pad_2[2]; + uint32 timestamp; /**< time stamp at which the stats are collected */ + uint32 bphy_glitchcnt; /**< normalized as per second count */ + uint32 bphy_badplcp; /**< normalized as per second count */ + uint8 chan_idle; /**< normalized as 0~255 */ + uint8 PAD[3]; +} chanim_stats_t; + +#define WL_CHANIM_STATS_VERSION 3 +typedef struct { + uint32 buflen; + uint32 version; + 
uint32 count; + chanim_stats_t stats[1]; +} wl_chanim_stats_t; + +#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats) + +/** Noise measurement metrics. */ +#define NOISE_MEASURE_KNOISE 0x1 + +/** scb probe parameter */ +typedef struct { + uint32 scb_timeout; + uint32 scb_activity_time; + uint32 scb_max_probe; +} wl_scb_probe_t; + +/* structure/defines for selective mgmt frame (smf) stats support */ + +#define SMFS_VERSION 1 +/** selected mgmt frame (smf) stats element */ +typedef struct wl_smfs_elem { + uint32 count; + uint16 code; /**< SC or RC code */ + uint8 PAD[2]; +} wl_smfs_elem_t; + +typedef struct wl_smf_stats { + uint32 version; + uint16 length; /**< reserved for future usage */ + uint8 type; + uint8 codetype; + uint32 ignored_cnt; + uint32 malformed_cnt; + uint32 count_total; /**< count included the interested group */ + wl_smfs_elem_t elem[1]; +} wl_smf_stats_t; + +#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem); + +enum { + SMFS_CODETYPE_SC, + SMFS_CODETYPE_RC +}; + +typedef enum smfs_type { + SMFS_TYPE_AUTH, + SMFS_TYPE_ASSOC, + SMFS_TYPE_REASSOC, + SMFS_TYPE_DISASSOC_TX, + SMFS_TYPE_DISASSOC_RX, + SMFS_TYPE_DEAUTH_TX, + SMFS_TYPE_DEAUTH_RX, + SMFS_TYPE_MAX +} smfs_type_t; + +/* #ifdef PHYMON */ + +#define PHYMON_VERSION 1 + +typedef struct wl_phycal_core_state { + /* Tx IQ/LO calibration coeffs */ + int16 tx_iqlocal_a; + int16 tx_iqlocal_b; + int8 tx_iqlocal_ci; + int8 tx_iqlocal_cq; + int8 tx_iqlocal_di; + int8 tx_iqlocal_dq; + int8 tx_iqlocal_ei; + int8 tx_iqlocal_eq; + int8 tx_iqlocal_fi; + int8 tx_iqlocal_fq; + + /** Rx IQ calibration coeffs */ + int16 rx_iqcal_a; + int16 rx_iqcal_b; + + uint8 tx_iqlocal_pwridx; /**< Tx Power Index for Tx IQ/LO calibration */ + uint8 PAD[3]; + uint32 papd_epsilon_table[64]; /**< PAPD epsilon table */ + int16 papd_epsilon_offset; /**< PAPD epsilon offset */ + uint8 curr_tx_pwrindex; /**< Tx power index */ + int8 idle_tssi; /**< Idle TSSI */ + int8 est_tx_pwr; /**< Estimated Tx Power 
(dB) */ + int8 est_rx_pwr; /**< Estimated Rx Power (dB) from RSSI */ + uint16 rx_gaininfo; /**< Rx gain applied on last Rx pkt */ + uint16 init_gaincode; /**< initgain required for ACI */ + int8 estirr_tx; + int8 estirr_rx; +} wl_phycal_core_state_t; + +typedef struct wl_phycal_state { + int32 version; + int8 num_phy_cores; /**< number of cores */ + int8 curr_temperature; /**< on-chip temperature sensor reading */ + chanspec_t chspec; /**< channspec for this state */ + uint8 aci_state; /**< ACI state: ON/OFF */ + uint8 PAD; + uint16 crsminpower; /**< crsminpower required for ACI */ + uint16 crsminpowerl; /**< crsminpowerl required for ACI */ + uint16 crsminpoweru; /**< crsminpoweru required for ACI */ + wl_phycal_core_state_t phycal_core[1]; +} wl_phycal_state_t; + +#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core) +/* endif PHYMON */ + +/** discovery state */ +typedef struct wl_p2p_disc_st { + uint8 state; /**< see state */ + uint8 PAD; + chanspec_t chspec; /**< valid in listen state */ + uint16 dwell; /**< valid in listen state, in ms */ +} wl_p2p_disc_st_t; + +/** scan request */ +typedef struct wl_p2p_scan { + uint8 type; /**< 'S' for WLC_SCAN, 'E' for "escan" */ + uint8 reserved[3]; + /* scan or escan parms... 
*/ +} wl_p2p_scan_t; + +/** i/f request */ +typedef struct wl_p2p_if { + struct ether_addr addr; + uint8 type; /**< see i/f type */ + uint8 PAD; + chanspec_t chspec; /**< for p2p_ifadd GO */ +} wl_p2p_if_t; + +/** i/f query */ +typedef struct wl_p2p_ifq { + uint32 bsscfgidx; + char ifname[BCM_MSG_IFNAME_MAX]; +} wl_p2p_ifq_t; + +/** OppPS & CTWindow */ +typedef struct wl_p2p_ops { + uint8 ops; /**< 0: disable 1: enable */ + uint8 ctw; /**< >= 10 */ +} wl_p2p_ops_t; + +/** absence and presence request */ +typedef struct wl_p2p_sched_desc { + uint32 start; + uint32 interval; + uint32 duration; + uint32 count; /**< see count */ +} wl_p2p_sched_desc_t; + +typedef struct wl_p2p_sched { + uint8 type; /**< see schedule type */ + uint8 action; /**< see schedule action */ + uint8 option; /**< see schedule option */ + uint8 PAD; + wl_p2p_sched_desc_t desc[1]; +} wl_p2p_sched_t; + +typedef struct wl_p2p_wfds_hash { + uint32 advt_id; + uint16 nw_cfg_method; + uint8 wfds_hash[6]; + uint8 name_len; + uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; + uint8 PAD[3]; +} wl_p2p_wfds_hash_t; + +typedef struct wl_p2p_config_params { + uint16 enable; /**< 0: disable 1: enable */ + uint16 chanspec; /* GO chanspec */ + wlc_ssid_t ssid; /* SSID */ +} wl_p2p_config_params_t; + +typedef struct wl_bcmdcs_data { + uint32 reason; + chanspec_t chspec; + uint8 PAD[2]; +} wl_bcmdcs_data_t; +/* ifdef EXT_STA */ +/** + * Format of IHV data passed to OID_DOT11_NIC_SPECIFIC_EXTENSION. 
+ */ +typedef struct _IHV_NIC_SPECIFIC_EXTENSION { + uint8 oui[4]; /**< vendor specific OUI value */ + uint32 event; /**< event code */ + uint8 ihvData[1]; /**< ihv data */ +} IHV_NIC_SPECIFIC_EXTENSION, *PIHV_NIC_SPECIFIC_EXTENSION; +#define IHV_NIC_SPECIFIC_EXTENTION_HEADER OFFSETOF(IHV_NIC_SPECIFIC_EXTENSION, ihvData[0]) +/* EXT_STA */ +/** NAT configuration */ +typedef struct { + uint32 ipaddr; /**< interface ip address */ + uint32 ipaddr_mask; /**< interface ip address mask */ + uint32 ipaddr_gateway; /**< gateway ip address */ + uint8 mac_gateway[6]; /**< gateway mac address */ + uint8 PAD[2]; + uint32 ipaddr_dns; /**< DNS server ip address, valid only for public if */ + uint8 mac_dns[6]; /**< DNS server mac address, valid only for public if */ + uint8 GUID[38]; /**< interface GUID */ +} nat_if_info_t; + +typedef struct { + uint32 op; /**< operation code */ + uint8 pub_if; /**< set for public if, clear for private if */ + uint8 PAD[3]; + nat_if_info_t if_info; /**< interface info */ +} nat_cfg_t; + +typedef struct { + int32 state; /**< NAT state returned */ +} nat_state_t; + +typedef struct flush_txfifo { + uint32 txfifobmp; + uint32 hwtxfifoflush; + struct ether_addr ea; + uint8 PAD[2]; +} flush_txfifo_t; + +enum { + SPATIAL_MODE_2G_IDX = 0, + SPATIAL_MODE_5G_LOW_IDX, + SPATIAL_MODE_5G_MID_IDX, + SPATIAL_MODE_5G_HIGH_IDX, + SPATIAL_MODE_5G_UPPER_IDX, + SPATIAL_MODE_MAX_IDX +}; + +#define WLC_TXCORE_MAX 4 /**< max number of txcore supports */ +#define WLC_TXCORE_MAX_OLD 2 /**< backward compatibilty for TXCAL */ +#define WLC_SUBBAND_MAX 4 /**< max number of sub-band supports */ +typedef struct { + uint8 band2g[WLC_TXCORE_MAX]; + uint8 band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX]; +} sar_limit_t; + +#define MAX_NUM_TXCAL_MEAS 128 +#define MAX_NUM_PWR_STEP 40 +#define TXCAL_IOVAR_VERSION 0x1 + +#define TXCAL_GAINSWEEP_VER (TXCAL_GAINSWEEP_VERSION_V2) +#define TXCAL_GAINSWEEP_VERSION_V2 2 + +/* Below macro defines the latest txcal iovar version updated */ +/* This 
macro also reflects in the 'txcal_ver' iovar */ +#define TXCAL_IOVAR_LATEST TXCAL_GAINSWEEP_VER + +/* below are used for bphy/ofdm separated LSC */ +#define TXCAL_PWR_BPHY 0 +#define TXCAL_PWR_OFDM 1 + +typedef struct wl_txcal_meas_percore { + uint16 tssi[MAX_NUM_TXCAL_MEAS]; + int16 pwr[MAX_NUM_TXCAL_MEAS]; +} wl_txcal_meas_percore_t; + +typedef struct wl_txcal_meas_ncore { + uint16 version; + uint8 valid_cnt; + uint8 num_core; + wl_txcal_meas_percore_t txcal_percore[1]; +} wl_txcal_meas_ncore_t; + +typedef struct wl_txcal_power_tssi_percore { + int16 tempsense; + int16 pwr_start; + uint8 pwr_start_idx; + uint8 num_entries; + uint16 pad; + uint8 tssi[MAX_NUM_PWR_STEP]; +} wl_txcal_power_tssi_percore_t; + +typedef struct wl_txcal_power_tssi_ncore { + uint16 version; + uint8 set_core; + uint8 channel; + uint8 num_core; + uint8 gen_tbl; + uint8 ofdm; + uint8 pad; + wl_txcal_power_tssi_percore_t tssi_percore[4]; +} wl_txcal_power_tssi_ncore_t; + +typedef struct wl_txcal_meas { + uint16 tssi[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS]; + int16 pwr[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS]; + uint8 valid_cnt; + uint8 PAD; +} wl_txcal_meas_t; + +typedef struct wl_txcal_meas_old { + uint16 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS]; + int16 pwr[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS]; + uint8 valid_cnt; + uint8 PAD; +} wl_txcal_meas_old_t; + +typedef struct wl_txcal_power_tssi { + uint8 set_core; + uint8 channel; + int16 tempsense[WLC_TXCORE_MAX]; + int16 pwr_start[WLC_TXCORE_MAX]; + uint8 pwr_start_idx[WLC_TXCORE_MAX]; + uint8 num_entries[WLC_TXCORE_MAX]; + uint8 tssi[WLC_TXCORE_MAX][MAX_NUM_PWR_STEP]; + uint8 gen_tbl; + uint8 ofdm; +} wl_txcal_power_tssi_t; + +typedef struct wl_txcal_power_tssi_old { + uint8 set_core; + uint8 channel; + int16 tempsense[WLC_TXCORE_MAX_OLD]; + int16 pwr_start[WLC_TXCORE_MAX_OLD]; + uint8 pwr_start_idx[WLC_TXCORE_MAX_OLD]; + uint8 num_entries[WLC_TXCORE_MAX_OLD]; + uint8 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_PWR_STEP]; + uint8 gen_tbl; + uint8 ofdm; +} 
wl_txcal_power_tssi_old_t; + +typedef struct wl_olpc_pwr { + uint16 version; + uint8 core; + uint8 channel; + int16 tempsense; + uint8 olpc_idx; + uint8 ofdm; +} wl_olpc_pwr_t; + +typedef struct wl_rfem_temp_vdet_temp { + uint8 vdet_fem_t1; + int8 rfem_temp_t1; + uint8 vdet_fem_t2; + int8 rfem_temp_t2; +} wl_rfem_temp_vdet_temp_t; + +typedef struct wl_rfem_temp_vin_tssi { + uint16 vin_chip_v1; + int16 tssi_chip_v1; + uint16 vin_chip_v2; + int16 tssi_chip_v2; +} wl_rfem_temp_vin_tssi_t; + +typedef struct wl_txcal_tempsense { + uint16 version; + uint8 valid_cnt; + uint8 core; + int16 ref_temperature; + int16 meas_temperature; + wl_rfem_temp_vdet_temp_t vdet_temp; + wl_rfem_temp_vin_tssi_t vin_tssi; +} wl_txcal_tempsense_t; + +/** IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */ +typedef struct wl_mempool_stats { + int32 num; /**< Number of memory pools */ + bcm_mp_stats_t s[1]; /**< Variable array of memory pool stats. */ +} wl_mempool_stats_t; + +typedef struct { + uint32 ipaddr; + uint32 ipaddr_netmask; + uint32 ipaddr_gateway; +} nwoe_ifconfig_t; + +/* Both powersel_params and lpc_params are used by IOVAR lpc_params. + * The powersel_params is replaced by lpc_params in later WLC versions. 
+ */ +typedef struct powersel_params { + /* LPC Params exposed via IOVAR */ + int32 tp_ratio_thresh; /**< Throughput ratio threshold */ + uint8 rate_stab_thresh; /**< Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /**< Number of successes before power step down */ + uint8 pwr_sel_exp_time; /**< Time lapse for expiry of database */ + uint8 PAD; +} powersel_params_t; + +#define WL_LPC_PARAMS_VER_2 2 +#define WL_LPC_PARAMS_CURRENT_VERSION WL_LPC_PARAMS_VER_2 + +typedef struct lpc_params { + uint16 version; + uint16 length; + /* LPC Params exposed via IOVAR */ + uint8 rate_stab_thresh; /**< Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /**< Number of successes before power step down */ + uint8 lpc_exp_time; /**< Time lapse for expiry of database */ + uint8 pwrup_slow_step; /**< Step size for slow step up */ + uint8 pwrup_fast_step; /**< Step size for fast step up */ + uint8 pwrdn_slow_step; /**< Step size for slow step down */ +} lpc_params_t; + +/* tx pkt delay statistics */ +#define SCB_RETRY_SHORT_DEF 7 /**< Default Short retry Limit */ +#define WLPKTDLY_HIST_NBINS 16 /**< number of bins used in the Delay histogram */ + +/** structure to store per-AC delay statistics */ +typedef struct scb_delay_stats { + uint32 txmpdu_lost; /**< number of MPDUs lost */ + uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /**< retry times histogram */ + uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /**< cumulative packet latency */ + uint32 delay_min; /**< minimum packet latency observed */ + uint32 delay_max; /**< maximum packet latency observed */ + uint32 delay_avg; /**< packet latency average */ + uint32 delay_hist[WLPKTDLY_HIST_NBINS]; /**< delay histogram */ + uint32 delay_count; /**< minimum number of time period units before + consequent packet delay events can be generated + */ + uint32 prev_txmpdu_cnt; /**< Previous value of txmpdu_cnt[] during last iteration */ + uint32 prev_delay_sum; /**< Previous value of delay_sum[] during last iteration */ +} 
scb_delay_stats_t; + +/** structure for txdelay event */ +typedef struct txdelay_event { + uint8 status; + uint8 PAD[3]; + int32 rssi; + chanim_stats_t chanim_stats; + scb_delay_stats_t delay_stats[AC_COUNT]; +} txdelay_event_t; + +/** structure for txdelay parameters */ +typedef struct txdelay_params { + uint16 ratio; /**< Avg Txdelay Delta */ + uint8 cnt; /**< Sample cnt */ + uint8 period; /**< Sample period */ + uint8 tune; /**< Debug */ + uint8 PAD; +} txdelay_params_t; +#define MAX_TXDELAY_STATS_SCBS 6 +#define TXDELAY_STATS_VERSION 1 + +enum { + TXDELAY_STATS_PARTIAL_RESULT = 0, + TXDELAY_STATS_FULL_RESULT = 1 +}; + +typedef struct scb_total_delay_stats { + struct ether_addr ea; + uint8 pad[2]; + scb_delay_stats_t dlystats[AC_COUNT]; +} scb_total_delay_stats_t; + +typedef struct txdelay_stats { + uint32 version; + uint32 full_result; /* 0:Partial, 1:full */ + uint32 scb_cnt; /* in:requested, out:returned */ + scb_total_delay_stats_t scb_delay_stats[1]; +} txdelay_stats_t; + +#define WL_TXDELAY_STATS_FIXED_SIZE \ + (sizeof(txdelay_stats_t)+(MAX_TXDELAY_STATS_SCBS-1)*sizeof(scb_total_delay_stats_t)) +enum { + WNM_SERVICE_DMS = 1, + WNM_SERVICE_FMS = 2, + WNM_SERVICE_TFS = 3 +}; + +/** Definitions for WNM/NPS TCLAS */ +typedef struct wl_tclas { + uint8 user_priority; + uint8 fc_len; + dot11_tclas_fc_t fc; +} wl_tclas_t; + +#define WL_TCLAS_FIXED_SIZE OFFSETOF(wl_tclas_t, fc) + +typedef struct wl_tclas_list { + uint32 num; + wl_tclas_t tclas[]; +} wl_tclas_list_t; + +/** Definitions for WNM/NPS Traffic Filter Service */ +typedef struct wl_tfs_req { + uint8 tfs_id; + uint8 tfs_actcode; + uint8 tfs_subelem_id; + uint8 send; +} wl_tfs_req_t; + +typedef struct wl_tfs_filter { + uint8 status; /**< Status returned by the AP */ + uint8 tclas_proc; /**< TCLAS processing value (0:and, 1:or) */ + uint8 tclas_cnt; /**< count of all wl_tclas_t in tclas array */ + uint8 tclas[1]; /**< VLA of wl_tclas_t */ +} wl_tfs_filter_t; +#define WL_TFS_FILTER_FIXED_SIZE 
OFFSETOF(wl_tfs_filter_t, tclas) + +typedef struct wl_tfs_fset { + struct ether_addr ea; /**< Address of AP/STA involved with this filter set */ + uint8 tfs_id; /**< TFS ID field chosen by STA host */ + uint8 status; /**< Internal status TFS_STATUS_xxx */ + uint8 actcode; /**< Action code DOT11_TFS_ACTCODE_xxx */ + uint8 token; /**< Token used in last request frame */ + uint8 notify; /**< Notify frame sent/received because of this set */ + uint8 filter_cnt; /**< count of all wl_tfs_filter_t in filter array */ + uint8 filter[1]; /**< VLA of wl_tfs_filter_t */ +} wl_tfs_fset_t; +#define WL_TFS_FSET_FIXED_SIZE OFFSETOF(wl_tfs_fset_t, filter) + +enum { + TFS_STATUS_DISABLED = 0, /**< TFS filter set disabled by user */ + TFS_STATUS_DISABLING = 1, /**< Empty request just sent to AP */ + TFS_STATUS_VALIDATED = 2, /**< Filter set validated by AP (but maybe not enabled!) */ + TFS_STATUS_VALIDATING = 3, /**< Filter set just sent to AP */ + TFS_STATUS_NOT_ASSOC = 4, /**< STA not associated */ + TFS_STATUS_NOT_SUPPORT = 5, /**< TFS not supported by AP */ + TFS_STATUS_DENIED = 6, /**< Filter set refused by AP (=> all sets are disabled!) 
*/ +}; + +typedef struct wl_tfs_status { + uint8 fset_cnt; /**< count of all wl_tfs_fset_t in fset array */ + wl_tfs_fset_t fset[1]; /**< VLA of wl_tfs_fset_t */ +} wl_tfs_status_t; + +typedef struct wl_tfs_set { + uint8 send; /**< Immediately register registered sets on AP side */ + uint8 tfs_id; /**< ID of a specific set (existing or new), or null for all */ + uint8 actcode; /**< Action code for this filter set */ + uint8 tclas_proc; /**< TCLAS processing operator for this filter set */ +} wl_tfs_set_t; + +typedef struct wl_tfs_term { + uint8 del; /**< Delete internal set once confirmation received */ + uint8 tfs_id; /**< ID of a specific set (existing), or null for all */ +} wl_tfs_term_t; + +#define DMS_DEP_PROXY_ARP (1 << 0) + +/* Definitions for WNM/NPS Directed Multicast Service */ +enum { + DMS_STATUS_DISABLED = 0, /**< DMS desc disabled by user */ + DMS_STATUS_ACCEPTED = 1, /**< Request accepted by AP */ + DMS_STATUS_NOT_ASSOC = 2, /**< STA not associated */ + DMS_STATUS_NOT_SUPPORT = 3, /**< DMS not supported by AP */ + DMS_STATUS_DENIED = 4, /**< Request denied by AP */ + DMS_STATUS_TERM = 5, /**< Request terminated by AP */ + DMS_STATUS_REMOVING = 6, /**< Remove request just sent */ + DMS_STATUS_ADDING = 7, /**< Add request just sent */ + DMS_STATUS_ERROR = 8, /**< Non-compliant AP behavior */ + DMS_STATUS_IN_PROGRESS = 9, /**< Request just sent */ + DMS_STATUS_REQ_MISMATCH = 10 /**< Conditions for sending DMS req not met */ +}; + +typedef struct wl_dms_desc { + uint8 user_id; + uint8 status; + uint8 token; + uint8 dms_id; + uint8 tclas_proc; + uint8 mac_len; /**< length of all ether_addr in data array, 0 if STA */ + uint8 tclas_len; /**< length of all wl_tclas_t in data array */ + uint8 data[1]; /**< VLA of 'ether_addr' and 'wl_tclas_t' (in this order) */ +} wl_dms_desc_t; + +#define WL_DMS_DESC_FIXED_SIZE OFFSETOF(wl_dms_desc_t, data) + +typedef struct wl_dms_status { + uint32 cnt; + wl_dms_desc_t desc[1]; +} wl_dms_status_t; + +typedef struct wl_dms_set 
{ + uint8 send; + uint8 user_id; + uint8 tclas_proc; +} wl_dms_set_t; + +typedef struct wl_dms_term { + uint8 del; + uint8 user_id; +} wl_dms_term_t; + +typedef struct wl_service_term { + uint8 service; + union { + wl_dms_term_t dms; + } u; +} wl_service_term_t; + +/** Definitions for WNM/NPS BSS Transition */ +#define WL_BSSTRANS_QUERY_VERSION_1 1 +typedef struct wl_bsstrans_query { + uint16 version; /* structure version */ + uint16 pad0; /* padding for 4-byte alignment */ + wlc_ssid_t ssid; /* SSID of NBR elem to be queried for */ + uint8 reason; /* Reason code of the BTQ */ + uint8 pad1[3]; /* padding for 4-byte alignment */ +} wl_bsstrans_query_t; + +#define BTM_QUERY_NBR_COUNT_MAX 16 + +#define WL_BTQ_NBR_LIST_VERSION_1 1 +typedef struct wl_btq_nbr_list { + uint16 version; /* structure version */ + uint8 count; /* No. of BTQ NBRs returned */ + uint8 pad; /* padding for 4-byte alignment */ + nbr_rpt_elem_t btq_nbt_elem[]; /* BTQ NBR elem in a BTQ NBR list */ +} wl_btq_nbr_list_t; + +typedef struct wl_bsstrans_req { + uint16 tbtt; /**< time of BSS to end of life, in unit of TBTT */ + uint16 dur; /**< time of BSS to keep off, in unit of minute */ + uint8 reqmode; /**< request mode of BSS transition request */ + uint8 unicast; /**< request by unicast or by broadcast */ +} wl_bsstrans_req_t; + +enum { + BSSTRANS_RESP_AUTO = 0, /**< Currently equivalent to ENABLE */ + BSSTRANS_RESP_DISABLE = 1, /**< Never answer BSS Trans Req frames */ + BSSTRANS_RESP_ENABLE = 2, /**< Always answer Req frames with preset data */ + BSSTRANS_RESP_WAIT = 3, /**< Send ind, wait and/or send preset data (NOT IMPL) */ + BSSTRANS_RESP_IMMEDIATE = 4 /**< After an ind, set data and send resp (NOT IMPL) */ +}; + +typedef struct wl_bsstrans_resp { + uint8 policy; + uint8 status; + uint8 delay; + struct ether_addr target; +} wl_bsstrans_resp_t; + +/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception. 
+ * BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF, + * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan + * mandates different behavior on receiving BSS-transition request. To accommodate + * such divergent behaviors these policies have been created. + */ +typedef enum { + WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0, /**< Roam (or disassociate) in all cases */ + WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1, /**< Roam only if requested by Request Mode field */ + WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2, /**< Roam only if Preferred BSS provided */ + WL_BSSTRANS_POLICY_WAIT = 3, /**< Wait for deauth and send Accepted status */ + WL_BSSTRANS_POLICY_PRODUCT = 4, /**< Policy for real product use cases (Olympic) */ + WL_BSSTRANS_POLICY_PRODUCT_WBTEXT = 5, /**< Policy for real product use cases (SS) */ + WL_BSSTRANS_POLICY_MBO = 6, /**< Policy for MBO certification */ + WL_BSSTRANS_POLICY_MAX = 7 +} wnm_bsstrans_policy_type_t; + +/** Definitions for WNM/NPS TIM Broadcast */ +typedef struct wl_timbc_offset { + int16 offset; /**< offset in us */ + uint16 fix_intv; /**< override interval sent from STA */ + uint16 rate_override; /**< use rate override to send high rate TIM broadcast frame */ + uint8 tsf_present; /**< show timestamp in TIM broadcast frame */ + uint8 PAD; +} wl_timbc_offset_t; + +typedef struct wl_timbc_set { + uint8 interval; /**< Interval in DTIM wished or required. */ + uint8 flags; /**< Bitfield described below */ + uint16 rate_min; /**< Minimum rate required for High/Low TIM frames. Optional */ + uint16 rate_max; /**< Maximum rate required for High/Low TIM frames. Optional */ +} wl_timbc_set_t; + +enum { + WL_TIMBC_SET_TSF_REQUIRED = 1, /**< Enable TIMBC only if TSF in TIM frames */ + WL_TIMBC_SET_NO_OVERRIDE = 2, /**< ... if AP does not override interval */ + WL_TIMBC_SET_PROXY_ARP = 4, /**< ... if AP supports Proxy ARP */ + WL_TIMBC_SET_DMS_ACCEPTED = 8 /**< ... 
if all DMS desc have been accepted */ +}; + +typedef struct wl_timbc_status { + uint8 status_sta; /**< Status from internal state machine (check below) */ + uint8 status_ap; /**< From AP response frame (check 8.4.2.86 from 802.11) */ + uint8 interval; + uint8 pad; + int32 offset; + uint16 rate_high; + uint16 rate_low; +} wl_timbc_status_t; + +enum { + WL_TIMBC_STATUS_DISABLE = 0, /**< TIMBC disabled by user */ + WL_TIMBC_STATUS_REQ_MISMATCH = 1, /**< AP settings do no match user requirements */ + WL_TIMBC_STATUS_NOT_ASSOC = 2, /**< STA not associated */ + WL_TIMBC_STATUS_NOT_SUPPORT = 3, /**< TIMBC not supported by AP */ + WL_TIMBC_STATUS_DENIED = 4, /**< Req to disable TIMBC sent to AP */ + WL_TIMBC_STATUS_ENABLE = 5 /**< TIMBC enabled */ +}; + +/** Definitions for PM2 Dynamic Fast Return To Sleep */ +typedef struct wl_pm2_sleep_ret_ext { + uint8 logic; /**< DFRTS logic: see WL_DFRTS_LOGIC_* below */ + uint8 PAD; + uint16 low_ms; /**< Low FRTS timeout */ + uint16 high_ms; /**< High FRTS timeout */ + uint16 rx_pkts_threshold; /**< switching threshold: # rx pkts */ + uint16 tx_pkts_threshold; /**< switching threshold: # tx pkts */ + uint16 txrx_pkts_threshold; /**< switching threshold: # (tx+rx) pkts */ + uint32 rx_bytes_threshold; /**< switching threshold: # rx bytes */ + uint32 tx_bytes_threshold; /**< switching threshold: # tx bytes */ + uint32 txrx_bytes_threshold; /**< switching threshold: # (tx+rx) bytes */ +} wl_pm2_sleep_ret_ext_t; + +#define WL_DFRTS_LOGIC_OFF 0 /**< Feature is disabled */ +#define WL_DFRTS_LOGIC_OR 1 /**< OR all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_AND 2 /**< AND all non-zero threshold conditions */ + +/* Values for the passive_on_restricted_mode iovar. When set to non-zero, this iovar + * disables automatic conversions of a channel from passively scanned to + * actively scanned. These values only have an effect for country codes such + * as XZ where some 5 GHz channels are defined to be passively scanned. 
+ */ +#define WL_PASSACTCONV_DISABLE_NONE 0 /**< Enable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_ALL 1 /**< Disable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_PERM 2 /**< Disable only permanent conversions */ + +/* Definitions for Reliable Multicast */ +#define WL_RMC_CNT_VERSION 1 +#define WL_RMC_TR_VERSION 1 +#define WL_RMC_MAX_CLIENT 32 +#define WL_RMC_FLAG_INBLACKLIST 1 +#define WL_RMC_FLAG_ACTIVEACKER 2 +#define WL_RMC_FLAG_RELMCAST 4 +#define WL_RMC_MAX_TABLE_ENTRY 4 + +#define WL_RMC_VER 1 +#define WL_RMC_INDEX_ACK_ALL 255 +#define WL_RMC_NUM_OF_MC_STREAMS 4 +#define WL_RMC_MAX_TRS_PER_GROUP 1 +#define WL_RMC_MAX_TRS_IN_ACKALL 1 +#define WL_RMC_ACK_MCAST0 0x02 +#define WL_RMC_ACK_MCAST_ALL 0x01 +#define WL_RMC_ACTF_TIME_MIN 300 /**< time in ms */ +#define WL_RMC_ACTF_TIME_MAX 20000 /**< time in ms */ +#define WL_RMC_MAX_NUM_TRS 32 /**< maximun transmitters allowed */ +#define WL_RMC_ARTMO_MIN 350 /**< time in ms */ +#define WL_RMC_ARTMO_MAX 40000 /**< time in ms */ + +/* RMC events in action frames */ +enum rmc_opcodes { + RELMCAST_ENTRY_OP_DISABLE = 0, /**< Disable multi-cast group */ + RELMCAST_ENTRY_OP_DELETE = 1, /**< Delete multi-cast group */ + RELMCAST_ENTRY_OP_ENABLE = 2, /**< Enable multi-cast group */ + RELMCAST_ENTRY_OP_ACK_ALL = 3 /**< Enable ACK ALL bit in AMT */ +}; + +/* RMC operational modes */ +enum rmc_modes { + WL_RMC_MODE_RECEIVER = 0, /**< Receiver mode by default */ + WL_RMC_MODE_TRANSMITTER = 1, /**< Transmitter mode using wl ackreq */ + WL_RMC_MODE_INITIATOR = 2 /**< Initiator mode using wl ackreq */ +}; + +/** Each RMC mcast client info */ +typedef struct wl_relmcast_client { + uint8 flag; /**< status of client such as AR, R, or blacklisted */ + uint8 PAD; + int16 rssi; /**< rssi value of RMC client */ + struct ether_addr addr; /**< mac address of RMC client */ +} wl_relmcast_client_t; + +/** RMC Counters */ +typedef struct wl_rmc_cnts { + uint16 version; /**< see definition 
of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + uint16 dupcnt; /**< counter for duplicate rmc MPDU */ + uint16 ackreq_err; /**< counter for wl ackreq error */ + uint16 af_tx_err; /**< error count for action frame transmit */ + uint16 null_tx_err; /**< error count for rmc null frame transmit */ + uint16 af_unicast_tx_err; /**< error count for rmc unicast frame transmit */ + uint16 mc_no_amt_slot; /**< No mcast AMT entry available */ + /* Unused. Keep for rom compatibility */ + uint16 mc_no_glb_slot; /**< No mcast entry available in global table */ + uint16 mc_not_mirrored; /**< mcast group is not mirrored */ + uint16 mc_existing_tr; /**< mcast group is already taken by transmitter */ + uint16 mc_exist_in_amt; /**< mcast group is already programmed in amt */ + /* Unused. Keep for rom compatibility */ + uint16 mc_not_exist_in_gbl; /**< mcast group is not in global table */ + uint16 mc_not_exist_in_amt; /**< mcast group is not in AMT table */ + uint16 mc_utilized; /**< mcast addressed is already taken */ + uint16 mc_taken_other_tr; /**< multi-cast addressed is already taken */ + uint32 rmc_rx_frames_mac; /**< no of mc frames received from mac */ + uint32 rmc_tx_frames_mac; /**< no of mc frames transmitted to mac */ + uint32 mc_null_ar_cnt; /**< no. of times NULL AR is received */ + uint32 mc_ar_role_selected; /**< no. of times took AR role */ + uint32 mc_ar_role_deleted; /**< no. of times AR role cancelled */ + uint32 mc_noacktimer_expired; /**< no. 
of times noack timer expired */ + uint16 mc_no_wl_clk; /**< no wl clk detected when trying to access amt */ + uint16 mc_tr_cnt_exceeded; /**< No of transmitters in the network exceeded */ +} wl_rmc_cnts_t; + +/** RMC Status */ +typedef struct wl_relmcast_st { + uint8 ver; /**< version of RMC */ + uint8 num; /**< number of clients detected by transmitter */ + wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT]; + uint16 err; /**< error status (used in infra) */ + uint16 actf_time; /**< action frame time period */ +} wl_relmcast_status_t; + +/** Entry for each STA/node */ +typedef struct wl_rmc_entry { + /* operation on multi-cast entry such add, + * delete, ack-all + */ + int8 flag; + struct ether_addr addr; /**< multi-cast group mac address */ +} wl_rmc_entry_t; + +/** RMC table */ +typedef struct wl_rmc_entry_table { + uint8 index; /**< index to a particular mac entry in table */ + uint8 opcode; /**< opcodes or operation on entry */ + wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY]; +} wl_rmc_entry_table_t; + +typedef struct wl_rmc_trans_elem { + struct ether_addr tr_mac; /**< transmitter mac */ + struct ether_addr ar_mac; /**< ar mac */ + uint16 artmo; /**< AR timeout */ + uint8 amt_idx; /**< amt table entry */ + uint8 PAD; + uint16 flag; /**< entry will be acked, not acked, programmed, full etc */ +} wl_rmc_trans_elem_t; + +/** RMC transmitters */ +typedef struct wl_rmc_trans_in_network { + uint8 ver; /**< version of RMC */ + uint8 num_tr; /**< number of transmitters in the network */ + wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS]; +} wl_rmc_trans_in_network_t; + +/** To update vendor specific ie for RMC */ +typedef struct wl_rmc_vsie { + uint8 oui[DOT11_OUI_LEN]; + uint8 PAD; + uint16 payload; /**< IE Data Payload */ +} wl_rmc_vsie_t; + +/* structures & defines for proximity detection */ +enum proxd_method { + PROXD_UNDEFINED_METHOD = 0, + PROXD_RSSI_METHOD = 1, + PROXD_TOF_METHOD = 2 +}; + +/* structures for proximity detection device role */ +#define 
WL_PROXD_MODE_DISABLE 0 +#define WL_PROXD_MODE_NEUTRAL 1 +#define WL_PROXD_MODE_INITIATOR 2 +#define WL_PROXD_MODE_TARGET 3 + +#define WL_PROXD_ACTION_STOP 0 +#define WL_PROXD_ACTION_START 1 + +#define WL_PROXD_FLAG_TARGET_REPORT 0x1 +#define WL_PROXD_FLAG_REPORT_FAILURE 0x2 +#define WL_PROXD_FLAG_INITIATOR_REPORT 0x4 +#define WL_PROXD_FLAG_NOCHANSWT 0x8 +#define WL_PROXD_FLAG_NETRUAL 0x10 +#define WL_PROXD_FLAG_INITIATOR_RPTRTT 0x20 +#define WL_PROXD_FLAG_ONEWAY 0x40 +#define WL_PROXD_FLAG_SEQ_EN 0x80 + +#define WL_PROXD_SETFLAG_K 0x1 +#define WL_PROXD_SETFLAG_N 0x2 +#define WL_PROXD_SETFLAG_S 0x4 + +#define WL_PROXD_SETFLAG_K 0x1 +#define WL_PROXD_SETFLAG_N 0x2 +#define WL_PROXD_SETFLAG_S 0x4 + +#define WL_PROXD_RANDOM_WAKEUP 0x8000 +#define WL_PROXD_MAXREPORT 8 + +typedef struct wl_proxd_iovar { + uint16 method; /**< Proximity Detection method */ + uint16 mode; /**< Mode (neutral, initiator, target) */ +} wl_proxd_iovar_t; + +/* + * structures for proximity detection parameters + * consists of two parts, common and method specific params + * common params should be placed at the beginning + */ + +typedef struct wl_proxd_params_common { + chanspec_t chanspec; /**< channel spec */ + int16 tx_power; /**< tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /**< tx rate of PD rames (in 500kbps units) */ + uint16 timeout; /**< timeout value */ + uint16 interval; /**< interval between neighbor finding attempts (in TU) */ + uint16 duration; /**< duration of neighbor finding attempts (in ms) */ +} wl_proxd_params_common_t; + +typedef struct wl_proxd_params_rssi_method { + chanspec_t chanspec; /**< chanspec for home channel */ + int16 tx_power; /**< tx power of Proximity Detection frames (in dBm) */ + uint16 tx_rate; /**< tx rate of PD frames, 500kbps units */ + uint16 timeout; /**< state machine wait timeout of the frames (in ms) */ + uint16 interval; /**< interval between neighbor finding attempts (in TU) */ + uint16 duration; /**< duration of 
neighbor finding attempts (in ms) */ + /* method specific ones go after this line */ + int16 rssi_thresh; /**< RSSI threshold (in dBm) */ + uint16 maxconvergtmo; /**< max wait converge timeout (in ms) */ +} wl_proxd_params_rssi_method_t; + +#define Q1_NS 25 /**< Q1 time units */ + +#define TOF_BW_NUM 3 /**< number of bandwidth that the TOF can support */ +#define TOF_BW_SEQ_NUM (TOF_BW_NUM+2) /* number of total index */ +enum tof_bw_index { + TOF_BW_20MHZ_INDEX = 0, + TOF_BW_40MHZ_INDEX = 1, + TOF_BW_80MHZ_INDEX = 2, + TOF_BW_SEQTX_INDEX = 3, + TOF_BW_SEQRX_INDEX = 4 +}; + +#define BANDWIDTH_BASE 20 /**< base value of bandwidth */ +#define TOF_BW_20MHZ (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX) +#define TOF_BW_40MHZ (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX) +#define TOF_BW_80MHZ (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX) +#define TOF_BW_10MHZ 10 + +#define NFFT_BASE 64 /**< base size of fft */ +#define TOF_NFFT_20MHZ (NFFT_BASE << TOF_BW_20MHZ_INDEX) +#define TOF_NFFT_40MHZ (NFFT_BASE << TOF_BW_40MHZ_INDEX) +#define TOF_NFFT_80MHZ (NFFT_BASE << TOF_BW_80MHZ_INDEX) + +typedef struct wl_proxd_params_tof_method { + chanspec_t chanspec; /**< chanspec for home channel */ + int16 tx_power; /**< tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /**< tx rate of PD rames (in 500kbps units) */ + uint16 timeout; /**< state machine wait timeout of the frames (in ms) */ + uint16 interval; /**< interval between neighbor finding attempts (in TU) */ + uint16 duration; /**< duration of neighbor finding attempts (in ms) */ + /* specific for the method go after this line */ + struct ether_addr tgt_mac; /**< target mac addr for TOF method */ + uint16 ftm_cnt; /**< number of the frames txed by initiator */ + uint16 retry_cnt; /**< number of retransmit attampts for ftm frames */ + int16 vht_rate; /**< ht or vht rate */ + /* add more params required for other methods can be added here */ +} wl_proxd_params_tof_method_t; + +typedef struct wl_proxd_seq_config +{ + int16 
N_tx_log2; + int16 N_rx_log2; + int16 N_tx_scale; + int16 N_rx_scale; + int16 w_len; + int16 w_offset; +} wl_proxd_seq_config_t; + +#define WL_PROXD_TUNE_VERSION_1 1 +#define WL_PROXD_TUNE_VERSION_2 2 +#include +/* For legacy ranging target (e.g. 43430, 43342) */ +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune_v1 { + uint32 version; + uint32 Ki; /**< h/w delay K factor for initiator */ + uint32 Kt; /**< h/w delay K factor for target */ + int16 vhtack; /**< enable/disable VHT ACK */ + int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */ + int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */ + int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */ + int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */ + int32 minDT; /**< min time difference of T4/T1 or T3/T2 */ + uint8 totalfrmcnt; /**< total count of transfered measurement frames */ + uint16 rsv_media; /**< reserve media value for TOF */ + uint32 flags; /**< flags */ + uint8 core; /**< core to use for tx */ + uint8 setflags; /* set flags of K, N. 
S values */ + int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */ + uint8 sw_adj; /**< enable sw assisted timestamp adjustment */ + uint8 hw_adj; /**< enable hw assisted timestamp adjustment */ + uint8 seq_en; /**< enable ranging sequence */ + uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */ + int16 N_log2_2g; /**< simple threshold crossing for 2g channel */ + int16 N_scale_2g; /**< simple threshold crossing for 2g channel */ + wl_proxd_seq_config_t seq_5g20; + wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */ + uint16 bitflip_thresh; /* bitflip threshold */ + uint16 snr_thresh; /* SNR threshold */ + int8 recv_2g_thresh; /* 2g recieve sensitivity threshold */ + uint32 acs_gdv_thresh; + int8 acs_rssi_thresh; + uint8 smooth_win_en; + int32 emu_delay; +} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_v1_t; +#include + +#include +/* For legacy ranging initiator (including 4364) */ +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune_v2 { + uint32 version; + uint32 Ki; /**< h/w delay K factor for initiator */ + uint32 Kt; /**< h/w delay K factor for target */ + int16 vhtack; /**< enable/disable VHT ACK */ + int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */ + int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */ + int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */ + int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */ + int32 minDT; /**< min time difference of T4/T1 or T3/T2 */ + uint8 totalfrmcnt; /**< total count of transfered measurement frames */ + uint16 rsv_media; /**< reserve media value for TOF */ + uint32 flags; /**< flags */ + uint8 core; /**< core to use for tx */ + uint8 setflags; /* set flags of K, N. 
S values */ + int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */ + uint8 sw_adj; /**< enable sw assisted timestamp adjustment */ + uint8 hw_adj; /**< enable hw assisted timestamp adjustment */ + uint8 seq_en; /**< enable ranging sequence */ + uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */ + int16 N_log2_2g; /**< simple threshold crossing for 2g channel */ + int16 N_scale_2g; /**< simple threshold crossing for 2g channel */ + wl_proxd_seq_config_t seq_5g20; + wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */ + uint16 bitflip_thresh; /* bitflip threshold */ + uint16 snr_thresh; /* SNR threshold */ + int8 recv_2g_thresh; /* 2g recieve sensitivity threshold */ + uint32 acs_gdv_thresh; + int8 acs_rssi_thresh; + uint8 smooth_win_en; + int32 acs_gdmm_thresh; + int8 acs_delta_rssi_thresh; + int32 emu_delay; + uint8 core_mask; /* core mask selection */ +} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_v2_t; +#include + +#define WL_PROXD_TUNE_VERSION_3 3 +/* Future ranging support */ +typedef struct wl_proxd_params_tof_tune_v3 { + uint16 version; + uint16 len; + uint32 Ki; /**< h/w delay K factor for initiator */ + uint32 Kt; /**< h/w delay K factor for target */ + int16 vhtack; /**< enable/disable VHT ACK */ + uint16 PAD; + int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */ + uint16 PAD; + int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */ + uint16 PAD; + int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */ + uint16 PAD; + int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */ + int32 minDT; /**< min time difference of T4/T1 or T3/T2 */ + uint8 totalfrmcnt; /**< total count of transfered measurement frames */ + uint8 PAD[3]; + uint16 rsv_media; /**< reserve media value for TOF */ + uint16 PAD; + uint32 flags; /**< flags */ + uint8 core; /**< core to use for tx */ + uint8 setflags; /* set flags of K, N. 
S values */ + uint16 PAD; + int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */ + uint8 sw_adj; /**< enable sw assisted timestamp adjustment */ + uint8 hw_adj; /**< enable hw assisted timestamp adjustment */ + uint8 seq_en; /**< enable ranging sequence */ + uint8 PAD[3]; + uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */ + uint8 PAD[3]; + int16 N_log2_2g; /**< simple threshold crossing for 2g channel */ + int16 N_scale_2g; /**< simple threshold crossing for 2g channel */ + wl_proxd_seq_config_t seq_5g20; + wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */ + uint16 bitflip_thresh; /* bitflip threshold */ + uint16 snr_thresh; /* SNR threshold */ + int8 recv_2g_thresh; /* 2g recieve sensitivity threshold */ + uint8 PAD[3]; + uint32 acs_gdv_thresh; + int8 acs_rssi_thresh; + uint8 smooth_win_en; + uint16 PAD; + int32 acs_gdmm_thresh; + int8 acs_delta_rssi_thresh; + uint8 PAD[3]; + int32 emu_delay; + uint8 core_mask; /* core mask selection */ + uint8 PAD[3]; +} wl_proxd_params_tof_tune_v3_t; + +typedef struct wl_proxd_params_iovar { + uint16 method; /**< Proximity Detection method */ + uint8 PAD[2]; + union { + /* common params for pdsvc */ + wl_proxd_params_common_t cmn_params; /**< common parameters */ + /* method specific */ + wl_proxd_params_rssi_method_t rssi_params; /**< RSSI method parameters */ + wl_proxd_params_tof_method_t tof_params; /**< TOF method parameters */ + /* tune parameters */ + wl_proxd_params_tof_tune_v3_t tof_tune; /**< TOF tune parameters */ + } u; /**< Method specific optional parameters */ +} wl_proxd_params_iovar_t; + +#define PROXD_COLLECT_GET_STATUS 0 +#define PROXD_COLLECT_SET_STATUS 1 +#define PROXD_COLLECT_QUERY_HEADER 2 +#define PROXD_COLLECT_QUERY_DATA 3 +#define PROXD_COLLECT_QUERY_DEBUG 4 +#define PROXD_COLLECT_REMOTE_REQUEST 5 +#define PROXD_COLLECT_DONE 6 + +typedef enum { + WL_PROXD_COLLECT_METHOD_TYPE_DISABLE = 0x0, + WL_PROXD_COLLECT_METHOD_TYPE_IOVAR = 0x1, + 
WL_PROXD_COLLECT_METHOD_TYPE_EVENT = 0x2, + WL_PROXD_COLLECT_METHOD_TYPE_EVENT_LOG = 0x4 +} wl_proxd_collect_method_type_t; + +typedef uint16 wl_proxd_collect_method_t; /* query status: method to send proxd collect */ + +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query { + uint32 method; /**< method */ + uint8 request; /**< Query request. */ + uint8 status; /**< bitmask 0 -- disable, 0x1 -- enable collection, */ + /* 0x2 -- Use generic event, 0x4 -- use event log */ + uint16 index; /**< The current frame index [0 to total_frames - 1]. */ + uint16 mode; /**< Initiator or Target */ + uint8 busy; /**< tof sm is busy */ + uint8 remote; /**< Remote collect data */ +} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header { + uint16 total_frames; /**< The total frames for this collect. */ + uint16 nfft; /**< nfft value */ + uint16 bandwidth; /**< bandwidth */ + uint16 channel; /**< channel number */ + uint32 chanspec; /**< channel spec */ + uint32 fpfactor; /**< avb timer value factor */ + uint16 fpfactor_shift; /**< avb timer value shift bits */ + int32 distance; /**< distance calculated by fw */ + uint32 meanrtt; /**< mean of RTTs */ + uint32 modertt; /**< mode of RTTs */ + uint32 medianrtt; /**< median of RTTs */ + uint32 sdrtt; /**< standard deviation of RTTs */ + uint32 clkdivisor; /**< clock divisor */ + uint16 chipnum; /**< chip type */ + uint8 chiprev; /**< chip revision */ + uint8 phyver; /**< phy version */ + struct ether_addr localMacAddr; /**< local mac address */ + struct ether_addr remoteMacAddr; /**< remote mac address */ + wl_proxd_params_tof_tune_v3_t params; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t; +#include + +/* ifdef WL_NAN */ +/* ********************** NAN wl interface struct types and defs ******************** */ +/* + * Uses new common IOVAR batch processing mechanism + */ + +/* + * NAN config control + * Bits 0 - 23 can be set by host + 
* Bits 24 - 31 - Internal use for firmware, host cannot set it + */ + +/* + * Bit 0 : If set to 1, means event uses nan bsscfg, + * otherwise uses infra bsscfg. Default is using infra bsscfg + */ +#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG 0x000001 +/* If set, discovery beacons are transmitted on 2G band */ +#define WL_NAN_CTRL_DISC_BEACON_TX_2G 0x000002 +/* If set, sync beacons are transmitted on 2G band */ +#define WL_NAN_CTRL_SYNC_BEACON_TX_2G 0x000004 +/* If set, discovery beacons are transmitted on 5G band */ +#define WL_NAN_CTRL_DISC_BEACON_TX_5G 0x000008 +/* If set, sync beacons are transmitted on 5G band */ +#define WL_NAN_CTRL_SYNC_BEACON_TX_5G 0x000010 +/* If set, auto datapath responses will be sent by FW */ +#define WL_NAN_CTRL_AUTO_DPRESP 0x000020 +/* If set, auto datapath confirms will be sent by FW */ +#define WL_NAN_CTRL_AUTO_DPCONF 0x000040 +/* If set, auto schedule responses will be sent by FW */ +#define WL_NAN_CTRL_AUTO_SCHEDRESP 0x000080 +/* If set, auto schedule confirms will be sent by FW */ +#define WL_NAN_CTRL_AUTO_SCHEDCONF 0x000100 +/* If set, proprietary rates are supported by FW */ +#define WL_NAN_CTRL_PROP_RATE 0x000200 +/* If set, service awake_dw overrides global dev awake_dw */ +#define WL_NAN_CTRL_SVC_OVERRIDE_DEV_AWAKE_DW 0x000400 +/* If set, merge scan will be disabled */ +#define WL_NAN_CTRL_SCAN_DISABLE 0x000800 +/* If set, power save will be disabled */ +#define WL_NAN_CTRL_POWER_SAVE_DISABLE 0x001000 +/* If set, device will merge to configured CID only */ +#define WL_NAN_CTRL_MERGE_CONF_CID_ONLY 0x002000 +/* If set, 5g core will be brought down in single band NAN */ +#define WL_NAN_CTRL_5G_SLICE_POWER_OPT 0x004000 +#define WL_NAN_CTRL_DUMP_HEAP 0x008000 +/* If set, host generates and assign ndp id for ndp sessions */ +#define WL_NAN_CTRL_HOST_GEN_NDPID 0x010000 +/* If set, nan ndp inactivity watchdog will be activated */ +#define WL_NAN_CTRL_DELETE_INACTIVE_PEERS 0x020000 +/* If set, nan assoc coex will be activated */ 
+#define WL_NAN_CTRL_INFRA_ASSOC_COEX 0x040000 +/* If set, dam will accept all NDP/RNG request from the peer including counter */ +#define WL_NAN_CTRL_DAM_ACCEPT_ALL 0x080000 +/* If set, nan mac ignores role for tx discovery beacon for periodic config */ +#define WL_NAN_CTRL_FASTDISC_IGNO_ROLE 0x100000 +/* If set, include NA in NAN beacons (disc beacons for now) */ +#define WL_NAN_CTRL_INCL_NA_IN_BCNS 0x200000 +/* If set, host assist will be enabled */ +#define WL_NAN_CTRL_HOST_ASSIST 0x400000 +/* If set, host configures NDI associated with the service */ +#define WL_NAN_CTRL_HOST_CFG_SVC_NDI 0x800000 + +/* Value when all host-configurable bits set */ +#define WL_NAN_CTRL_MAX_MASK 0xFFFFFF +#define WL_NAN_CFG_CTRL_FW_BITS 8 + +/* Last 8-bits are firmware controlled bits. + * Bit 31: + * If set - indicates that NAN initialization is successful + * Bit 30: + * If set - indicates that NAN MAC cfg creation is successful + * + * NOTE: These are only ready-only bits for host. + * All sets to these bits from host are masked off + */ +#define WL_NAN_PROTO_INIT_DONE (1 << 31) +#define WL_NAN_CFG_CREATE_DONE (1 << 30) + +#define WL_NAN_GET_PROTO_INIT_STATUS(x) \ + (((x) & WL_NAN_PROTO_INIT_DONE) ? TRUE:FALSE) +#define WL_NAN_CLEAR_PROTO_INIT_STATUS(x) \ + ((x) &= ~WL_NAN_PROTO_INIT_DONE) +#define WL_NAN_SET_PROTO_INIT_STATUS(x) \ + ((x) |= (WL_NAN_PROTO_INIT_DONE)) + +#define WL_NAN_GET_CFG_CREATE_STATUS(x) \ + (((x) & WL_NAN_CFG_CREATE_DONE) ? 
TRUE:FALSE) +#define WL_NAN_CLEAR_CFG_CREATE_STATUS(x) \ + ((x) &= ~WL_NAN_CFG_CREATE_DONE) +#define WL_NAN_SET_CFG_CREATE_STATUS(x) \ + ((x) |= (WL_NAN_CFG_CREATE_DONE)) + +#define WL_NAN_IOCTL_VERSION 0x2 +/* < some sufficient ioc buff size for our module */ +#define WL_NAN_IOC_BUFSZ 256 +/* some sufficient ioc buff size for dump commands */ +#define WL_NAN_IOC_BUFSZ_EXT 1024 +#define WL_NAN_MAX_SIDS_IN_BEACONS 127 /* Max allowed SIDs */ +#define WL_NAN_MASTER_RANK_LEN 8 +#define WL_NAN_RANGE_LIMITED 0x0040 /* Publish/Subscribe flags */ + +/** The service hash (service id) is exactly this many bytes. */ +#define WL_NAN_SVC_HASH_LEN 6 +#define WL_NAN_HASHES_PER_BLOOM 4 /** Number of hash functions per bloom filter */ + +/* no. of max last disc results */ +#define WL_NAN_MAX_DISC_RESULTS 3 + +/* Max len of Rx and Tx filters */ +#define WL_NAN_MAX_SVC_MATCH_FILTER_LEN 255 + +/* Max service name len */ +#define WL_NAN_MAX_SVC_NAME_LEN 32 + +/* Type of Data path connection */ +#define WL_NAN_DP_TYPE_UNICAST 0 +#define WL_NAN_DP_TYPE_MULTICAST 1 + +/* MAX security params length PMK field */ +#define WL_NAN_NCS_SK_PMK_LEN 32 + +/* Post disc attr ID type */ +typedef uint8 wl_nan_post_disc_attr_id_t; + +/* + * Component IDs + */ +typedef enum { + WL_NAN_COMPID_CONFIG = 1, + WL_NAN_COMPID_ELECTION = 2, + WL_NAN_COMPID_SD = 3, + WL_NAN_COMPID_TIMESYNC = 4, + WL_NAN_COMPID_DATA_PATH = 5, + WL_NAN_COMPID_DEBUG = 15 /* Keep this at the end */ +} wl_nan_comp_id_t; + +#define WL_NAN_COMP_SHIFT 8 +#define WL_NAN_COMP_MASK(_c) (0x0F & ((uint8)(_c))) +#define WL_NAN_COMP_ID(_c) (WL_NAN_COMP_MASK(_c) << WL_NAN_COMP_SHIFT) + +/* NAN Events */ + +/** Instance ID type (unique identifier) */ +typedef uint8 wl_nan_instance_id_t; + +/* Publish sent for a subscribe */ +/* WL_NAN_EVENT_REPLIED */ + +typedef struct wl_nan_ev_replied { + struct ether_addr sub_mac; /* Subscriber MAC */ + wl_nan_instance_id_t pub_id; /* Publisher Instance ID */ + uint8 sub_id; /* Subscriber ID */ + int8 
sub_rssi; /* Subscriber RSSI */ + uint8 pad[3]; +} wl_nan_ev_replied_t; + +typedef struct wl_nan_event_replied { + struct ether_addr sub_mac; /* Subscriber MAC */ + wl_nan_instance_id_t pub_id; /* Publisher Instance ID */ + uint8 sub_id; /* Subscriber ID */ + int8 sub_rssi; /* Subscriber RSSI */ + uint8 attr_num; + uint16 attr_list_len; /* sizeof attributes attached to payload */ + uint8 attr_list[0]; /* attributes payload */ +} wl_nan_event_replied_t; + +/* NAN Tx status of transmitted frames */ +#define WL_NAN_TXS_FAILURE 0 +#define WL_NAN_TXS_SUCCESS 1 + +/* NAN frame types */ +enum wl_nan_frame_type { + /* discovery frame types */ + WL_NAN_FRM_TYPE_PUBLISH = 1, + WL_NAN_FRM_TYPE_SUBSCRIBE = 2, + WL_NAN_FRM_TYPE_FOLLOWUP = 3, + + /* datapath frame types */ + WL_NAN_FRM_TYPE_DP_REQ = 4, + WL_NAN_FRM_TYPE_DP_RESP = 5, + WL_NAN_FRM_TYPE_DP_CONF = 6, + WL_NAN_FRM_TYPE_DP_INSTALL = 7, + WL_NAN_FRM_TYPE_DP_END = 8, + + /* schedule frame types */ + WL_NAN_FRM_TYPE_SCHED_REQ = 9, + WL_NAN_FRM_TYPE_SCHED_RESP = 10, + WL_NAN_FRM_TYPE_SCHED_CONF = 11, + WL_NAN_FRM_TYPE_SCHED_UPD = 12, + + /* ranging frame types */ + WL_NAN_FRM_TYPE_RNG_REQ = 13, + WL_NAN_FRM_TYPE_RNG_RESP = 14, + WL_NAN_FRM_TYPE_RNG_TERM = 15, + WL_NAN_FRM_TYPE_RNG_REPORT = 16, + + WL_NAN_FRM_TYPE_UNSOLICIT_SDF = 17, + WL_NAN_FRM_TYPE_INVALID +}; +typedef uint8 wl_nan_frame_type_t; + +/* NAN Reason codes for tx status */ +enum wl_nan_txs_reason_codes { + WL_NAN_REASON_SUCCESS = 1, /* NAN status success */ + WL_NAN_REASON_TIME_OUT = 2, /* timeout reached */ + WL_NAN_REASON_DROPPED = 3, /* pkt dropped due to internal failure */ + WL_NAN_REASON_MAX_RETRIES_DONE = 4 /* Max retries exceeded */ +}; + +/* For NAN TX status */ +typedef struct wl_nan_event_txs { + uint8 status; /* For TX status, success or failure */ + uint8 reason_code; /* to identify reason when status is failure */ + uint16 host_seq; /* seq num to keep track of pkts sent by host */ + uint8 type; /* frame type */ + uint8 pad; + uint16 
opt_tlvs_len; + uint8 opt_tlvs[]; +} wl_nan_event_txs_t; + +/* SD transmit pkt's event status is sent as optional tlv in wl_nan_event_txs_t */ +typedef struct wl_nan_event_sd_txs { + uint8 inst_id; /* Publish or subscribe instance id */ + uint8 req_id; /* Requestor instance id */ +} wl_nan_event_sd_txs_t; + +/* Subscribe or Publish instance Terminated */ + +/* WL_NAN_EVENT_TERMINATED */ + +#define NAN_SD_TERM_REASON_TIMEOUT 1 +#define NAN_SD_TERM_REASON_HOSTREQ 2 +#define NAN_SD_TERM_REASON_FWTERM 3 +#define NAN_SD_TERM_REASON_FAIL 4 + +typedef struct wl_nan_ev_terminated { + uint8 instance_id; /* publish / subscribe instance id */ + uint8 reason; /* 1=timeout, 2=Host/IOVAR, 3=FW Terminated 4=Failure */ + uint8 svctype; /* 0 - Publish, 0x1 - Subscribe */ + uint8 pad; /* Align */ + uint32 tx_cnt; /* Number of SDFs sent */ +} wl_nan_ev_terminated_t; + +/* Follow up received against a pub / subscr */ +/* WL_NAN_EVENT_RECEIVE */ + +typedef struct wl_nan_ev_receive { + struct ether_addr remote_addr; /* Peer NAN device MAC */ + uint8 local_id; /* Local subscribe or publish ID */ + uint8 remote_id; /* Remote subscribe or publish ID */ + int8 fup_rssi; + uint8 attr_num; + uint16 attr_list_len; /* sizeof attributes attached to payload */ + uint8 attr_list[0]; /* attributes payload */ +} wl_nan_ev_receive_t; + +/* For NAN event mask extention */ +#define WL_NAN_EVMASK_EXTN_VER 1 +#define WL_NAN_EVMASK_EXTN_LEN 16 /* 16*8 = 128 masks supported */ + +typedef struct wl_nan_event_extn { + uint8 ver; + uint8 pad; + uint16 len; + uint8 evmask[]; +} wl_nan_evmask_extn_t; + +/* WL_NAN_XTLV_DATA_DP_TXS */ + +typedef struct wl_nan_data_dp_txs { + uint8 ndp_id; + uint8 pad; + struct ether_addr indi; /* initiator ndi */ +} wl_nan_data_dp_txs_t; + +/* WL_NAN_XTLV_RNG_TXS */ + +typedef struct wl_nan_range_txs { + uint8 range_id; + uint8 pad[3]; +} wl_nan_range_txs_t; + +#define NAN_MAX_BANDS 2 + +/* + * TLVs - Below XTLV definitions will be deprecated + * in due course (soon as all other 
branches update + * to the comp ID based XTLVs listed below). + */ +enum wl_nan_cmd_xtlv_id { + WL_NAN_XTLV_MAC_ADDR = 0x120, + WL_NAN_XTLV_MATCH_RX = 0x121, + WL_NAN_XTLV_MATCH_TX = 0x122, + WL_NAN_XTLV_SVC_INFO = 0x123, + WL_NAN_XTLV_SVC_NAME = 0x124, + WL_NAN_XTLV_SR_FILTER = 0x125, + WL_NAN_XTLV_FOLLOWUP = 0x126, + WL_NAN_XTLV_SVC_LIFE_COUNT = 0x127, + WL_NAN_XTLV_AVAIL = 0x128, + WL_NAN_XTLV_SDF_RX = 0x129, + WL_NAN_XTLV_SDE_CONTROL = 0x12a, + WL_NAN_XTLV_SDE_RANGE_LIMIT = 0x12b, + WL_NAN_XTLV_NAN_AF = 0x12c, + WL_NAN_XTLV_SD_TERMINATE = 0x12d, + WL_NAN_XTLV_CLUSTER_ID = 0x12e, + WL_NAN_XTLV_PEER_RSSI = 0x12f, + WL_NAN_XTLV_BCN_RX = 0x130, + WL_NAN_XTLV_REPLIED = 0x131, /* Publish sent for a subscribe */ + WL_NAN_XTLV_RECEIVED = 0x132, /* FUP Received */ + WL_NAN_XTLV_DISC_RESULTS = 0x133, /* Discovery results */ + WL_NAN_XTLV_TXS = 0x134 /* TX status */ +}; + +#define WL_NAN_CMD_GLOBAL 0x00 +#define WL_NAN_CMD_CFG_COMP_ID 0x01 +#define WL_NAN_CMD_ELECTION_COMP_ID 0x02 +#define WL_NAN_CMD_SD_COMP_ID 0x03 +#define WL_NAN_CMD_SYNC_COMP_ID 0x04 +#define WL_NAN_CMD_DATA_COMP_ID 0x05 +#define WL_NAN_CMD_DAM_COMP_ID 0x06 +#define WL_NAN_CMD_RANGE_COMP_ID 0x07 +#define WL_NAN_CMD_GENERIC_COMP_ID 0x08 +#define WL_NAN_CMD_SCHED_COMP_ID 0x09 +#define WL_NAN_CMD_NSR_COMP_ID 0x0a /* NAN Save Restore */ +#define WL_NAN_CMD_NANHO_COMP_ID 0x0b /* NAN Host offload */ +#define WL_NAN_CMD_DBG_COMP_ID 0x0f + +#define WL_NAN_CMD_COMP_SHIFT 8 +#define NAN_CMD(x, y) (((x) << WL_NAN_CMD_COMP_SHIFT) | (y)) + +/* + * Module based NAN TLV IDs + */ +typedef enum wl_nan_tlv { + + WL_NAN_XTLV_CFG_MATCH_RX = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01), + WL_NAN_XTLV_CFG_MATCH_TX = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02), + WL_NAN_XTLV_CFG_SR_FILTER = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03), + WL_NAN_XTLV_CFG_SVC_NAME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04), + WL_NAN_XTLV_CFG_NAN_STATUS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05), + WL_NAN_XTLV_CFG_SVC_LIFE_COUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06), 
+ WL_NAN_XTLV_CFG_SVC_HASH = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07), + WL_NAN_XTLV_CFG_SEC_CSID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08), /* Security CSID */ + WL_NAN_XTLV_CFG_SEC_PMK = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09), /* Security PMK */ + WL_NAN_XTLV_CFG_SEC_PMKID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A), + WL_NAN_XTLV_CFG_SEC_SCID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0B), + WL_NAN_XTLV_CFG_VNDR_PAYLOAD = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0C), + WL_NAN_XTLV_CFG_HOST_INDPID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0D), + /* when host ndpid is used */ + WL_NAN_XTLV_CFG_MAC_ADDR = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0E), + /* fast disc time bitmap config */ + WL_NAN_XTLV_CFG_FDISC_TBMP = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0F), + + WL_NAN_XTLV_SD_SVC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01), + WL_NAN_XTLV_SD_FOLLOWUP = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02), + WL_NAN_XTLV_SD_SDF_RX = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03), + WL_NAN_XTLV_SD_SDE_CONTROL = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04), + WL_NAN_XTLV_SD_SDE_RANGE_LIMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05), + WL_NAN_XTLV_SD_NAN_AF = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06), + WL_NAN_XTLV_SD_TERM = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07), + WL_NAN_XTLV_SD_REPLIED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08), /* Pub sent */ + WL_NAN_XTLV_SD_FUP_RECEIVED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09), /* FUP Received */ + WL_NAN_XTLV_SD_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A), /* Pub RX */ + WL_NAN_XTLV_SD_TXS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B), /* Tx status */ + WL_NAN_XTLV_SD_SDE_SVC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C), + WL_NAN_XTLV_SD_SDE_SVC_UPD_IND = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D), + WL_NAN_XTLV_SD_SVC_NDI = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E), + WL_NAN_XTLV_SD_NDP_SPEC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F), + WL_NAN_XTLV_SD_NDPE_TLV_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x10), + WL_NAN_XTLV_SD_NDL_QOS_UPD = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x11), + + WL_NAN_XTLV_SYNC_BCN_RX = 
NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01), + WL_NAN_XTLV_EV_MR_CHANGED = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02), + + WL_NAN_XTLV_DATA_DP_END = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01), + WL_NAN_XTLV_DATA_DP_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02), + WL_NAN_XTLV_DATA_DP_SEC_INST = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03), + WL_NAN_XTLV_DATA_DP_TXS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x04), /* txs for dp */ + WL_NAN_XTLV_DATA_DP_OPAQUE_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x05), + WL_NAN_XTLV_RANGE_INFO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01), + WL_NAN_XTLV_RNG_TXS = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02), + + WL_NAN_XTLV_EV_SLOT_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01), + WL_NAN_XTLV_EV_GEN_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x02), + WL_NAN_XTLV_CCA_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x03), + WL_NAN_XTLV_PER_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x04), + WL_NAN_XTLV_CHBOUND_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x05), + WL_NAN_XTLV_SLOT_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x06), + + WL_NAN_XTLV_DAM_NA_ATTR = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), /* na attr */ + WL_NAN_XTLV_HOST_ASSIST_REQ = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02), /* host assist */ + + WL_NAN_XTLV_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01), /* fw cap */ + + WL_NAN_XTLV_SCHED_INFO = NAN_CMD(WL_NAN_CMD_SCHED_COMP_ID, 0x01), + + /* Nan Save-Restore XTLVs */ + WL_NAN_XTLV_NSR2_PEER = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x21), + WL_NAN_XTLV_NSR2_NDP = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x22), + + /* Host offload XTLVs */ + WL_NAN_XTLV_NANHO_PEER_ENTRY = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), + WL_NAN_XTLV_NANHO_DCAPLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02), + WL_NAN_XTLV_NANHO_DCSLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03), + WL_NAN_XTLV_NANHO_BLOB = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04), + WL_NAN_XTLV_NANHO_NDP_STATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05), + WL_NAN_XTLV_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x06), + WL_NAN_XTLV_NANHO_OOB_NAF = 
NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07) +} wl_nan_tlv_t; + +/* Sub Module ID's for NAN */ +enum { + NAN_MAC = 0, /* nan mac */ + NAN_DISC = 1, /* nan discovery */ + NAN_DBG = 2, /* nan debug */ + NAN_SCHED = 3, /* nan sched */ + NAN_PEER_ENTRY = 4, /* nan peer entry */ + NAN_AVAIL = 5, /* nan avail */ + NAN_DAM = 6, /* nan dam */ + NAN_FSM = 7, /* nan fsm registry */ + NAN_NDP = 8, /* nan ndp */ + NAN_NDL = 9, /* nan ndl */ + NAN_DP = 10, /* nan dp core */ + NAN_RNG = 11, /* nan ranging */ + NAN_SEC = 12, /* nan sec */ + NAN_LAST = 13 +}; + +enum wl_nan_sub_cmd_xtlv_id { + + /* Special command - Tag zero */ + WL_NAN_CMD_GLB_NAN_VER = NAN_CMD(WL_NAN_CMD_GLOBAL, 0x00), + + /* nan cfg sub-commands */ + + WL_NAN_CMD_CFG_NAN_INIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01), + WL_NAN_CMD_CFG_ROLE = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02), + WL_NAN_CMD_CFG_HOP_CNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03), + WL_NAN_CMD_CFG_HOP_LIMIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04), + WL_NAN_CMD_CFG_WARMUP_TIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05), + WL_NAN_CMD_CFG_STATUS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06), + WL_NAN_CMD_CFG_OUI = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07), + WL_NAN_CMD_CFG_COUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08), + WL_NAN_CMD_CFG_CLEARCOUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09), + WL_NAN_CMD_CFG_CHANNEL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A), + WL_NAN_CMD_CFG_BAND = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0B), + WL_NAN_CMD_CFG_CID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0C), + WL_NAN_CMD_CFG_IF_ADDR = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0D), + WL_NAN_CMD_CFG_BCN_INTERVAL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0E), + WL_NAN_CMD_CFG_SDF_TXTIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0F), + WL_NAN_CMD_CFG_SID_BEACON = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x10), + WL_NAN_CMD_CFG_DW_LEN = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x11), + WL_NAN_CMD_CFG_AVAIL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x12), + WL_NAN_CMD_CFG_WFA_TM = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x13), + WL_NAN_CMD_CFG_EVENT_MASK = 
NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x14), + WL_NAN_CMD_CFG_NAN_CONFIG = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x15), /* ctrl */ + WL_NAN_CMD_CFG_NAN_ENAB = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x16), + WL_NAN_CMD_CFG_ULW = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x17), + WL_NAN_CMD_CFG_NAN_CONFIG2 = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x18), /* ctrl2 */ + WL_NAN_CMD_CFG_DEV_CAP = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x19), + WL_NAN_CMD_CFG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1A), + WL_NAN_CMD_CFG_VNDR_PAYLOAD = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1B), + WL_NAN_CMD_CFG_FASTDISC = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1C), + WL_NAN_CMD_CFG_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1D), + WL_NAN_CMD_CFG_MAX = WL_NAN_CMD_CFG_MIN_TX_RATE, + + /* Add new commands before and update */ + + /* nan election sub-commands */ + WL_NAN_CMD_ELECTION_HOST_ENABLE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x01), + WL_NAN_CMD_ELECTION_METRICS_CONFIG = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x02), + WL_NAN_CMD_ELECTION_METRICS_STATE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03), + WL_NAN_CMD_ELECTION_LEAVE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03), + WL_NAN_CMD_ELECTION_MERGE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x04), + WL_NAN_CMD_ELECTION_ADVERTISERS = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x05), + WL_NAN_CMD_ELECTION_RSSI_THRESHOLD = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x06), + WL_NAN_CMD_ELECTION_MAX = WL_NAN_CMD_ELECTION_RSSI_THRESHOLD, + /* New commands go before and update */ + + /* nan SD sub-commands */ + WL_NAN_CMD_SD_PARAMS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01), + WL_NAN_CMD_SD_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02), + WL_NAN_CMD_SD_PUBLISH_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03), + WL_NAN_CMD_SD_CANCEL_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04), + WL_NAN_CMD_SD_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05), + WL_NAN_CMD_SD_SUBSCRIBE_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06), + WL_NAN_CMD_SD_CANCEL_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07), + WL_NAN_CMD_SD_VND_INFO 
= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08), + WL_NAN_CMD_SD_STATS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09), + WL_NAN_CMD_SD_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A), + WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B), + WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C), + WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D), + WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_SHOW, + + /* nan time sync sub-commands */ + + WL_NAN_CMD_SYNC_SOCIAL_CHAN = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01), + WL_NAN_CMD_SYNC_AWAKE_DWS = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02), + WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x03), + WL_NAN_CMD_SYNC_MAX = WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD, + + /* nan2 commands */ + WL_NAN_CMD_DATA_CONFIG = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01), + WL_NAN_CMD_DATA_RSVD02 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02), + WL_NAN_CMD_DATA_RSVD03 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03), + WL_NAN_CMD_DATA_DATAREQ = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x04), + WL_NAN_CMD_DATA_DATARESP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x05), + WL_NAN_CMD_DATA_DATAEND = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x06), + WL_NAN_CMD_DATA_SCHEDUPD = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x07), + WL_NAN_CMD_DATA_RSVD08 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x08), + WL_NAN_CMD_DATA_CAP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x9), + WL_NAN_CMD_DATA_STATUS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0A), + WL_NAN_CMD_DATA_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0B), + WL_NAN_CMD_DATA_RSVD0C = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0C), + WL_NAN_CMD_DATA_NDP_SHOW = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0D), + WL_NAN_CMD_DATA_DATACONF = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0E), + WL_NAN_CMD_DATA_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0F), + WL_NAN_CMD_DATA_MAX_PEERS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x10), + WL_NAN_CMD_DATA_DP_IDLE_PERIOD = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x11), + WL_NAN_CMD_DATA_DP_OPAQUE_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x12), + 
WL_NAN_CMD_DATA_DP_HB_DURATION = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x13), + WL_NAN_CMD_DATA_PATH_MAX = WL_NAN_CMD_DATA_DP_HB_DURATION, /* New ones before and update */ + + /* nan dam sub-commands */ + WL_NAN_CMD_DAM_CFG = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), + WL_NAN_CMD_DAM_MAX = WL_NAN_CMD_DAM_CFG, /* New ones before and update */ + + /* nan2.0 ranging commands */ + WL_NAN_CMD_RANGE_REQUEST = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01), + WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02), + WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03), + WL_NAN_CMD_RANGE_CANCEL = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04), + + /* nan debug sub-commands */ + WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01), + WL_NAN_CMD_DBG_SCAN = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x02), + WL_NAN_CMD_DBG_SCAN_RESULTS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x03), + /* This is now moved under CFG */ + WL_NAN_CMD_DBG_EVENT_MASK = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x04), + WL_NAN_CMD_DBG_EVENT_CHECK = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x05), + WL_NAN_CMD_DBG_DUMP = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x06), + WL_NAN_CMD_DBG_CLEAR = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x07), + WL_NAN_CMD_DBG_RSSI = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x08), + WL_NAN_CMD_DBG_DEBUG = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x09), + WL_NAN_CMD_DBG_TEST1 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0A), + WL_NAN_CMD_DBG_TEST2 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0B), + WL_NAN_CMD_DBG_TEST3 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0C), + WL_NAN_CMD_DBG_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0D), + WL_NAN_CMD_DBG_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0E), + WL_NAN_CMD_DBG_LEVEL = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0F), + WL_NAN_CMD_DBG_MAX = WL_NAN_CMD_DBG_LEVEL, /* New ones before and update */ + + /* Generic componenet */ + WL_NAN_CMD_GEN_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01), + WL_NAN_CMD_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02), + WL_NAN_CMD_GEN_MAX = WL_NAN_CMD_GEN_FW_CAP, + + 
/* NAN Save-Restore */ + WL_NAN_CMD_NSR2 = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x20), + WL_NAN_CMD_NSR2_MAX = WL_NAN_CMD_NSR2, + + /* Host offload sub-commands */ + WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), + WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02), + WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03), + WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_OOB_NAF +}; + +/* + * Component/Module based NAN TLV IDs for NAN stats + */ +typedef enum wl_nan_stats_tlv { + WL_NAN_XTLV_SYNC_MAC_STATS = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01), + + WL_NAN_XTLV_SD_DISC_STATS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01), + + WL_NAN_XTLV_DATA_NDP_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01), + WL_NAN_XTLV_DATA_NDL_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02), + WL_NAN_XTLV_DATA_SEC_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03), + + WL_NAN_XTLV_GEN_SCHED_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01), + WL_NAN_XTLV_GEN_PEER_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02), + WL_NAN_XTLV_GEN_PEER_STATS_DEVCAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x03), + WL_NAN_XTLV_GEN_PEER_STATS_NDP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x04), + WL_NAN_XTLV_GEN_PEER_STATS_SCHED = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x05), + WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x06), + WL_NAN_XTLV_GEN_NDP_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x07), + + WL_NAN_XTLV_DAM_STATS = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), + WL_NAN_XTLV_DAM_AVAIL_STATS = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02), + + WL_NAN_XTLV_RANGE_STATS = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01) +} wl_nan_stats_tlv_t; + +/* NAN stats WL_NAN_CMD_GEN_STATS command */ +/* Input data */ +typedef struct wl_nan_cmn_get_stat { + uint32 modules_btmap; /* Bitmap to indicate module stats are needed: + * See NAN Sub Module ID's above + */ + uint8 operation; /* Get, Get and Clear */ + uint8 arg1; /* Submodule control variable1 */ + uint8 arg2; /* Submodule control 
variable2 */ + uint8 pad; /* May not be needed as TLV's are aligned,add to pass compile chk */ +} wl_nan_cmn_get_stat_t; + +/* Output for Stats container */ +typedef struct wl_nan_cmn_stat { + uint32 n_stats; /* Number of different sub TLV stats present in the container */ + uint32 totlen; /* Total Length of stats data in container */ + uint8 stats_tlvs []; /* Stat TLV's container */ +} wl_nan_cmn_stat_t; + +/* Defines for operation */ +#define WLA_NAN_STATS_GET 0 +#define WLA_NAN_STATS_GET_CLEAR 1 + +#define WL_NAN_STAT_ALL 0xFFFFFFFF + +/* NAN Mac stats */ + +typedef struct wl_nan_mac_band_stats { + uint32 bcn_tx; /* 2g/5g disc/sync beacon tx count */ + uint32 bcn_rx; /* 2g/5g disc/sync beacon rx count */ + uint32 dws; /* Number of 2g/5g DW's */ +} wl_nan_mac_band_stats_t; + +/* Note: if this struct is changing update wl_nan_slot_ecounters_vX_t version, + * as this struct is sent as payload in wl_nan_slot_ecounter_vX_ts + */ +typedef struct wl_nan_mac_stats { + wl_nan_mac_band_stats_t band[NAN_MAX_BANDS]; /* MAC sync band specific stats */ + uint32 naf_tx; /* NAN AF tx */ + uint32 naf_rx; /* NAN AF rx */ + uint32 sdf_tx; /* SDF tx */ + uint32 sdf_rx; /* SDF rx */ +} wl_nan_mac_stats_t; + +/* NAN Sched stats */ +/* Per core Sched stats */ +typedef struct nan_sched_stats_core { + uint32 slotstart; /* slot_start */ + uint32 slotend; /* slot_end */ + uint32 slotskip; /* slot_skip */ + uint32 slotstart_partial; /* slot resume */ + uint32 slotend_partial; /* slot pre-empt */ + uint8 avail_upd_cnt; /* count to track num of times avail has been updated */ + uint8 pad[3]; +} nan_sched_stats_core_t; +/* Common Sched stats */ +typedef struct nan_sched_stats_cmn { + uint32 slot_adj_dw; /* Slot adjusts due to DW changes */ + uint32 slot_dur; /* Total slot duration in TU's */ +} nan_sched_stats_cmn_t; + +/* Note: if this struct is changing update wl_nan_slot_ecounters_vX_t version, + * as this struct is sent as payload in wl_nan_slot_ecounters_vX_t + */ +typedef struct 
nan_sched_stats { + nan_sched_stats_cmn_t cmn; + nan_sched_stats_core_t slice[MAX_NUM_D11CORES]; +} nan_sched_stats_t; +/* End NAN Sched stats */ + +/* NAN Discovery stats */ +typedef struct nan_disc_stats { + uint32 pub_tx; /* Publish tx */ + uint32 pub_rx; /* Publish rx */ + uint32 sub_tx; /* Subscribe tx */ + uint32 sub_rx; /* Subscribe rx */ + uint32 fup_tx; /* Followup tx */ + uint32 fup_rx; /* Followup rx */ + uint32 pub_resp_ignored; /* response to incoming publish ignored */ + uint32 sub_resp_ignored; /* response to incoming subscribe ignored */ +} nan_disc_stats_t; +/* NAN Discovery stats end */ + +/* statistics for nan sec */ +typedef struct nan_sec_stats_s { + uint32 mic_fail; /* rx mic fail */ + uint32 replay_fail; /* replay counter */ + uint32 tx_fail; /* tx fail (from txstatus) */ + uint32 key_info_err; /* key info field err */ + uint32 ok_sessions; /* successful mx negotiations */ + uint32 fail_sessions; /* failed sessions */ + uint32 keydesc_err; /* key desc error */ + uint32 invalid_cipher; /* cipher suite not valid */ + uint32 pmk_not_found; /* no pmk found for given service or for any reason */ + uint32 no_pmk_for_pmkid; /* no pmk found for give pmkid */ + uint32 key_install_err; /* failed to install keys */ + uint32 no_keydesc_attr; /* key desc attr missing */ + uint32 nonce_mismatch; /* nonce mismatch */ +} nan_sec_stats_t; + +/* WL_NAN_XTLV_GEN_PEER_STATS */ +typedef struct wl_nan_peer_stats { + struct ether_addr nmi; + uint8 pad[2]; + uint32 pkt_enq; /* counter for queued pkt of peer */ + + /* NDL */ + bool ndl_exist; + uint8 ndl_state; + bool counter_proposed; + uint8 pad1; + + /* NDL QoS */ + uint16 local_max_latency; + uint16 peer_max_latency; + uint8 local_min_slots; + uint8 peer_min_slots; + + /* security association */ + struct ether_addr sec_laddr; /* local mac addr */ + struct ether_addr sec_raddr; /* remote mac addr */ + uint8 sec_csid; + uint8 pad2; +} wl_nan_peer_stats_t; + +/* WL_NAN_XTLV_GEN_PEER_STATS_DEVCAP */ +typedef struct 
wl_nan_peer_stats_dev_cap { + uint8 mapid; + uint8 awake_dw_2g; + uint8 awake_dw_5g; + uint8 bands_supported; + uint8 op_mode; + uint8 num_antennas; + uint16 chan_switch_time; + uint8 capabilities; + uint8 pad[3]; +} wl_nan_peer_stats_dev_cap_t; + +/* WL_NAN_XTLV_GEN_PEER_STATS_NDP */ +typedef struct wl_nan_peer_stats_ndp { + uint8 peer_role; + uint8 ndp_state; + uint8 indp_id; /* initiator ndp id */ + uint8 ndp_ctrl; /* ndp control field */ + struct ether_addr peer_nmi; + struct ether_addr peer_ndi; + struct ether_addr local_ndi; + + /* peer scb info */ + bool scb_allocated; + bool scb_found; + uint32 scb_flags; + uint32 scb_flags2; + uint32 scb_flags3; +} wl_nan_peer_stats_ndp_t; + +enum { + WL_NAN_SCHED_STAT_SLOT_COMM = 0x01, /* Committed slot */ + WL_NAN_SCHED_STAT_SLOT_COND = 0x02, /* Conditional slot(proposal/counter) */ + WL_NAN_SCHED_STAT_SLOT_NDC = 0x04, /* NDC slot */ + WL_NAN_SCHED_STAT_SLOT_IMMUT = 0x08, /* Immutable slot */ + WL_NAN_SCHED_STAT_SLOT_RANGE = 0x10, /* Ranging slot */ +}; +typedef uint16 wl_nan_stats_sched_slot_info_t; + +typedef struct wl_nan_stats_sched_slot { + wl_nan_stats_sched_slot_info_t info; /* capture slot type and more info */ + chanspec_t chanspec; +} wl_nan_stats_sched_slot_t; + +/* WL_NAN_XTLV_GEN_PEER_STATS_SCHED, WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED */ +typedef struct wl_nan_stats_sched { + uint8 map_id; + uint8 seq_id; /* seq id from NA attr */ + uint8 slot_dur; + uint8 pad; + uint16 period; + uint16 num_slot; + wl_nan_stats_sched_slot_t slot[]; +} wl_nan_stats_sched_t; + +/* WL_NAN_XTLV_GEN_PEER_STATS_SCHED */ +typedef struct wl_nan_peer_stats_sched { + uint8 map_id; + uint8 seq_id; /* seq id from NA attr */ + uint8 slot_dur; + uint8 pad; + uint16 period; + uint16 num_slot; + wl_nan_stats_sched_slot_t slot[]; +} wl_nan_peer_stats_sched_t; + +/* WL_NAN_XTLV_RANGE_STATS */ +typedef struct wl_nan_range_stats { + uint16 rng_ssn_estb; + uint16 rng_ssn_fail; + uint16 rng_sched_start; + uint16 rng_sched_end; + uint16 
ftm_ssn_success; /* number of succesfull ftm sessions */ + uint16 ftm_ssn_fail; + uint16 num_meas; /* number of ftm frames */ + uint16 num_valid_meas; /* number of ftm frames with valid timestamp */ +} wl_nan_range_stats_t; + +/* defines for ndp stats flag */ + +#define NAN_NDP_STATS_FLAG_ROLE_MASK 0x01 +#define NAN_NDP_STATS_FLAG_ROLE_INIT 0x00 +#define NAN_NDP_STATS_FLAG_ROLE_RESP 0x01 + +#define NAN_NDP_STATS_STATE_BIT_SHIFT 1 +#define NAN_NDP_STATS_FLAG_STATE_MASK 0x07 +#define NAN_NDP_STATS_FLAG_STATE_IN_PROG 0x00 +#define NAN_NDP_STATS_FLAG_STATE_ESTB 0x01 +#define NAN_NDP_STATS_FLAG_STATE_TEARDOWN_WAIT 0x02 +/* More states can be added here, when needed */ + +/* WL_NAN_XTLV_GEN_NDP_STATS */ +typedef struct wl_nan_ndp_stats_s { + uint8 ndp_id; + uint8 indp_id; + uint8 flags; + uint8 nan_sec_csid; + struct ether_addr lndi_addr; + struct ether_addr pnmi_addr; + struct ether_addr pndi_addr; + uint8 PAD[2]; +} wl_nan_ndp_stats_t; + +/* WL_NAN_XTLV_EV_SLOT_INFO */ +typedef struct wl_nan_slot_info_s { + /* dw slot start expected */ + uint32 dwst_h; + uint32 dwst_l; + /* dw slot start actual */ + uint32 act_dwst_h; + uint32 act_dwst_l; + uint16 cur_chan[MAX_NUM_D11CORES]; /* sdb channels */ + uint16 dw_chan; /* dw channel */ + uint8 dw_no; /* dw number */ + uint8 slot_seq_no; /* slot seq no. */ +} wl_nan_slot_info_t; + +/* WL_NAN_EVENT_MR_CHANGED */ +typedef uint8 wl_nan_mr_changed_t; +#define WL_NAN_AMR_CHANGED 1 +#define WL_NAN_IMR_CHANGED 2 + +/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */ +enum { + /* add new status here... 
*/ + WL_NAN_E_PEER_NOTAVAIL = -2131, + WL_NAN_E_SCB_EXISTS = -2130, + WL_NAN_E_INVALID_PEER_NDI = -2129, + WL_NAN_E_INVALID_LOCAL_NDI = -2128, + WL_NAN_E_ALREADY_EXISTS = -2127, /* generic NAN error for duplication */ + WL_NAN_E_EXCEED_MAX_NUM_MAPS = -2126, + WL_NAN_E_INVALID_DEV_CHAN_SCHED = -2125, + WL_NAN_E_INVALID_PEER_BLOB_TYPE = -2124, + WL_NAN_E_INVALID_LCL_BLOB_TYPE = -2123, + WL_NAN_E_BCMC_PDPA = -2122, /* BCMC NAF PDPA */ + WL_NAN_E_TIMEOUT = -2121, + WL_NAN_E_HOST_CFG = -2120, + WL_NAN_E_NO_ACK = -2119, + WL_NAN_E_SECINST_FAIL = -2118, + WL_NAN_E_REJECT_NDL = -2117, /* generic NDL rejection error */ + WL_NAN_E_INVALID_NDP_ATTR = -2116, + WL_NAN_E_HOST_REJECTED = -2115, + WL_NAN_E_PCB_NORESOURCE = -2114, + WL_NAN_E_NDC_EXISTS = -2113, + WL_NAN_E_NO_NDC_ENTRY_AVAIL = -2112, + WL_NAN_E_INVALID_NDC_ENTRY = -2111, + WL_NAN_E_SD_TX_LIST_FULL = -2110, + WL_NAN_E_SVC_SUB_LIST_FULL = -2109, + WL_NAN_E_SVC_PUB_LIST_FULL = -2108, + WL_NAN_E_SDF_MAX_LEN_EXCEEDED = -2107, + WL_NAN_E_ZERO_CRB = -2106, /* no CRB between local and peer */ + WL_NAN_E_PEER_NDC_NOT_SELECTED = -2105, /* peer ndc not selected */ + WL_NAN_E_DAM_CHAN_CONFLICT = -2104, /* dam schedule channel conflict */ + WL_NAN_E_DAM_SCHED_PERIOD = -2103, /* dam schedule period mismatch */ + WL_NAN_E_LCL_NDC_NOT_SELECTED = -2102, /* local selected ndc not configured */ + WL_NAN_E_NDL_QOS_INVALID_NA = -2101, /* na doesn't comply with ndl qos */ + WL_NAN_E_CLEAR_NAF_WITH_SA_AS_RNDI = -2100, /* rx clear naf with peer rndi */ + WL_NAN_E_SEC_CLEAR_PKT = -2099, /* rx clear pkt from a peer with sec_sa */ + WL_NAN_E_PROT_NON_PDPA_NAF = -2098, /* rx protected non PDPA frame */ + WL_NAN_E_DAM_DOUBLE_REMOVE = -2097, /* remove peer schedule already removed */ + WL_NAN_E_DAM_DOUBLE_MERGE = -2096, /* merge peer schedule already merged */ + WL_NAN_E_DAM_REJECT_INVALID = -2095, /* reject for invalid schedule */ + WL_NAN_E_DAM_REJECT_RANGE = -2094, + WL_NAN_E_DAM_REJECT_QOS = -2093, + WL_NAN_E_DAM_REJECT_NDC = -2092, + 
WL_NAN_E_DAM_REJECT_PEER_IMMUT = -2091, + WL_NAN_E_DAM_REJECT_LCL_IMMUT = -2090, + WL_NAN_E_DAM_EXCEED_NUM_SCHED = -2089, + WL_NAN_E_DAM_INVALID_SCHED_MAP = -2088, /* invalid schedule map list */ + WL_NAN_E_DAM_INVALID_LCL_SCHED = -2087, + WL_NAN_E_INVALID_MAP_ID = -2086, + WL_NAN_E_CHAN_OVERLAP_ACROSS_MAP = -2085, + WL_NAN_E_INVALID_CHAN_LIST = -2084, + WL_NAN_E_INVALID_RANGE_TBMP = -2083, + WL_NAN_E_INVALID_IMMUT_SCHED = -2082, + WL_NAN_E_INVALID_NDC_ATTR = -2081, + WL_NAN_E_INVALID_TIME_BITMAP = -2080, + WL_NAN_E_INVALID_NA_ATTR = -2079, + WL_NAN_E_NO_NA_ATTR_IN_AVAIL_MAP = -2078, /* no na attr saved in avail map */ + WL_NAN_E_INVALID_MAP_IDX = -2077, + WL_NAN_E_SEC_SA_NOTFOUND = -2076, + WL_NAN_E_BSSCFG_NOTFOUND = -2075, + WL_NAN_E_SCB_NOTFOUND = -2074, + WL_NAN_E_NCS_SK_KDESC_TYPE = -2073, + WL_NAN_E_NCS_SK_KEY_DESC_VER = -2072, /* key descr ver */ + WL_NAN_E_NCS_SK_KEY_TYPE = -2071, /* key descr type */ + WL_NAN_E_NCS_SK_KEYINFO_FAIL = -2070, /* key info (generic) */ + WL_NAN_E_NCS_SK_KEY_LEN = -2069, /* key len */ + WL_NAN_E_NCS_SK_KDESC_NOT_FOUND = -2068, /* key desc not found */ + WL_NAN_E_NCS_SK_INVALID_PARAMS = -2067, /* invalid args */ + WL_NAN_E_NCS_SK_KDESC_INVALID = -2066, /* key descr is not valid */ + WL_NAN_E_NCS_SK_NONCE_MISMATCH = -2065, + WL_NAN_E_NCS_SK_KDATA_SAVE_FAIL = -2064, /* not able to save key data */ + WL_NAN_E_NCS_SK_AUTH_TOKEN_CALC_FAIL = -2063, + WL_NAN_E_NCS_SK_PTK_CALC_FAIL = -2062, + WL_NAN_E_INVALID_STARTOFFSET = -2061, + WL_NAN_E_BAD_NA_ENTRY_TYPE = -2060, + WL_NAN_E_INVALID_CHANBMP = -2059, + WL_NAN_E_INVALID_OP_CLASS = -2058, + WL_NAN_E_NO_IES = -2057, + WL_NAN_E_NO_PEER_ENTRY_AVAIL = -2056, + WL_NAN_E_INVALID_PEER = -2055, + WL_NAN_E_PEER_EXISTS = -2054, + WL_NAN_E_PEER_NOTFOUND = -2053, + WL_NAN_E_NO_MEM = -2052, + WL_NAN_E_INVALID_OPTION = -2051, + WL_NAN_E_INVALID_BAND = -2050, + WL_NAN_E_INVALID_MAC = -2049, + WL_NAN_E_BAD_INSTANCE = -2048, + /* NAN status code reserved from -2048 to -3071 */ + WL_NAN_E_ERROR = -1, + 
WL_NAN_E_OK = 0 +}; + +/* Error codes used in vendor specific attribute in Data Path Termination frames */ +enum { + WL_NAN_DPEND_E_OK = 0, + WL_NAN_DPEND_E_ERROR = 1, + WL_NAN_DPEND_E_HOST_CMD = 2, + WL_NAN_DPEND_E_HOST_REJECTED = 3, /* host rejected rx frame */ + WL_NAN_DPEND_E_RESOURCE_LIMIT = 4, + WL_NAN_DPEND_E_NO_ACK_RCV = 5, + WL_NAN_DPEND_E_TIMEOUT = 6, + WL_NAN_DPEND_E_NO_ELT = 7, /* rx frame missing element container */ + WL_NAN_DPEND_E_NO_NDP_ATTR = 8, + WL_NAN_DPEND_E_NO_AVAIL_ATTR = 9, + WL_NAN_DPEND_E_NO_NDC_ATTR = 10, + WL_NAN_DPEND_E_NO_RANGE_BM = 11, + WL_NAN_DPEND_E_INVALID_NDP_ATTR = 12, + WL_NAN_DPEND_E_INVALID_NDC_ATTR = 13, + WL_NAN_DPEND_E_INVALID_IMMUT = 14, + WL_NAN_DPEND_E_INVALID_NDL_QOS = 15, + WL_NAN_DPEND_E_INVALID_SEC_PARAMS = 16, + WL_NAN_DPEND_E_REJECT_AVAIL = 17, + WL_NAN_DPEND_E_REJECT_NDL = 18 +}; + +typedef int32 wl_nan_status_t; + +/** nan cmd list entry */ +enum wl_nan_sub_cmd_input_flags { + WL_NAN_SUB_CMD_FLAG_NONE = 0, + WL_NAN_SUB_CMD_FLAG_SKIP = 1, /* Skip to next sub-command on error */ + WL_NAN_SUB_CMD_FLAG_TERMINATE = 2, /* Terminate processing and return */ + WL_NAN_SUB_CMD_FLAG_LAST /* Keep this at the end */ +}; + +/** container for nan events */ +typedef struct wl_nan_ioc { + uint16 version; /**< interface command or event version */ + uint16 id; /**< nan ioctl cmd ID */ + uint16 len; /**< total length of all tlv records in data[] */ + uint16 pad; /**< pad to be 32 bit aligment */ + uint8 data []; /**< var len payload of bcm_xtlv_t type */ +} wl_nan_ioc_t; + +/* + * NAN sub-command data structures + */ + +/* + * Config component WL_NAN_CMD_CFG_XXXX sub-commands + * WL_NAN_CMD_CFG_ENABLE + */ +enum wl_nan_config_state { + WL_NAN_CONFIG_STATE_DISABLE = 0, + WL_NAN_CONFIG_STATE_ENABLE = 1 +}; + +typedef int8 wl_nan_config_state_t; + +/* WL_NAN_CMD_CFG_NAN_INIT */ + +typedef uint8 wl_nan_init_t; + +/* WL_NAN_CMD_CFG_NAN_VERSION */ +typedef uint16 wl_nan_ver_t; + +/* WL_NAN_CMD_CFG_NAN_CONFIG */ +typedef uint32 
wl_nan_cfg_ctrl_t; + +/* WL_NAN_CMD_CFG_NAN_CONFIG2 */ +typedef struct wl_nan_cfg_ctrl2 { + uint32 flags1; /* wl_nan_cfg_ctrl2_flags1 */ + uint32 flags2; /* wl_nan_cfg_ctrl2_flags2 */ +} wl_nan_cfg_ctrl2_t; + +enum wl_nan_cfg_ctrl2_flags1 { + /* Allows unicast SDF TX while local device is under NDP/NDL negotiation, + * but Not with the peer SDF destined to. + */ + WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_UCAST_IN_PROG = 0x00000001, + /* Allows broadcast SDF TX while local device is under NDP/NDL negotiation */ + WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_BCAST_IN_PROG = 0x00000002, + /* Allows the device to send schedule update automatically on local schedule change */ + WL_NAN_CTRL2_FLAG1_AUTO_SCHEDUPD = 0x00000004, + /* Allows the device to handle slot pre_close operations */ + WL_NAN_CTRL2_FLAG1_SLOT_PRE_CLOSE = 0x00000008 +}; +#define WL_NAN_CTRL2_FLAGS1_MASK 0x0000000F + +#define WL_NAN_CTRL2_FLAGS2_MASK 0x00000000 + +/* + * WL_NAN_CMD_CFG_BAND, WL_NAN_CMD_CFG_RSSI_THRESHOLD(Get only) + */ +typedef uint8 wl_nan_band_t; + +/* + * WL_NAN_CMD_CFG_ROLE + */ +enum wl_nan_role { + WL_NAN_ROLE_AUTO = 0, + WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1, + WL_NAN_ROLE_NON_MASTER_SYNC = 2, + WL_NAN_ROLE_MASTER = 3, + WL_NAN_ROLE_ANCHOR_MASTER = 4 +}; + +typedef uint8 wl_nan_role_t; + +typedef struct wl_nan_device_state +{ + wl_nan_role_t role; /* Sync Master, Non-Sync Master */ + uint8 state; /* TBD */ + uint8 hopcount; /* Hops to the Anchor Master */ + struct ether_addr immediate_master; /* Master MAC */ + struct ether_addr anchor_master; /* Anchor Master MAC */ + struct ether_addr cluster_id; /* Cluster ID to which this device belongs to */ + uint8 PAD[3]; + uint32 tsf_high; /* NAN Cluster TSFs */ + uint32 tsf_low; +} wl_nan_device_state_t; + +/* + * WL_NAN_CMD_CFG_HOP_CNT, WL_NAN_CMD_CFG_HOP_LIMIT + */ +typedef uint8 wl_nan_hop_count_t; + +/* + * WL_NAN_CMD_CFG_WARMUP_TIME + */ +typedef uint32 wl_nan_warmup_time_ticks_t; + +/* + * WL_NAN_CMD_CFG_RSSI_THRESHOLD + * rssi_close and rssi_mid are used 
to transition master to non-master + * role by NAN state machine. rssi thresholds corresponding to the band + * will be updated. + */ +/* To be deprecated */ +typedef struct wl_nan_rssi_threshold { + wl_nan_band_t band; + int8 rssi_close; + int8 rssi_mid; + uint8 pad; +} wl_nan_rssi_threshold_t; + +/* WL_NAN_CMD_ELECTION_RSSI_THRESHOLD */ + +typedef struct wl_nan_rssi_thld { + int8 rssi_close_2g; + int8 rssi_mid_2g; + int8 rssi_close_5g; + int8 rssi_mid_5g; +} wl_nan_rssi_thld_t; + +/* WL_NAN_CMD_DATA_MAX_PEERS */ + +typedef uint8 wl_nan_max_peers_t; + +/* + * WL_NAN_CMD_CFG_STATUS + */ + +typedef enum wl_nan_election_mode { + WL_NAN_ELECTION_RUN_BY_HOST = 1, + WL_NAN_ELECTION_RUN_BY_FW = 2 +} wl_nan_election_mode_t; + +typedef struct wl_nan_conf_status { + struct ether_addr nmi; /* NAN mgmt interface address */ + uint8 enabled; /* NAN is enabled */ + uint8 role; /* Current nan sync role */ + struct ether_addr cid; /* Current Cluster id */ + uint8 social_chans[2]; /* Social channels */ + uint8 mr[8]; /* Self Master Rank */ + uint8 amr[8]; /* Anchor Master Rank */ + uint32 ambtt; /* Anchor master beacon target time */ + uint32 cluster_tsf_h; /* Current Cluster TSF High */ + uint32 cluster_tsf_l; /* Current Cluster TSF Low */ + uint8 election_mode; /* Election mode, host or firmware */ + uint8 hop_count; /* Current Hop count */ + uint8 imr[8]; /* Immediate Master Rank */ + uint8 pad[4]; + uint16 opt_tlvs_len; + uint8 opt_tlvs[]; +} wl_nan_conf_status_t; + +/* + * WL_NAN_CMD_CFG_OUI + */ +typedef struct wl_nan_oui_type { + uint8 nan_oui[DOT11_OUI_LEN]; + uint8 type; +} wl_nan_oui_type_t; + +/* + * WL_NAN_CMD_CFG_COUNT + */ +typedef struct wl_nan_count { + uint32 cnt_bcn_tx; /**< TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /**< RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /**< TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /**< RX svc disc frame count */ +} wl_nan_count_t; +/* + * Election component WL_NAN_CMD_ELECTION_XXXX sub-commands + * 
WL_NAN_CMD_ELECTION_HOST_ENABLE + */ +enum wl_nan_enable_flags { + WL_NAN_DISABLE_FLAG_HOST_ELECTION = 0, + WL_NAN_ENABLE_FLAG_HOST_ELECTION = 1 +}; + +/* + * 0 - disable host based election + * 1 - enable host based election + */ +typedef uint8 wl_nan_host_enable_t; + +/* + * WL_NAN_CMD_ELECTION_METRICS_CONFIG + */ +/* Set only */ +typedef struct wl_nan_election_metric_config { + uint8 random_factor; /* Configured random factor */ + uint8 master_pref; /* configured master preference */ + uint8 pad[2]; +} wl_nan_election_metric_config_t; + +/* + * WL_NAN_CMD_ELECTION_METRICS_STATE + */ +/* Get only */ +typedef struct wl_nan_election_metric_state { + uint8 random_factor; /* random factor used in MIs */ + uint8 master_pref; /* Master advertised in MIs */ + uint8 pad[2]; +} wl_nan_election_metric_state_t; + +/* + * WL_NAN_CMD_ELECTION_LEAVE + * WL_NAN_CMD_ELECTION_STOP + */ +typedef struct ether_addr wl_nan_cluster_id_t; + +/* + * WL_NAN_CMD_ELECTION_MERGE + * 0 - disable cluster merge + * 1 - enable cluster merge + */ +typedef uint8 wl_nan_merge_enable_t; + +/* + * WL_NAN_CMD_CFG_ROLE + * role = 0 means configuration by firmware(obsolete); otherwise by host + * when host configures role, also need target master address to sync to + */ +#define NAN_SYNC_MASTER_SELF 1 +#define NAN_SYNC_MASTER_USE_TIMING 2 /* Use the tsf timing provided */ +#define NAN_SYNC_MASTER_AMREC_UPD 4 /* provide AM record update */ + +/* + struct ether_addr addr: + when NAN_SYNC_MASTER_USE_TIMING is set, addr is the mac of Rx NAN beacon + providing the timing info + ltsf_h, ltsf_l: + The local TSF timestamp filled in by FW in the WL_NAN_EVENT_BCN_RX event; + rtsf_h, rtsf_l: + The timestamp in the Rx beacon frame, filled in by host + uint32 ambtt: + the amtt in the cluster ID attribute in the Rx beacon frame +*/ + +typedef struct nan_sync_master { + uint8 flag; /* 1: self; 2: use TSF timing; 4: AMR update */ + uint8 hop_count; + struct ether_addr addr; + struct ether_addr cluster_id; + chanspec_t 
channel; /* bcn reception channel */ + uint32 ltsf_h; + uint32 ltsf_l; + uint32 rtsf_h; + uint32 rtsf_l; + uint8 amr[WL_NAN_MASTER_RANK_LEN]; + uint32 ambtt; +} nan_sync_master_t; + +/* +* NAN Sync TLV(NSTLV): +* To keep NAN/AWDL concurrency time sync. +* It is generated at hybrid device, and propogated by AWDL only device. +* It contains the information needed to run NAN election +*/ +#include +typedef BWL_PRE_PACKED_STRUCT struct awdl_nan_sync_tlv { + uint16 hop_count; /* total hop_count */ + struct ether_addr src_addr; /* macaddr of the hybrid originator of nstlv */ + struct ether_addr cluster_id; /* NAN cluster ID of hybrid originator of nstlv */ + uint32 nan_tsf_h; /* NAN cluster TSF of the hybrid originator of nstlv */ + uint32 nan_tsf_l; + uint8 master_preference; + uint8 random_factor; + uint8 amr[WL_NAN_MASTER_RANK_LEN]; + uint8 orig_hop_count; /* hop_count of the origin hybrid NAN device */ + uint32 ambtt; /* Anchor Master Beacon Transmission Time */ + uint8 opt_xtlv_len; /* xtlv len */ +} BWL_POST_PACKED_STRUCT awdl_nan_sync_tlv_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_awdl_nan_sync_tlv { + uint8 type; /* 23 for NTLV */ + uint16 param_len; + awdl_nan_sync_tlv_t ntlv; +} BWL_POST_PACKED_STRUCT wl_awdl_nan_sync_tlv_t; +#include + +/* NAN advertiser structure */ +/* TODO RSDB: add chspec to indicates core corresponds correct core */ +typedef struct nan_adv_entry { + uint8 age; /* used to remove stale entries */ + uint8 hop_count; /* for NTLV support, use bit7 for virtual NAN peer */ + struct ether_addr addr; + struct ether_addr cluster_id; + chanspec_t channel; /* bcn reception channel */ + uint32 ltsf_h; + uint32 ltsf_l; + uint32 rtsf_h; + uint32 rtsf_l; + uint8 amr[WL_NAN_MASTER_RANK_LEN]; + uint32 ambtt; + int8 rssi[NAN_MAX_BANDS]; /* rssi last af was received at */ + int8 last_rssi[NAN_MAX_BANDS]; /* rssi in the last AF */ +} nan_adv_entry_t; +#define NAN_VIRTUAL_PEER_BIT 0x80 + +typedef enum { + NAC_CNT_NTLV_AF_TX = 0, /* count of AWDL AF 
containing NTLV tx */ + NAC_CNT_NTLV_AF_RX, /* count of AWDL AF containing NTLV rx */ + NAC_CNT_NTLV_TMERR_TX, /* count of NTLV tx timing error */ + NAC_CNT_NTLV_TMERR_RX, /* count of NTLV rx timing error */ + NAC_CNT_NTLV_TM_MISMATCH, /* count of TopMaster mismatch in Rx NTLV processing */ + NAC_CNT_NTLV_ADV_EXISTED, /* count of NTLV ignored bc advertiser existed from bcn */ + NAC_CNT_NTLV_STALED_BCN, /* count of staled bcn from NTLV info */ + NAC_CNT_NTLV_MERGE, /* count of NTLV used for NAN cluster merge */ + NAC_CNT_NTLV_ELECTION_DROP, /* count of NTLV dropped in NAN election */ + NAC_CNT_NTLV_TSF_ADOPT, /* count of NTLV used for NAN TSF adoption */ + NAC_CNT_NTLV_LAST +} nac_cnt_enum_t; + +#define NAC_MAX_CNT (NAC_CNT_NTLV_LAST) + +typedef struct nac_stats { + uint32 nac_cnt[NAC_MAX_CNT]; +} nac_stats_t; + +typedef struct nan_adv_table { + uint8 num_adv; + uint8 adv_size; + uint8 pad[2]; + nan_adv_entry_t adv_nodes[0]; +} nan_adv_table_t; + +typedef struct wl_nan_role_cfg { + wl_nan_role_t cfg_role; + wl_nan_role_t cur_role; + uint8 pad[2]; + nan_sync_master_t target_master; +} wl_nan_role_cfg_t; + +typedef struct wl_nan_role_config { + wl_nan_role_t role; + struct ether_addr target_master; + uint8 pad; +} wl_nan_role_config_t; + +typedef int8 wl_nan_sd_optional_field_types_t; + +/* Flag bits for Publish and Subscribe (wl_nan_sd_params_t flags) */ + +/* First 8 bits are blocked for mapping + * against svc_control flag bits which goes out + * as part of SDA attribute in air in SDF frames + */ +#define WL_NAN_RANGE_LIMITED 0x0040 + +/* Event generation indicator (default is continuous) */ + +#define WL_NAN_MATCH_ONCE 0x100000 +#define WL_NAN_MATCH_NEVER 0x200000 + +/* Bits specific to Publish */ + +#define WL_NAN_PUB_UNSOLICIT 0x1000 /* Unsolicited Tx */ +#define WL_NAN_PUB_SOLICIT 0x2000 /* Solicited Tx */ +#define WL_NAN_PUB_BOTH 0x3000 /* Both the above */ + +#define WL_NAN_PUB_BCAST 0x4000 /* bcast solicited Tx only */ +#define WL_NAN_PUB_EVENT 0x8000 /* 
Event on each solicited Tx */ +#define WL_NAN_PUB_SOLICIT_PENDING 0x10000 /* Used for one-time solicited Publish */ + +#define WL_NAN_FOLLOWUP 0x20000 /* Follow-up frames */ +#define WL_NAN_TX_FOLLOWUP 0x40000 /* host generated transmit Follow-up frames */ + +/* Bits specific to Subscribe */ + +#define WL_NAN_SUB_ACTIVE 0x1000 /* Active subscribe mode */ +#define WL_NAN_SUB_MATCH_IF_SVC_INFO 0x2000 /* Service info in publish */ + +#define WL_NAN_TTL_UNTIL_CANCEL 0xFFFFFFFF /* Special values for time to live (ttl) parameter */ + +/* + * Publish - runs until first transmission + * Subscribe - runs until first DiscoveryResult event + */ +#define WL_NAN_TTL_FIRST 0 + +/* Nan Service Based control Flags */ + +/* If set, dev will take care of dp_resp */ +#define WL_NAN_SVC_CTRL_AUTO_DPRESP 0x1000000 + +/* If set, host wont rec event "receive" */ +#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE 0x2000000 + +/* If set, host wont rec event "replied" */ +#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED 0x4000000 + +/* If set, host wont rec event "terminated" */ +#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED 0x8000000 + +/* + * WL_NAN_CMD_SD_PARAMS + */ +typedef struct wl_nan_sd_params +{ + uint16 length; /* length including options */ + uint8 period; /* period of the unsolicited SDF xmission in DWs */ + uint8 awake_dw; /* interval between two DWs where SDF tx/rx are done */ + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for the service name */ + uint8 instance_id; /* Instance of the current service */ + int8 proximity_rssi; /* RSSI limit to Rx subscribe or pub SDF 0 no effect */ + uint32 flags; /* bitmap representing aforesaid optional flags */ + int32 ttl; /* TTL for this instance id, -1 will run till cancelled */ + tlv_t optional[1]; /* optional fields in the SDF as appropriate */ +} wl_nan_sd_params_t; + +/* + * WL_NAN_CMD_SD_PUBLISH_LIST + * WL_NAN_CMD_SD_SUBSCRIBE_LIST + */ +typedef struct wl_nan_service_info +{ + uint8 instance_id; /* Publish instance ID */ + uint8 
service_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for service name */ +} wl_nan_service_info_t; + +typedef struct wl_nan_service_list +{ + uint16 id_count; /* Number of registered publish/subscribe services */ + wl_nan_service_info_t list[1]; /* service info defined by nan_service instance */ +} wl_nan_service_list_t; + +/* + * WL_NAN_CMD_CFG_BCN_INTERVAL + */ +typedef uint16 wl_nan_disc_bcn_interval_t; + +/* + * WL_NAN_CMD_CFG_SDF_TXTIME + */ +typedef uint16 wl_nan_svc_disc_txtime_t; + +/* + * WL_NAN_CMD_CFG_STOP_BCN_TX + */ +typedef uint16 wl_nan_stop_bcn_tx_t; + +/* + * WL_NAN_CMD_CFG_SID_BEACON + */ +typedef struct wl_nan_sid_beacon_control { + uint8 sid_enable; /* Flag to indicate the inclusion of Service IDs in Beacons */ + uint8 sid_count; /* Limit for number of publish SIDs to be included in Beacons */ + uint8 sub_sid_count; /* Limit for number of subscribe SIDs to be included in Beacons */ + uint8 pad; +} wl_nan_sid_beacon_control_t; + +/* + * WL_NAN_CMD_CFG_DW_LEN + */ +typedef uint16 wl_nan_dw_len_t; + +/* + * WL_NAN_CMD_CFG_AWAKE_DW Will be deprecated. 
+ */ +typedef struct wl_nan_awake_dw { + wl_nan_band_t band; /* 0 - b mode 1- a mode */ + uint8 interval; /* 1 or 2 or 4 or 8 or 16 */ + uint16 pad; +} wl_nan_awake_dw_t; + +/* + * WL_NAN_CMD_CFG_AWAKE_DWS + */ +typedef struct wl_nan_awake_dws { + uint8 dw_interval_2g; /* 2G DW interval */ + uint8 dw_interval_5g; /* 5G DW interval */ + uint16 pad; +} wl_nan_awake_dws_t; + +/* WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD */ + +typedef struct wl_nan_rssi_notif_thld { + int8 bcn_rssi_2g; + int8 bcn_rssi_5g; + int16 pad; +} wl_nan_rssi_notif_thld_t; + +/* + * WL_NAN_CMD_CFG_SOCIAL_CHAN + */ +typedef struct wl_nan_social_channels { + uint8 soc_chan_2g; /* 2G social channel */ + uint8 soc_chan_5g; /* 5G social channel */ + uint16 pad; +} wl_nan_social_channels_t; + +/* + * WL_NAN_CMD_SD_CANCEL_PUBLISH + * WL_NAN_CMD_SD_CANCEL_SUBSCRIBE + */ +typedef uint8 wl_nan_instance_id; /* Instance ID of an active publish instance */ + +/* + * WL_NAN_CMD_SD_VND_INFO + */ +typedef struct wl_nan_sd_vendor_info +{ + uint16 length; /* Size in bytes of the payload following this field */ + uint8 data[]; /* Vendor Information */ +} wl_nan_sd_vendor_info_t; + +/* + * WL_NAN_CMD_SD_STATS + */ +typedef struct wl_nan_sd_stats { + uint32 sdftx; + uint32 sdfrx; + uint32 sdsrffail; + uint32 sdrejrssi; + uint32 sdfollowuprx; + uint32 sdsubmatch; + uint32 sdpubreplied; + uint32 sdmftfail1; + uint32 sdmftfail2; + uint32 sdmftfail3; + uint32 sdmftfail4; +} wl_nan_sd_stats_t; + +/* Flag bits for sd transmit message (wl_nan_sd_transmit_t flags) */ + +/* If set, host wont rec "tx status" event for tx-followup msg */ +#define WL_NAN_FUP_SUPR_EVT_TXS 0x01 +/* more flags can be added here */ + +/* + * WL_NAN_CMD_SD_TRANSMIT + * WL_NAN_CMD_SD_FUP_TRANSMIT + */ +typedef struct wl_nan_sd_transmit { + uint8 local_service_id; /* Sender Service ID */ + uint8 requestor_service_id; /* Destination Service ID */ + struct ether_addr destination_addr; /* Destination MAC */ + uint16 token; /* follow_up_token when a 
follow-up + * msg is queued successfully + */ + uint8 priority; /* requested relative prio */ + uint8 flags; /* Flags for tx follow-up msg */ + uint16 opt_len; /* total length of optional tlvs */ + uint8 opt_tlv[]; /* optional tlvs in bcm_xtlv_t type */ +} wl_nan_sd_transmit_t; + +/* + * WL_NAN_CMD_SYNC_TSRESERVE + */ +/** time slot */ +#define NAN_MAX_TIMESLOT 32 +typedef struct wl_nan_timeslot { + uint32 abitmap; /**< available bitmap */ + uint32 chanlist[NAN_MAX_TIMESLOT]; +} wl_nan_timeslot_t; + +/* + * Deprecated + * + * WL_NAN_CMD_SYNC_TSRELEASE + */ +typedef uint32 wl_nan_ts_bitmap_t; + +/* nan passive scan params */ +#define NAN_SCAN_MAX_CHCNT 8 +/* nan merge scan params */ +typedef struct wl_nan_scan_params { + /* dwell time of discovery channel corresponds to band_idx. + * If set to 0 then fw default will be used. + */ + uint16 dwell_time; + /* scan period of discovery channel corresponds to band_idx. + * If set to 0 then fw default will be used. + */ + uint16 scan_period; + /* band index of discovery channel */ + uint8 band_index; +} wl_nan_scan_params_t; + +/* + * WL_NAN_CMD_DBG_SCAN + */ +typedef struct wl_nan_dbg_scan { + struct ether_addr cid; + uint8 pad[2]; +} wl_nan_dbg_scan_t; + +/* NAN_DBG_LEVEL */ +typedef struct wl_nan_dbg_level { + uint32 nan_err_level; /* for Error levels */ + uint32 nan_dbg_level; /* for bebug logs and trace */ + uint32 nan_info_level; /* for dumps like prhex */ +} wl_nan_dbg_level_t; + +/* + * WL_NAN_CMD_DBG_EVENT_MASK + */ +typedef uint32 wl_nan_event_mask_t; + +/* + * WL_NAN_CMD_DBG_EVENT_CHECK + */ +typedef uint8 wl_nan_dbg_ifname[BCM_MSG_IFNAME_MAX]; + +/* + * WL_NAN_CMD_DBG_DUMP + * WL_NAN_CMD_DBG_CLEAR + */ +enum wl_nan_dbg_dump_type { + WL_NAN_DBG_DT_RSSI_DATA = 1, + WL_NAN_DBG_DT_STATS_DATA = 2, + /* + * Additional enums before this line + */ + WL_NAN_DBG_DT_INVALID +}; +typedef int8 wl_nan_dbg_dump_type_t; + +/** various params and ctl swithce for nan_debug instance */ +/* + * WL_NAN_CMD_DBG_DEBUG + */ +typedef 
struct wl_nan_debug_params { + uint16 cmd; /**< debug cmd to perform a debug action */ + uint16 status; + uint32 msglevel; /**< msg level if enabled */ + uint8 enabled; /**< runtime debuging enabled */ + uint8 collect; + uint8 PAD[2]; +} wl_nan_debug_params_t; + +typedef struct wl_nan_sched_svc_timeslot_s { + uint32 abitmap; /* availability bitmap */ + uint32 chanlist[NAN_MAX_TIMESLOT]; + uint8 res; /* resolution: 0 = 16ms, 1 = 32ms, 2 = 64ms 3 = reserved. REfer NAN spec */ + uint8 mapid; /* mapid from NAN spec. Used to differentiate 2G Vs 5G band */ + uint8 PAD[2]; +} wl_nan_sched_svc_timeslot_t; + +/* + * WL_NAN_CMD_DATA_DP_IDLE_PERIOD + */ +typedef uint16 wl_nan_ndp_idle_period_t; + +/* + * WL_NAN_CMD_DATA_DP_HB_DURATION + */ +typedef uint16 wl_nan_ndp_hb_duration_t; + +/* nan cmd IDs */ +enum wl_nan_cmds { + /* nan cfg /disc & dbg ioctls */ + WL_NAN_CMD_ENABLE = 1, + WL_NAN_CMD_ATTR = 2, + WL_NAN_CMD_NAN_JOIN = 3, + WL_NAN_CMD_LEAVE = 4, + WL_NAN_CMD_MERGE = 5, + WL_NAN_CMD_STATUS = 6, + WL_NAN_CMD_TSRESERVE = 7, + WL_NAN_CMD_TSSCHEDULE = 8, + WL_NAN_CMD_TSRELEASE = 9, + WL_NAN_CMD_OUI = 10, + WL_NAN_CMD_OOB_AF = 11, + WL_NAN_CMD_SCAN_PARAMS = 12, + + WL_NAN_CMD_COUNT = 15, + WL_NAN_CMD_CLEARCOUNT = 16, + + /* discovery engine commands */ + WL_NAN_CMD_PUBLISH = 20, + WL_NAN_CMD_SUBSCRIBE = 21, + WL_NAN_CMD_CANCEL_PUBLISH = 22, + WL_NAN_CMD_CANCEL_SUBSCRIBE = 23, + WL_NAN_CMD_TRANSMIT = 24, + WL_NAN_CMD_CONNECTION = 25, + WL_NAN_CMD_SHOW = 26, + WL_NAN_CMD_STOP = 27, /* stop nan for a given cluster ID */ + /* nan debug iovars & cmds */ + WL_NAN_CMD_SCAN = 47, + WL_NAN_CMD_SCAN_RESULTS = 48, + WL_NAN_CMD_EVENT_MASK = 49, + WL_NAN_CMD_EVENT_CHECK = 50, + WL_NAN_CMD_DUMP = 51, + WL_NAN_CMD_CLEAR = 52, + WL_NAN_CMD_RSSI = 53, + + WL_NAN_CMD_DEBUG = 60, + WL_NAN_CMD_TEST1 = 61, + WL_NAN_CMD_TEST2 = 62, + WL_NAN_CMD_TEST3 = 63, + WL_NAN_CMD_DISC_RESULTS = 64, + /* nan 2.0 data path commands */ + WL_NAN_CMD_DATAPATH = 65 +}; + +/* NAN DP interface commands */ +enum 
wl_nan_dp_cmds { + /* nan 2.0 ioctls */ + WL_NAN_CMD_DP_CAP = 1000, + WL_NAN_CMD_DP_CONFIG = 1001, + WL_NAN_CMD_DP_CREATE = 1002, + WL_NAN_CMD_DP_AUTO_CONNECT = 1003, + WL_NAN_CMD_DP_DATA_REQ = 1004, + WL_NAN_CMD_DP_DATA_RESP = 1005, + WL_NAN_CMD_DP_SCHED_UPD = 1006, + WL_NAN_CMD_DP_END = 1007, + WL_NAN_CMD_DP_CONNECT = 1008, + WL_NAN_CMD_DP_STATUS = 1009 +}; + +/* TODO Should remove this fixed length */ +#define WL_NAN_DATA_SVC_SPEC_INFO_LEN 32 /* arbitrary */ +#define WL_NAN_DP_MAX_SVC_INFO 0xFF +#define WL_NAN_DATA_NDP_INST_SUPPORT 16 + +/* Nan flags (16 bits) */ +#define WL_NAN_DP_FLAG_SVC_INFO 0x0001 +#define WL_NAN_DP_FLAG_CONFIRM 0x0002 +#define WL_NAN_DP_FLAG_EXPLICIT_CFM 0x0004 +#define WL_NAN_DP_FLAG_SECURITY 0x0008 +#define WL_NAN_DP_FLAG_HAST_NDL_COUNTER 0x0010 /* Host assisted NDL counter */ + +/* NAN Datapath host status */ +#define WL_NAN_DP_STATUS_ACCEPTED 1 +#define WL_NAN_DP_STATUS_REJECTED 0 + +/* to be done */ +typedef struct wl_nan_dp_cap { + uint8 tbd; +} wl_nan_dp_cap_t; + +/** The service hash (service id) is exactly this many bytes. */ +#define WL_NAN_SVC_HASH_LEN 6 +/** Number of hash functions per bloom filter */ +#define WL_NAN_HASHES_PER_BLOOM 4 +/* no. 
of max last disc results */ +#define WL_NAN_MAX_DISC_RESULTS 3 + +/* NAN security related defines */ +/* NCS-SK related */ +#define WL_NAN_NCS_SK_PMK_LEN 32 +#define WL_NAN_NCS_SK_PMKID_LEN 16 + +/* recent discovery results */ +typedef struct wl_nan_disc_result_s +{ + wl_nan_instance_id_t instance_id; /* instance id of pub/sub req */ + wl_nan_instance_id_t peer_instance_id; /* peer instance id of pub/sub req/resp */ + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* service descp string */ + struct ether_addr peer_mac; /* peer mac address */ +} wl_nan_disc_result_t; + +/* list of recent discovery results */ +typedef struct wl_nan_disc_results_s +{ + wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS]; +} wl_nan_disc_results_list_t; + +/* nan 1.0 events */ +/* To be deprecated - will be replaced by event_disc_result */ +typedef struct wl_nan_ev_disc_result { + wl_nan_instance_id_t pub_id; + wl_nan_instance_id_t sub_id; + struct ether_addr pub_mac; + uint8 opt_tlvs[0]; +} wl_nan_ev_disc_result_t; + +typedef struct wl_nan_event_disc_result { + wl_nan_instance_id_t pub_id; + wl_nan_instance_id_t sub_id; + struct ether_addr pub_mac; + int8 publish_rssi; /* publisher RSSI */ + uint8 attr_num; + uint16 attr_list_len; /* length of the all the attributes in the SDF */ + uint8 attr_list[0]; /* list of NAN attributes */ +} wl_nan_event_disc_result_t; + +typedef struct wl_nan_ev_p2p_avail { + struct ether_addr sender; + struct ether_addr p2p_dev_addr; + uint8 dev_role; + uint8 resolution; + uint8 repeat; + uint8 pad[3]; + chanspec_t chanspec; + uint32 avail_bmap; +} wl_nan_ev_p2p_avail_t; + +/* +* discovery interface event structures * +*/ + +/* mandatory parameters for OOB action frame */ +/* single-shot when bitmap and offset are set to 0; periodic otherwise */ +typedef struct wl_nan_oob_af_params_s +{ + /* bitmap for the 32 timeslots in 512TU dw interval */ + uint32 ts_map; + /* offset from start of dw, in us */ + uint32 tx_offset; + struct ether_addr bssid; + struct 
ether_addr dest; + uint32 pkt_lifetime; + uint16 payload_len; + uint8 payload[1]; +} wl_nan_oob_af_params_t; + +/* NAN Ranging */ + +/* Bit defines for global flags */ +#define WL_NAN_RANGING_ENABLE 1 /**< enable RTT */ +#define WL_NAN_RANGING_RANGED 2 /**< Report to host if ranged as target */ +typedef struct nan_ranging_config { + uint32 chanspec; /**< Ranging chanspec */ + uint16 timeslot; /**< NAN RTT start time slot 1-511 */ + uint16 duration; /**< NAN RTT duration in ms */ + struct ether_addr allow_mac; /**< peer initiated ranging: the allowed peer mac + * address, a unicast (for one peer) or + * a broadcast for all. Setting it to all zeros + * means responding to none,same as not setting + * the flag bit NAN_RANGING_RESPOND + */ + uint16 flags; +} wl_nan_ranging_config_t; + +/** list of peers for self initiated ranging */ +/** Bit defines for per peer flags */ +#define WL_NAN_RANGING_REPORT (1<<0) /**< Enable reporting range to target */ +typedef struct nan_ranging_peer { + uint32 chanspec; /**< desired chanspec for this peer */ + uint32 abitmap; /**< available bitmap */ + struct ether_addr ea; /**< peer MAC address */ + uint8 frmcnt; /**< frame count */ + uint8 retrycnt; /**< retry count */ + uint16 flags; /**< per peer flags, report or not */ + uint16 PAD; +} wl_nan_ranging_peer_t; +typedef struct nan_ranging_list { + uint8 count; /**< number of MAC addresses */ + uint8 num_peers_done; /**< host set to 0, when read, shows number of peers + * completed, success or fail + */ + uint8 num_dws; /**< time period to do the ranging, specified in dws */ + uint8 reserve; /**< reserved field */ + wl_nan_ranging_peer_t rp[1]; /**< variable length array of peers */ +} wl_nan_ranging_list_t; + +/* ranging results, a list for self initiated ranging and one for peer initiated ranging */ +/* There will be one structure for each peer */ +#define WL_NAN_RANGING_STATUS_SUCCESS 1 +#define WL_NAN_RANGING_STATUS_FAIL 2 +#define WL_NAN_RANGING_STATUS_TIMEOUT 3 +#define 
WL_NAN_RANGING_STATUS_ABORT 4 /**< with partial results if sounding count > 0 */ +typedef struct nan_ranging_result { + uint8 status; /**< 1: Success, 2: Fail 3: Timeout 4: Aborted */ + uint8 sounding_count; /**< number of measurements completed (0 = failure) */ + struct ether_addr ea; /**< initiator MAC address */ + uint32 chanspec; /**< Chanspec where the ranging was done */ + uint32 timestamp; /**< 32bits of the TSF timestamp ranging was completed at */ + uint32 distance; /**< mean distance in meters expressed as Q4 number. + * Only valid when sounding_count > 0. Examples: + * 0x08 = 0.5m + * 0x10 = 1m + * 0x18 = 1.5m + * set to 0xffffffff to indicate invalid number + */ + int32 rtt_var; /**< standard deviation in 10th of ns of RTTs measured. + * Only valid when sounding_count > 0 + */ + struct ether_addr tgtea; /**< target MAC address */ + uint8 PAD[2]; +} wl_nan_ranging_result_t; +typedef struct nan_ranging_event_data { + uint8 mode; /**< 1: Result of host initiated ranging */ + /* 2: Result of peer initiated ranging */ + uint8 reserved; + uint8 success_count; /**< number of peers completed successfully */ + uint8 count; /**< number of peers in the list */ + wl_nan_ranging_result_t rr[1]; /**< variable array of ranging peers */ +} wl_nan_ranging_event_data_t; + +enum { + WL_NAN_STATS_RSSI = 1, + WL_NAN_STATS_DATA = 2, + WL_NAN_STATS_DP = 3, +/* + * ***** ADD before this line **** + */ + WL_NAN_STATS_INVALID +}; +typedef struct wl_nan_dp_stats { + uint32 tbd; /* TBD */ +} wl_nan_dp_stats_t; + +typedef struct wl_nan_stats { + /* general */ + uint32 cnt_dw; /* DW slots */ + uint32 cnt_disc_bcn_sch; /* disc beacon slots */ + uint32 cnt_amr_exp; /* count of ambtt expiries resetting roles */ + uint32 cnt_bcn_upd; /* count of beacon template updates */ + uint32 cnt_bcn_tx; /* count of sync & disc bcn tx */ + uint32 cnt_bcn_rx; /* count of sync & disc bcn rx */ + uint32 cnt_sync_bcn_tx; /* count of sync bcn tx within DW */ + uint32 cnt_disc_bcn_tx; /* count of disc 
bcn tx */ + uint32 cnt_sdftx_bcmc; /* count of bcast/mcast sdf tx */ + uint32 cnt_sdftx_uc; /* count of unicast sdf tx */ + uint32 cnt_sdftx_fail; /* count of unicast sdf tx fails */ + uint32 cnt_sdf_rx; /* count of sdf rx */ + /* NAN roles */ + uint32 cnt_am; /* anchor master */ + uint32 cnt_master; /* master */ + uint32 cnt_nms; /* non master sync */ + uint32 cnt_nmns; /* non master non sync */ + /* TX */ + uint32 cnt_err_txtime; /* txtime in sync bcn frame not a multiple of dw intv */ + uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */ + uint32 cnt_err_bcn_tx; /* beacon tx error */ + uint32 cnt_sync_bcn_tx_miss; /* no. of times time delta between 2 cosequetive + * sync beacons is more than expected + */ + /* MSCH */ + uint32 cnt_err_msch_reg; /* error is Dw/disc reg with msch */ + uint32 cnt_err_wrong_ch_cb; /* count of msch calbacks in wrong channel */ + uint32 cnt_dw_skip; /* count of DW rejected */ + uint32 cnt_disc_skip; /* count of disc bcn rejected */ + uint32 cnt_dw_start_early; /* msch cb not at registered time */ + uint32 cnt_dw_start_late; /* no. of delays in slot start */ + /* SCANS */ + uint32 cnt_mrg_scan; /* count of merge scans completed */ + uint32 cnt_err_ms_rej; /* number of merge scan failed */ + uint32 cnt_scan_results; /* no. of nan beacons scanned */ + uint32 cnt_join_scan_rej; /* no. of join scans rejected */ + uint32 cnt_nan_scan_abort; /* no. of join scans rejected */ + /* enable/disable */ + uint32 cnt_nan_enab; /* no. of times nan feature got enabled */ + uint32 cnt_nan_disab; /* no. 
of times nan feature got disabled */ + uint32 cnt_sync_bcn_rx; /* count of sync bcn rx within DW */ + uint32 cnt_sync_bcn_rx_tu[3]; /* Delta bw the tsf in bcn & remote */ + uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */ + uint32 cnt_role_am_dw; /* anchor master role due to dw */ + uint32 cnt_am_hop_err; /* wrong hopcount set for AM */ +} wl_nan_stats_t; + +#define WL_NAN_MAC_MAX_NAN_PEERS 6 +#define WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER 10 + +typedef struct wl_nan_nbr_rssi { + uint8 rx_chan; /* channel number on which bcn rcvd */ + uint8 PAD[3]; + int32 rssi_raw; /* received rssi value */ + int32 rssi_avg; /* normalized rssi value */ +} wl_nan_peer_rssi_t; + +typedef struct wl_nan_peer_rssi_entry { + struct ether_addr mac; /* peer mac address */ + uint8 flags; /* TODO:rssi data order: latest first, oldest first etc */ + uint8 rssi_cnt; /* rssi data sample present */ + wl_nan_peer_rssi_t rssi[WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER]; /* RSSI data frm peer */ +} wl_nan_peer_rssi_entry_t; + +#define WL_NAN_PEER_RSSI 0x1 +#define WL_NAN_PEER_RSSI_LIST 0x2 + +typedef struct wl_nan_nbr_rssi_data { + uint8 flags; /* this is a list or single rssi data */ + uint8 peer_cnt; /* number of peers */ + uint16 pad; /* padding */ + wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */ +} wl_nan_peer_rssi_data_t; + +/* WL_NAN_CMD_DBG_DUMP, GET Resp */ +typedef struct wl_nan_dbg_dump_rsp { + wl_nan_dbg_dump_type_t dump_type; /* dump data type */ + uint8 pad[3]; + union { + wl_nan_peer_rssi_data_t peer_rssi; + wl_nan_stats_t nan_stats; + } u; +} wl_nan_dbg_dump_rsp_t; + +enum nan_termination_status { + NAN_TERM_REASON_INVALID = 1, + NAN_TERM_REASON_TIMEOUT = 2, + NAN_TERM_REASON_USER_REQ = 3, + NAN_TERM_REASON_FAILURE = 4, + NAN_TERM_REASON_COUNT_REACHED = 5, + NAN_TERM_REASON_DE_SHUTDOWN = 6, + NAN_TERM_REASON_DISABLE_IN_PROGRESS = 7 +}; + +/* nan2 data iovar */ +/* nan2 qos */ +typedef struct wl_nan_dp_qos +{ + uint8 tid; + uint8 pad; + uint16 pkt_size; + uint16 mean_rate; + 
uint16 svc_interval; +} wl_nan_dp_qos_t; + +#define WL_NAN_NDL_QOS_MAX_LAT_NO_PREF 0xFFFF + +/* nan2 qos */ +typedef struct wl_nan_ndl_qos +{ + uint8 min_slots; /* min slots per dw interval */ + uint8 pad; + uint16 max_latency; /* max latency */ +} wl_nan_ndl_qos_t; + +/* ndp config */ +typedef struct wl_nan_ndp_config +{ + uint8 ndp_id; + uint8 pub_id; + struct ether_addr pub_addr; + struct ether_addr data_addr; /* configure local data addr */ + struct ether_addr init_data_addr; /* initiator data addr */ + uint8 svc_spec_info[WL_NAN_DATA_SVC_SPEC_INFO_LEN]; + wl_nan_dp_qos_t qos; + uint16 avail_len; + uint8 pad[3]; + uint8 data[1]; +} wl_nan_ndp_config_t; + +/* nan2 device capabilities */ +typedef struct wl_nan_ndp_oper_cfg { + uint8 awake_dw_2g; + uint8 awake_dw_5g; + uint8 bands_supported; + uint8 op_mode; +} wl_nan_ndp_oper_cfg_t; + +typedef uint8 wl_nan_ndp_ndpid_t; +typedef uint8 wl_nan_ndp_conn_t; + +#define WL_NAN_INVALID_NDPID 0 /* reserved ndp id */ + +typedef struct wl_nan_dp_req { + uint8 type; /* 0- unicast 1 - multicast */ + uint8 pub_id; /* Publisher ID */ + uint16 flags; + struct ether_addr peer_mac; /* Peer's NMI addr */ + struct ether_addr mcast_mac; /* Multicast addr */ + struct ether_addr ndi; + wl_nan_dp_qos_t qos; + wl_nan_ndl_qos_t ndl_qos; /* ndl qos */ + uint8 tlv_params[]; /* xtlv parameters for command */ +} wl_nan_dp_req_t; + +/* TODO Need to replace ndp_id with lndp_id */ +/* Return structure to data req IOVAR */ +typedef struct wl_nan_dp_req_ret { + struct ether_addr indi; /* Initiators data mac addr */ + uint8 ndp_id; /* Initiators ndpid */ + uint8 pad; +} wl_nan_dp_req_ret_t; + +typedef struct wl_nan_dp_resp { + uint8 type; /* 0- unicast 1 - multicast */ + uint8 status; /* Accepted or Rejected */ + uint8 reason_code; + /* Local NDP ID for unicast, mc_id for multicast, 0 for implicit NMSG */ + uint8 ndp_id; /* can be host indp id also */ + wl_nan_dp_qos_t qos; + /* Initiator data address for unicast or multicast address for multicast 
*/ + struct ether_addr mac_addr; + struct ether_addr ndi; + uint16 flags; + wl_nan_ndl_qos_t ndl_qos; /* ndl qos */ + uint8 tlv_params[]; /* xtlv parameters for command */ +} wl_nan_dp_resp_t; + +/* Return structure to data resp IOVAR */ +typedef struct wl_nan_dp_resp_ret { + uint8 nmsgid; /* NMSG ID or for multicast else 0 */ + uint8 pad[3]; +} wl_nan_dp_resp_ret_t; + +typedef struct wl_nan_dp_conf { + uint8 lndp_id; /* can be host ndp id */ + uint8 status; /* Accepted or Rejected */ + uint8 pad[2]; +} wl_nan_dp_conf_t; + +typedef struct wl_nan_dp_end +{ + uint8 lndp_id; /* can be host ndp id */ + uint8 status; + struct ether_addr mac_addr; /* initiator's ndi */ +} wl_nan_dp_end_t; + +typedef struct wl_nan_dp_schedupd { + uint8 type; /* 0: unicast, 1: multicast */ + uint8 flags; + struct ether_addr addr; /* peer NMI or multicast addr */ + wl_nan_dp_qos_t qos; + wl_nan_ndl_qos_t ndl_qos; /* ndl qos */ + uint8 map_id; + uint8 pad; + uint16 hostseq; +} wl_nan_dp_schedupd_t; + +/* set: update with notification, unset: NDL setup handshake */ +#define WL_NAN_DP_SCHEDUPD_NOTIF (1 << 0) + +/* list ndp ids */ +typedef struct wl_nan_ndp_id_list { + uint16 ndp_count; + uint8 lndp_id[]; +} wl_nan_ndp_id_list_t; + +/* nan2 status */ +typedef struct ndp_session { + uint8 lndp_id; + uint8 state; + uint8 pub_id; + uint8 pad; +} ndp_session_t; + +typedef struct wl_nan_ndp_status { + struct ether_addr peer_nmi; + struct ether_addr peer_ndi; + ndp_session_t session; + struct ether_addr lndi; + uint8 pad[2]; +} wl_nan_ndp_status_t; + +#define NAN_DP_OPAQUE_INFO_DP_RESP 0x01 +#define NAN_DP_OPAQUE_INFO_DP_CONF 0x02 + +typedef struct wl_nan_dp_opaque_info { + uint8 frm_mask; /* dp_resp / dp_conf as defined above. */ + struct ether_addr initiator_ndi; /* NDI to match in the dp_req. */ + uint8 pub_id; /* publish id where the opaque data is included. */ + uint8 len; /* len of opaque_info[]. 
*/ + uint8 pad[3]; + uint8 opaque_info[0]; +} wl_nan_dp_opaque_info_t; + +/* events */ +#define NAN_DP_SESSION_UNICAST 0 +#define NAN_DP_SESSION_MULTICAST 1 +#define NAN_DP_SECURITY_NONE 0 +#define NAN_DP_SECURITY_CSID 1 +#define NAN_DP_SECURITY_MK 2 +#define WL_NAN_DATA_NMSGID_LEN 8 /* 8 bytes as per nan spec */ + +/* Common event structure for Nan Datapath + * Used for sending NDP Indication, Response, Confirmation, Securty Install and Establish events + */ +typedef struct wl_nan_ev_datapath_cmn { + uint8 type; + /* ndp_id is valid only if type is unicast */ + uint8 ndp_id; + uint8 pub_id; + uint8 security; + /* Following two fields are valid only if type is unicast */ + struct ether_addr initiator_ndi; + struct ether_addr responder_ndi; + struct ether_addr peer_nmi; + uint8 status; + uint8 role; + /* Following two fields are valid only if type is multicast */ + uint8 nmsg_id[WL_NAN_DATA_NMSGID_LEN]; + uint8 mc_id; + uint8 pad; + uint16 opt_tlv_len; + uint8 opt_tlvs[]; +} wl_nan_ev_datapath_cmn_t; + +/* this is obsolete - DON'T USE */ +typedef struct wl_nan_ev_datapath_end { + uint8 ndp_id; + uint8 status; + uint8 pad[2]; + struct ether_addr peer_nmi; + struct ether_addr peer_ndi; +} wl_nan_ev_datapath_end_t; + +typedef struct wl_tsf { + uint32 tsf_l; + uint32 tsf_h; +} wl_tsf_t; + +typedef struct wl_nan_ev_rx_bcn { + wl_tsf_t tsf; + uint16 bcn_len; + uint8 pad[2]; + uint8 bcn[0]; +} wl_nan_ev_rx_bcn_t; + +/* reason of host assist request */ +enum wl_nan_host_assist_reason { + WL_NAN_HAST_REASON_NONE = 0, + + /* reason for host assist request */ + WL_NAN_HAST_REASON_NO_CRB = 1, /* NDL: no common NA */ + WL_NAN_HAST_REASON_NDC = 2, /* NDL: NDC not compliant */ + WL_NAN_HAST_REASON_IMMUT = 3, /* NDL: peer immutable schedule */ + WL_NAN_HAST_REASON_RNG = 4, /* NDL: ranging schedule */ + WL_NAN_HAST_REASON_QOS = 5, /* NDL: QoS not satisfied */ + WL_NAN_HAST_REASON_SVC_NDI_MISSING = 6 /* SD: NDI associated with svc is missing */ +}; +typedef uint8 
wl_nan_host_assist_reason_t; + +/* WL_NAN_XTLV_HOST_ASSIST_REQ */ +typedef struct wl_nan_host_assist_req { + struct ether_addr peer_nmi; /* peer nmi */ + struct ether_addr initiator_ndi; /* initiator ndi */ + uint8 indp_id; /* initiator NDP ID */ + wl_nan_frame_type_t frm_type; /* received NAF type */ + wl_nan_host_assist_reason_t reason; /* reason of host assist request */ + uint8 pub_id; /* Publish ID (valid for WL_NAN_FRM_TYPE_DP_REQ) */ + uint8 pad[2]; +} wl_nan_host_assist_req_t; + +/* nan sub-features */ +enum wl_nan_fw_cap_flag1 { + WL_NAN_FW_CAP_FLAG_NONE = 0x00000000, /* dummy */ + WL_NAN_FW_CAP_FLAG1_AVAIL = 0x00000001, + WL_NAN_FW_CAP_FLAG1_DISC = 0x00000002, + WL_NAN_FW_CAP_FLAG1_DATA = 0x00000004, + WL_NAN_FW_CAP_FLAG1_SEC = 0x00000008, + WL_NAN_FW_CAP_FLAG1_RANGE = 0x00000010, + WL_NAN_FW_CAP_FLAG1_WFA_TB = 0x00000020, + WL_NAN_FW_CAP_FLAG1_DAM = 0x00000040, + WL_NAN_FW_CAP_FLAG1_DAM_STRICT = 0x00000080, + WL_NAN_FW_CAP_FLAG1_DAM_AUTO = 0x00000100, + WL_NAN_FW_CAP_FLAG1_DBG = 0x00000200, + WL_NAN_FW_CAP_FLAG1_BCMC_IN_NDC = 0x00000400, + WL_NAN_FW_CAP_FLAG1_CHSTATS = 0x00000800, + WL_NAN_FW_CAP_FLAG1_ASSOC_COEX = 0x00001000, + WL_NAN_FW_CAP_FLAG1_FASTDISC = 0x00002000, + WL_NAN_FW_CAP_FLAG1_NO_ID_GEN = 0x00004000, + WL_NAN_FW_CAP_FLAG1_DP_OPAQUE_DATA = 0x00008000, + WL_NAN_FW_CAP_FLAG1_NSR2 = 0x00010000, + WL_NAN_FW_CAP_FLAG1_NSR2_SAVE = 0x00020000, + WL_NAN_FW_CAP_FLAG1_NANHO = 0x00040000 +}; + +/* WL_NAN_XTLV_GEN_FW_CAP */ +typedef struct wl_nan_fw_cap { + uint32 flags1; /* nan sub-features compiled in firmware */ + uint32 flags2; /* for more sub-features in future */ + uint8 max_svc_publishes; /* max num of service publish */ + uint8 max_svc_subscribes; /* max num of service subscribe */ + uint8 max_lcl_sched_maps; /* max num of local schedule map */ + uint8 max_lcl_ndc_entries; /* max num of local NDC entry */ + uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */ + uint8 max_peer_entries; /* max num of peer entry */ + uint8 
max_ndp_sessions; /* max num of NDP session */ + uint8 max_concurrent_nan_clusters; /* max num of concurrent clusters */ + uint16 max_service_name_len; /* max service name length */ + uint16 max_match_filter_len; /* max match filter length */ + uint16 max_total_match_filter_len; /* max total match filter length */ + uint16 max_service_specific_info_len; /* max service specific info length */ + uint16 max_vsa_data_len; /* max vendor specific attrib data length */ + uint16 max_mesh_data_len; /* max mesh data length */ + uint16 max_app_info_len; /* max app info length */ + uint16 max_sdea_svc_specific_info_len; /* max sdea ser specific info length */ + uint8 max_queued_tx_followup_msgs; /* max no. of queued tx followup msgs */ + uint8 max_subscribe_address; /* max subscribe addresses supported */ + uint8 ndp_supported_bands; /* number of ndp supported bands */ + uint8 is_ndp_security_supported; /* if secure ndp is supported */ + uint8 cipher_suites_supported_mask; /* bitmask for suites supported */ + uint8 pad[3]; +} wl_nan_fw_cap_t; + +/* nan cipher suite support mask bits */ +#define WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK 0x01 +#define WL_NAN_CIPHER_SUITE_SHARED_KEY_256_MASK 0x02 + +/* NAN Save Restore */ +#define WL_NAN_NSR2_INFO_MAX_SIZE 2048 /* arbitrary */ + +/* WL_NAN_XTLV_NSR2_PEER */ +typedef struct wl_nan_nsr_peer_info { + struct ether_addr nmi; + uint8 l_min_slots; /* local QoS min slots */ + uint8 p_min_slots; /* peer QoS min slots */ + uint16 l_max_latency; /* local QoS max latency */ + uint16 p_max_latency; /* peer QoS max latency */ + uint8 num_map; /* num of NA map */ + uint8 pad; + uint16 attrs_len; /* total len of following attrs */ + uint8 attrs[]; /* peer attributes (NA/NDC/ULW/DevCap/Element container) */ +} wl_nan_nsr_peer_info_t; + +enum wl_nan_nsr_ndp_flag { + WL_NAN_NSR_NDP_FLAG_LCL_INITATOR = 0x0001, + WL_NAN_NSR_NDP_FLAG_MCAST = 0x0002 +}; +typedef uint16 wl_nan_nsr_ndp_flag_t; + +/* WL_NAN_XTLV_NSR2_NDP */ +typedef struct 
wl_nan_nsr_ndp_info { + struct ether_addr peer_nmi; + struct ether_addr peer_ndi; + struct ether_addr lcl_ndi; + uint16 flags; /* wl_nan_nsr_ndp_flag_t */ + uint8 pub_id; /* publish id */ + uint8 indp_id; /* initiator's ndp id */ + uint8 last_token; /* last NDP dialog token */ + uint8 pad; +} wl_nan_nsr_ndp_info_t; + +/* NAN2.0 Ranging definitions */ + +/* result indication bit map */ +#define NAN_RANGE_INDICATION_CONT (1<<0) +#define NAN_RANGE_INDICATION_INGRESS (1<<1) +#define NAN_RANGE_INDICATION_EGRESS (1<<2) + +/* responder flags */ +#define NAN_RANGE_FLAG_AUTO_ACCEPT (1 << 0) +#define NAN_RANGE_FLAG_RESULT_REQUIRED (1 << 1) + +typedef struct wl_nan_range_req { + struct ether_addr peer; + uint8 publisher_id; + uint8 indication; /* bit map for result event */ + uint32 resolution; /* default millimeters */ + uint32 ingress; /* ingress limit in mm */ + uint32 egress; /* egress limit in mm */ + uint32 interval; /* max interval(in TU) b/w two ranging measurements */ +} wl_nan_range_req_t; + +#define NAN_RNG_REQ_IOV_LEN 24 + +typedef uint8 wl_nan_range_id; + +typedef struct wl_nan_range_resp { + wl_nan_range_id range_id; + uint8 flags; /* auto response, range result required */ + uint8 status; /* accept, reject */ + uint8 indication; /* bit map for result event */ + uint32 resolution; /* default millimeters */ + uint32 ingress; /* ingress limit in mm */ + uint32 egress; /* egress limit in mm */ + uint32 interval; /* max interval(in TU) b/w two ranging measurements */ +} wl_nan_range_resp_t; + +#define NAN_RNG_RESP_IOV_LEN 20 + +#define NAN_RNG_MAX_IOV_LEN 255 + +typedef struct wl_nan_ev_rng_req_ind { + struct ether_addr peer_m_addr; + uint8 rng_id; + /* ftm parameters */ + uint8 max_burst_dur; + uint8 min_ftm_delta; + uint8 max_num_ftm; + uint8 ftm_format_bw; + /* location info availability bit map */ + uint8 lc_info_avail; + /* Last movement indication */ + uint16 last_movement; + uint8 pad[2]; +} wl_nan_ev_rng_req_ind_t; + +#define NAN_RNG_REQ_IND_SIZE 14 + 
+typedef struct wl_nan_ev_rng_rpt_ind { + uint32 dist_mm; /* in millimeter */ + struct ether_addr peer_m_addr; + uint8 indication; /* indication definitions mentioned above */ + uint8 rng_id; +} wl_nan_ev_rng_rpt_ind_t; + +#define NAN_RNG_RPT_IND_SIZE 12 + +/* nan ranging termination reason codes */ +#define NAN_RNG_TERM_IDLE_TIMEOUT 1 /* no ftms from peer */ +#define NAN_RNG_TERM_PEER_REQ 2 +#define NAN_RNG_TERM_USER_REQ 3 +#define NAN_RNG_TERM_RNG_RESP_TIMEOUT 4 + +typedef struct wl_nan_ev_rng_term_ind { + struct ether_addr peer_m_addr; + uint8 reason_code; + uint8 rng_id; +} wl_nan_ev_rng_term_ind_t; + +#define NAN_RNG_TERM_IND_SIZE 8 + +typedef struct wl_nan_ev_rng_resp { + struct ether_addr peer_m_addr; + uint8 status; + uint8 rng_id; +} wl_nan_ev_rng_resp_t; + +/* Used by NDL schedule events - + * WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF, WL_NAN_EVENT_PEER_SCHED_REQ + * WL_NAN_EVENT_PEER_SCHED_RESP, WL_NAN_EVENT_PEER_SCHED_CONF + */ +typedef struct wl_nan_ev_sched_info { + struct ether_addr peer_nmi; + uint8 ndl_status; /* applies only to sched resp/conf */ + uint8 pad; + uint16 opt_tlv_len; + uint8 opt_tlvs[]; +} wl_nan_ev_sched_info_t; + +/* WL_NAN_EVENT_CHAN_BOUNDARY */ +typedef struct wl_nan_chbound_info { + uint32 cluster_tsf_h; /* Current Cluster TSF High */ + uint32 cluster_tsf_l; /* Current Cluster TSF Low */ + uint16 cur_chspec; + uint16 opt_tlvs_len; + uint8 opt_tlvs[]; +} wl_nan_chbound_info_t; + +/* channel stats (includes nan & non-nan) */ + +/* WL_NAN_XTLV_CCA_STATS */ +typedef struct wl_nan_cca_stats { + uint16 chanspec; + uint8 pad[2]; + uint32 sample_dur; + + uint32 congest_ibss; + uint32 congest_obss; + uint32 interference; +} wl_nan_cca_stats_t; + +/* WL_NAN_XTLV_PER_STATS */ +typedef struct wl_nan_per_stats_s { + uint16 chanspec; + uint8 pad[2]; + uint32 sample_dur; + + uint32 txframe; /* tx data frames */ + uint32 txretrans; /* tx mac retransmits */ + uint32 txerror; /* tx data errors */ + uint32 txctl; /* tx management frames */ + uint32 
txserr; /* tx status errors */ + + uint32 rxframe; /* rx data frames */ + uint32 rxerror; /* rx data errors */ + uint32 rxctl; /* rx management frames */ + + uint32 txbar; /* tx bar */ + uint32 rxbar; /* rx bar */ + uint32 txaction; /* tx action frame */ + uint32 rxaction; /* rx action frame */ + uint32 txlost; /* lost packets reported in txs */ + uint32 rxback; /* rx block ack */ + uint32 txback; /* tx bloak ack */ +} wl_nan_per_stats_t; + +/* fast discovery beacon config + * WL_NAN_XTLV_CFG_FDISC_TBMP +*/ +typedef struct wl_nan_fastdisc_s { + uint8 id; + uint8 bitmap_len; + uint8 pad[2]; + uint8 bitmap[]; +} wl_nan_fastdisc_t; + +#define WL_NAN_FASTDISC_CFG_SIZE 1024 /* arbitrary */ + +#ifdef WL_NANHO +/* ****************** NAN Host offload specific strucures ****************** */ + +enum wl_nan_rx_mgmt_frm_type { + WL_NAN_RX_MGMT_FRM_BCN = 0, + WL_NAN_RX_MGMT_FRM_SDF = 1, + WL_NAN_RX_MGMT_FRM_NAF = 2 +}; +typedef uint8 wl_nan_rx_mgmt_frm_type_t; + +/* WL_NAN_EVENT_RX_MGMT_FRM */ +typedef struct wl_nan_event_rx_mgmt_frm { + uint8 frm_type; /* wl_nan_rx_mgmt_frm_type_t */ + uint8 pad; + uint16 frm_len; + uint8 frm[]; +} wl_nan_event_rx_mgmt_frm_t; + +#define WL_NAN_NANHO_UPDATE_MAX_SIZE 2048 /* arbitrary */ + +enum wl_nan_peer_entry_action { + WL_NAN_PEER_ENTRY_ACT_ADD = 0, /* add peer entry */ + WL_NAN_PEER_ENTRY_ACT_REMOVE = 1 /* remove peer entry */ +}; +typedef uint8 wl_nan_peer_entry_action_t; + +/* WL_NAN_XTLV_NANHO_PEER_ENTRY */ +typedef struct wl_nan_peer_entry +{ + struct ether_addr nmi; /* nmi of peer device */ + uint8 action; /* wl_nan_peer_entry_action_t */ + uint8 pad; +} wl_nan_peer_entry_t; + +enum wl_nan_dcaplist_action { + WL_NAN_DCAPLIST_ACT_UPDATE = 0, /* update or add */ + WL_NAN_DCAPLIST_ACT_REMOVE = 1 /* remove (only for peer dcap cache entry) */ +}; +typedef uint8 wl_nan_dcaplist_action_t; + +/* WL_NAN_XTLV_NANHO_DCAPLIST */ +typedef struct wl_nan_dev_cap_list +{ + struct ether_addr nmi; /* null for local device */ + uint8 action; /* 
wl_nan_dcaplist_action_t */ + /* optional fields for WL_NAN_DCAPLIST_ACT_UPDATE */ + uint8 num_maps; + uint8 dcap[]; /* list of nan_dev_cap_t */ +} wl_nan_dev_cap_list_t; + +typedef struct wl_nan_dev_chan_sched { + uint16 num_slots; /* number of slot in schedule */ + uint16 period; /* period of channel schedule (TU) */ + uint8 slot_dur; /* slot duration (TU) */ + uint8 map_id; /* map id (TBD) */ + uint8 pad[2]; + uint8 data[]; + /* chanspec_t chan_sched[num_slot] */ + /* uint8 slot_info[num_slot] */ +} wl_nan_dev_chan_sched_t; + +/* WL_NAN_XTLV_NANHO_DCSLIST */ +typedef struct wl_nan_dev_chan_sched_list { + struct ether_addr nmi; /* null for local device */ + uint8 num_maps; + uint8 pad; + wl_nan_dev_chan_sched_t dcs[]; +} wl_nan_dev_chan_sched_list_t; + +/* WL_NAN_XTLV_NANHO_BLOB */ +typedef struct wl_nan_dev_blob { + struct ether_addr nmi; /* null for local device */ + uint16 blob_len; /* blob len in blob[] buffer */ + uint8 blob_type; + uint8 pad[3]; + uint8 blob[]; +} wl_nan_dev_blob_t; + +typedef struct wl_nan_peer_ndl_state { + struct ether_addr nmi; + uint8 ndl_state; /* nan_peer_ndl_state_t */ + uint8 pad; +} wl_nan_peer_ndl_state_t; + +enum wl_nan_ndp_state_action { + WL_NAN_NDP_STATE_ACT_ESTABLISHED = 0, + WL_NAN_NDP_STATE_ACT_TERMINATED = 1 +}; +typedef uint8 wl_nan_ndp_state_action_t; + +/* WL_NAN_XTLV_NANHO_NDP_STATE */ +typedef struct wl_nan_ndp_state { + struct ether_addr peer_nmi; + struct ether_addr peer_ndi; + struct ether_addr lcl_ndi; + uint8 action; /* wl_nan_ndp_state_action_t */ + uint8 pad; + /* TODO: secured NDP information */ +} wl_nan_ndp_state_t; + +/* *************** end of NAN Host offload specific strucures ************** */ +#endif /* WL_NANHO */ + +/* ********************* end of NAN section ******************************** */ +/* endif WL_NAN */ + +#define P2P_NAN_IOC_BUFSZ 512 /* some sufficient ioc buff size */ +#define WL_P2P_NAN_IOCTL_VERSION 0x1 + +/* container for p2p nan iovtls & events */ +typedef struct wl_p2p_nan_ioc { + 
uint16 version; /* interface command or event version */ + uint16 id; /* p2p nan ioctl cmd ID */ + uint16 len; /* total length of data[] */ + uint16 pad; /* padding */ + uint8 data []; /* var len payload of bcm_xtlv_t type */ +} wl_p2p_nan_ioc_t; + +/* p2p nan cmd IDs */ +enum wl_p2p_nan_cmds { + /* p2p nan cfg ioctls */ + WL_P2P_NAN_CMD_ENABLE = 1, + WL_P2P_NAN_CMD_CONFIG = 2, + WL_P2P_NAN_CMD_DEL_CONFIG = 3, + WL_P2P_NAN_CMD_GET_INSTS = 4 +}; + +#define WL_P2P_NAN_CONFIG_VERSION 1 + +#define WL_P2P_NAN_DEVICE_P2P 0x0 +#define WL_P2P_NAN_DEVICE_GO 0x1 +#define WL_P2P_NAN_DEVICE_GC 0x2 +#define WL_P2P_NAN_DEVICE_INVAL 0xFF + +/* NAN P2P operation */ +typedef struct p2p_nan_config { + uint16 version; /* wl_p2p_nan_config_t structure version */ + uint16 len; /* total length including version and variable IE */ + uint32 flags; /* 0x1 to NEW, 0x2 to ADD, 0x4 to DEL */ + uint8 inst_id; /* publisher/subscriber id */ + uint8 inst_type; /* publisher/subscriber */ + uint8 dev_role; /* P2P device role: 'P2P','GO' or 'GC' */ + uint8 pad1; /* padding */ + uint8 resolution; /* Availability bitmap resolution */ + uint8 repeat; /* Whether Availability repeats across DW */ + uint16 ie_len; /* variable ie len */ + struct ether_addr dev_mac; /* P2P device address */ + uint16 pad2; /* Padding */ + uint32 avail_bmap; /* availability interval bitmap */ + uint32 chanspec; /* Chanspec */ + uint8 ie[]; /* hex ie data */ +} wl_p2p_nan_config_t; + +#define WL_P2P_NAN_SERVICE_LIST_VERSION 1 +typedef enum wl_nan_service_type { + WL_NAN_SVC_INST_PUBLISHER = 1, + WL_NAN_SVC_INST_SUBSCRIBER = 2 +} wl_nan_service_type_t; + +#define WL_P2P_NAN_CONFIG_NEW 0x1 +#define WL_P2P_NAN_CONFIG_ADD 0x2 +#define WL_P2P_NAN_CONFIG_DEL 0x4 + +typedef struct wl_nan_svc_inst { + uint8 inst_id; /* publisher/subscriber id */ + uint8 inst_type; /* publisher/subscriber */ +} wl_nan_svc_inst_t; + +typedef struct wl_nan_svc_inst_list { + uint16 version; /* this structure version */ + uint16 len; /* total length including 
version and variable svc list */ + uint16 count; /* service instance count */ + uint16 pad; /* padding */ + wl_nan_svc_inst_t svc[1]; /* service instance list */ +} wl_nan_svc_inst_list_t; + +#define NAN_POST_DISC_P2P_DATA_VER 1 +/* This structure will be used send peer p2p data with + * NAN discovery result + */ +typedef struct nan_post_disc_p2p_data { + uint8 ver; /* this structure version */ + uint8 dev_role; /* P2P Device role */ + uint8 resolution; /* Availability bitmap resolution */ + uint8 repeat; /* Whether Availabilty repeat across DW */ + struct ether_addr dev_mac; /* P2P device addres */ + uint16 pad1; /* Padding */ + uint32 chanspec; /* Chanspec */ + uint32 avl_bmp; /* availability interval bitmap */ +} nan_post_disc_p2p_data_t; + +enum { + WL_AVAIL_NONE = 0x0000, + WL_AVAIL_LOCAL = 0x0001, + WL_AVAIL_PEER = 0x0002, + WL_AVAIL_NDC = 0x0003, + WL_AVAIL_IMMUTABLE = 0x0004, + WL_AVAIL_RESPONSE = 0x0005, + WL_AVAIL_COUNTER = 0x0006, + WL_AVAIL_RANGING = 0x0007, + WL_AVAIL_UPD_POT = 0x0008, /* modify potential, keep committed/conditional */ + WL_AVAIL_UPD_COM_COND = 0x0009, /* modify committed/conditional, keep potential */ + WL_AVAIL_REMOVE_MAP = 0x000A, /* remove map */ + WL_AVAIL_FRM_TYPE = 0x000B, /* specify frame types containing NA */ + WL_AVAIL_TYPE_MAX = WL_AVAIL_FRM_TYPE /* New ones before and update */ +}; +#define WL_AVAIL_TYPE_MASK 0x000F +#define WL_AVAIL_FLAG_REMOVE 0x2000 /* remove schedule attr of given type & map id */ +#define WL_AVAIL_FLAG_SELECTED_NDC 0x4000 +#define WL_AVAIL_FLAG_RAW_MODE 0x8000 +#define WL_AVAIL_FLAGS_MASK 0xFF00 +#define WL_AVAIL_FLAGS_SHIFT 8 + +typedef int16 wl_avail_flags_t; + +/* availability entry flags */ +enum { + WL_AVAIL_ENTRY_NONE = 0x0000, + WL_AVAIL_ENTRY_COM = 0x0001, /* committed */ + WL_AVAIL_ENTRY_POT = 0x0002, /* potential */ + WL_AVAIL_ENTRY_COND = 0x0004, /* conditional */ + WL_AVAIL_ENTRY_PAGED = 0x0008, /* P-NDL */ + WL_AVAIL_ENTRY_USAGE = 0x0030, /* usage preference */ + WL_AVAIL_ENTRY_BIT_DUR = 
0x00C0, /* bit duration */ + WL_AVAIL_ENTRY_BAND_PRESENT = 0x0100, /* band present */ + WL_AVAIL_ENTRY_CHAN_PRESENT = 0x0200, /* channel information present */ + WL_AVAIL_ENTRY_CHAN_ENTRY_PRESENT = 0x0400, /* channel entry (opclass+bitmap) */ + /* free to use 0x0800 */ + WL_AVAIL_ENTRY_RXNSS = 0xF000 /* max num of spatial stream RX */ +}; + +/* bit duration */ +enum { + WL_AVAIL_BIT_DUR_16 = 0, /* 16TU */ + WL_AVAIL_BIT_DUR_32 = 1, /* 32TU */ + WL_AVAIL_BIT_DUR_64 = 2, /* 64TU */ + WL_AVAIL_BIT_DUR_128 = 3, /* 128TU */ +}; + +/* period */ +enum { + WL_AVAIL_PERIOD_0 = 0, /* 0TU */ + WL_AVAIL_PERIOD_128 = 1, /* 128TU */ + WL_AVAIL_PERIOD_256 = 2, /* 256TU */ + WL_AVAIL_PERIOD_512 = 3, /* 512TU */ + WL_AVAIL_PERIOD_1024 = 4, /* 1024TU */ + WL_AVAIL_PERIOD_2048 = 5, /* 2048TU */ + WL_AVAIL_PERIOD_4096 = 6, /* 4096TU */ + WL_AVAIL_PERIOD_8192 = 7, /* 8192TU */ +}; + +/* band */ +enum { + WL_AVAIL_BAND_NONE = 0, /* reserved */ + WL_AVAIL_BAND_SUB1G = 1, /* sub-1 GHz */ + WL_AVAIL_BAND_2G = 2, /* 2.4 GHz */ + WL_AVAIL_BAND_3G = 3, /* reserved (for 3.6 GHz) */ + WL_AVAIL_BAND_5G = 4, /* 4.9 and 5 GHz */ + WL_AVAIL_BAND_60G = 5, /* reserved (for 60 GHz) */ +}; + +#define WL_AVAIL_ENTRY_TYPE_MASK 0x000F +#define WL_AVAIL_ENTRY_USAGE_MASK 0x0030 /* up to 4 usage preferences */ +#define WL_AVAIL_ENTRY_USAGE_SHIFT 4 +#define WL_AVAIL_ENTRY_USAGE_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_USAGE_MASK) \ + >> WL_AVAIL_ENTRY_USAGE_SHIFT) + +#define WL_AVAIL_ENTRY_BIT_DUR_MASK 0x00C0 /* 0:16TU, 1:32TU, 2:64TU, 3:128TU */ +#define WL_AVAIL_ENTRY_BIT_DUR_SHIFT 6 +#define WL_AVAIL_ENTRY_BIT_DUR_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_BIT_DUR_MASK) \ + >> WL_AVAIL_ENTRY_BIT_DUR_SHIFT) + +#define WL_AVAIL_ENTRY_BAND_MASK 0x0100 /* 0=band not present, 1=present */ +#define WL_AVAIL_ENTRY_BAND_SHIFT 8 + +#define WL_AVAIL_ENTRY_CHAN_MASK 0x0200 /* 0=channel info not present, 1=present */ +#define WL_AVAIL_ENTRY_CHAN_SHIFT 9 + +#define WL_AVAIL_ENTRY_CHAN_ENTRY_MASK 0x0400 /* 0=chanspec, 1=hex 
channel entry */ +#define WL_AVAIL_ENTRY_CHAN_ENTRY_SHIFT 10 + +#define WL_AVAIL_ENTRY_RXNSS_MASK 0xF000 +#define WL_AVAIL_ENTRY_RXNSS_SHIFT 12 +#define WL_AVAIL_ENTRY_RXNSS_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_RXNSS_MASK) \ + >> WL_AVAIL_ENTRY_RXNSS_SHIFT) +#define WL_AVAIL_ENTRY_RXNSS_MAX 15 /* 0-15 */ + +/* mask for channel_entry (to be obsoleted) */ +#define WL_AVAIL_ENTRY_OPCLASS_MASK 0xFF +#define WL_AVAIL_ENTRY_CHAN_BITMAP_MASK 0xFF00 +#define WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT 8 +#define WL_AVAIL_ENTRY_CHAN_BITMAP_VAL(_info) (((_info) & WL_AVAIL_ENTRY_CHAN_BITMAP_MASK) \ + >> WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT) + +/* Used for raw channel entry field input */ +#define MAX_CHAN_ENTRY_LEN 6 + +typedef struct wl_avail_entry { + uint16 length; /* total length */ + uint16 start_offset; /* in TUs, multiply by 16 for total offset */ + union { + uint32 channel_info; /* either chanspec or hex channel entry (opclass + + * bitmap per NAN spec), as indicated by setting + * WL_AVAIL_ENTRY_HEX_CHAN_ENTRY flag + */ + uint32 band; /* defined by WL_BAND enum, 2=2.4GHz, 4=5GHz */ + uint8 channel_entry[MAX_CHAN_ENTRY_LEN]; + uint8 align[8]; /* aligned len of union in structure (not for use) + * if member of union is changed, + * update length of align[] accordingly. 
+ */ + } u; /* band or channel value, 0=all band/channels */ + uint8 sched_map_id; /* avail map id associated with sched entry */ + uint8 pad; + uint8 period; /* in TUs, defined by WL_AVAIL_PERIOD enum + * 1:128, 2:256, 3:512, 4:1024, 5:2048, 6:4096, + * 7:8192 + */ + uint8 bitmap_len; + uint16 flags; /* defined by avail entry flags enum: + * type, usage pref, bit duration, rx nss, + * and band, channel or channel entry + */ + uint8 bitmap[]; /* time bitmap */ +} wl_avail_entry_t; + +#define WL_AVAIL_VERSION 1 /* current wl_avail version */ + +typedef struct wl_avail { + uint16 length; /* total length */ + uint16 flags; /* LSB - avail type (defined by WL_AVAIL enum) + * MSB - avail flags + */ + uint8 id; /* id used for multiple maps/avail */ + uint8 lndc_id; /* ndc id used in multi-ndc case */ + uint8 version; + uint8 pad; + struct ether_addr addr; /* peer mac address or ndc id */ + uint8 num_entries; + uint8 unused_byte; + /* add additional fields above this line */ + uint8 entry[]; +} wl_avail_t; + +#define WL_AVAIL_MIN_LEN(n) ((n) ? 
OFFSETOF(wl_avail_t, entry) + \ + ((n) * OFFSETOF(wl_avail_entry_t, bitmap)) : 0) + +/* unaligned schedule (window) */ +typedef struct wl_avail_ulw { + uint8 id; /* schedule ID */ + uint8 overwrite; /* bit 0: overwrite all + * 1-4: map ID if overwrite all is 0 + */ + uint16 flags; + uint32 start; /* start time of first ULW, in us */ + uint32 dur; /* duration of ULW, in us */ + uint32 period; /* time between consecutive ULWs, in us */ + union { + uint32 chanspec; + uint32 band; + uint8 chan_entry[MAX_CHAN_ENTRY_LEN]; + uint8 pad[8]; + } u; + uint8 cntdwn; /* remaining ULWs before schedule ends */ + uint8 pad[3]; +} wl_avail_ulw_t; + +/* unset: NAN is not available during ULW, set: NAN is avail depending on ctrl flags */ +#define WL_NAN_ULW_CTRL_PRESENT (1 << 0) +/* unset: band, set: channel */ +#define WL_NAN_ULW_CTRL_TYPE (1 << 1) +/* set: NAN is availabile on specified band/channel */ +#define WL_NAN_ULW_CTRL_AVAIL (1 << 2) +/* channel is provided in raw attribute format */ +#define WL_NAN_ULW_CTRL_RAW_CHAN (1 << 3) + +/* nan wfa testmode operations */ +enum { + WL_NAN_WFA_TM_IGNORE_TERMINATE_NAF = 0x00000001, + WL_NAN_WFA_TM_IGNORE_RX_DATA_OUTSIDE_CRB = 0x00000002, + WL_NAN_WFA_TM_ALLOW_TX_DATA_OUTSIDE_CRB = 0x00000004, + WL_NAN_WFA_TM_ENFORCE_NDL_COUNTER = 0x00000008, + WL_NAN_WFA_TM_BYPASS_NDL_PROPOSAL_VALIDATION = 0x00000010, + /* allow data(pings) tx while ndp sec negotiation */ + WL_NAN_WFA_TM_SEC_SEND_PINGS_BYPASS_NDP_SM = 0x00000020, + /* generate and insert incorrect mic */ + WL_NAN_WFA_TM_SEC_INCORRECT_MIC = 0x00000040, + /* send m4 reject deliberately */ + WL_NAN_WFA_TM_SEC_REJECT_STATUS4M4 = 0x00000080, + /* send mgmt frame (for eg. 
ndp terminate) in clear txt (bypass security) */ + WL_NAN_WFA_TM_SEC_SEND_MGMT_CLEAR = 0x00000100, + /* validate qos */ + WL_NAN_WFA_TM_NDL_QOS_VALIDATE = 0x00000200, + /* firmware generated schedule update */ + WL_NAN_WFA_TM_GEN_SCHED_UPD = 0x00000400, + /* add lower 4-bytes of TSF to configured start time */ + WL_NAN_WFA_TM_ULW_START_TIME = 0x00000800, + /* enable schedule validation for SDF */ + WL_NAN_WFA_TM_SDF_SCHED_VALIDATE = 0x00001000, + /* by pass faw na iovar */ + WL_NAN_WFA_TM_SKIP_RAW_NA_BLOB = 0x00002000, + /* overwrite local NA with peer NA in received frame */ + WL_NAN_WFA_TM_LOCAL_NA_OVERWRITE = 0x00004000, + /* randomize and self configure ndl qos(needed at responder in auto mode) */ + WL_NAN_WFA_TM_SELF_CFG_NDL_QOS = 0x00008000, + /* send NAF frames only in DW */ + WL_NAN_WFA_TM_SEND_NAF_IN_DW = 0x00010000, + /* restrict channels used for countered slots to Ch 6/149 only */ + WL_NAN_WFA_TM_RESTRICT_COUNTER_SLOTS_CHAN = 0x00020000, + /* NDPE negative test case (4.2.5 & 4.2.6) */ + WL_NAN_WFA_TM_NDPE_NEGATIVE_TEST_TB = 0x00040000, + /* Set NDPE(NAN3.0) capable bit in dev cap attr */ + WL_NAN_WFA_TM_ENABLE_NDPE_CAP = 0x00080000, + /* NDPE negative test case (4.2.5.2). 
Enable both NDP and NDPE attributes */ + WL_NAN_WFA_TM_ENABLE_NDP_NDPE_ATTR = 0x00100000, + + /* add above & update mask */ + WL_NAN_WFA_TM_FLAG_MASK = 0x001FFFFF +}; +typedef uint32 wl_nan_wfa_testmode_t; + +/* To be removed; replaced by wl_nan_vndr_payload */ +typedef struct wl_nan_vndr_ie { + uint32 flags; /* bitmask indicating which packet(s) contain this IE */ + uint16 body_len; /* length of body (does not include oui field) */ + uint8 pad[2]; + uint8 oui[DOT11_OUI_LEN]; + uint8 pad2; + uint8 body[]; /* vendor IE payload */ +} wl_nan_vndr_ie_t; + +typedef struct wl_nan_vndr_payload { + uint32 flags; /* bitmask indicating which packet(s) contain payload */ + uint16 payload_len; /* length of payload */ + uint8 pad[2]; + uint8 payload[]; /* payload to be appended to NAN frame */ +} wl_nan_vndr_payload_t; + +typedef struct wl_nan_dev_cap { + uint8 bands[NAN_MAX_BANDS]; + uint8 awake_dw[NAN_MAX_BANDS]; + uint8 overwrite_mapid[NAN_MAX_BANDS]; + uint8 mapid; /* dev cap mapid */ + uint8 all_maps; /* applies to device */ + uint8 paging; + uint8 pad[3]; +} wl_nan_dev_cap_t; + +/* arbitrary max len for frame template */ +#define WL_NAN_FRM_TPLT_MAX_LEN 1024 + +typedef struct wl_nan_frm_tplt { + wl_nan_frame_type_t type; + uint8 pad; + uint16 len; /* length of template */ + uint8 data[]; /* template */ +} wl_nan_frm_tplt_t; + +#define RSSI_THRESHOLD_SIZE 16 +#define MAX_IMP_RESP_SIZE 256 + +typedef struct wl_proxd_rssi_bias { + int32 version; /**< version */ + int32 threshold[RSSI_THRESHOLD_SIZE]; /**< threshold */ + int32 peak_offset; /**< peak offset */ + int32 bias; /**< rssi bias */ + int32 gd_delta; /**< GD - GD_ADJ */ + int32 imp_resp[MAX_IMP_RESP_SIZE]; /**< (Hi*Hi)+(Hr*Hr) */ +} wl_proxd_rssi_bias_t; + +typedef struct wl_proxd_rssi_bias_avg { + int32 avg_threshold[RSSI_THRESHOLD_SIZE]; /**< avg threshold */ + int32 avg_peak_offset; /**< avg peak offset */ + int32 avg_rssi; /**< avg rssi */ + int32 avg_bias; /**< avg bias */ +} wl_proxd_rssi_bias_avg_t; + +#include 
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info { + uint16 type; /**< type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */ + uint16 index; /**< The current frame index, from 1 to total_frames. */ + uint16 tof_cmd; /**< M_TOF_CMD */ + uint16 tof_rsp; /**< M_TOF_RSP */ + uint16 tof_avb_rxl; /**< M_TOF_AVB_RX_L */ + uint16 tof_avb_rxh; /**< M_TOF_AVB_RX_H */ + uint16 tof_avb_txl; /**< M_TOF_AVB_TX_L */ + uint16 tof_avb_txh; /**< M_TOF_AVB_TX_H */ + uint16 tof_id; /**< M_TOF_ID */ + uint8 tof_frame_type; + uint8 tof_frame_bw; + int8 tof_rssi; + int32 tof_cfo; + int32 gd_adj_ns; /**< ground delay */ + int32 gd_h_adj_ns; /**< group delay + threshold crossing */ + int16 nfft; /**< number of samples stored in H */ + uint8 num_max_cores; + +} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t; +#include <packed_section_end.h> + +#define K_TOF_COLLECT_H_PAD 1 +#define K_TOF_COLLECT_SC_20MHZ (64) +/* Maximum possible size of sample capture */ +#define K_TOF_COLLECT_SC_80MHZ (2*K_TOF_COLLECT_SC_20MHZ) +/* Maximum possible size of channel dump */ +#define K_TOF_COLLECT_CHAN_SIZE (2*K_TOF_COLLECT_SC_80MHZ) + +/* +A few extra samples are required to estimate frequency offset +Right now 16 samples are being used. Can be changed in future. 
+*/ +#define K_TOF_COLLECT_SAMP_SIZE_20MHZ (2*(K_TOF_COLLECT_SC_20MHZ)+16+K_TOF_COLLECT_H_PAD) +#define K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ (2*K_TOF_COLLECT_SAMP_SIZE_20MHZ) +#define K_TOF_COLLECT_H_SIZE_20MHZ (K_TOF_COLLECT_SAMP_SIZE_20MHZ) +#define K_TOF_COLLECT_HRAW_SIZE_20MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ) + +#define K_TOF_COLLECT_SAMP_SIZE_80MHZ (2*(K_TOF_COLLECT_SC_80MHZ)+16+K_TOF_COLLECT_H_PAD) +#define K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ (2*K_TOF_COLLECT_SAMP_SIZE_80MHZ) +#define K_TOF_COLLECT_H_SIZE_80MHZ (K_TOF_COLLECT_SAMP_SIZE_80MHZ) +#define K_TOF_COLLECT_HRAW_SIZE_80MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ) + +#define WL_PROXD_COLLECT_DATA_VERSION_1 1 +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data_v1 { + wl_proxd_collect_info_t info; + uint8 ri_rr[FTM_TPK_RI_RR_LEN]; + /**< raw data read from phy used to adjust timestamps */ + uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ]; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t_v1; +#include + +#define WL_PROXD_COLLECT_DATA_VERSION_2 2 +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data_v2 { + wl_proxd_collect_info_t info; + uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0]; + /**< raw data read from phy used to adjust timestamps */ + uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ]; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t_v2; +#include + +#define WL_PROXD_COLLECT_DATA_VERSION_3 3 +typedef struct wl_proxd_collect_data_v3 { + uint16 version; + uint16 len; + wl_proxd_collect_info_t info; + uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0]; + /**< raw data read from phy used to adjust timestamps */ + uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint32 chan[4 * K_TOF_COLLECT_CHAN_SIZE]; +} wl_proxd_collect_data_t_v3; +#define WL_PROXD_COLLECT_DATA_VERSION_MAX WL_PROXD_COLLECT_DATA_VERSION_3 + +typedef struct wl_proxd_debug_data { + uint8 count; /**< number of packets */ + uint8 stage; /**< state machone stage */ + uint8 received; /**< received or txed */ + uint8 paket_type; /**< packet type */ + uint8 
category; /**< category field */ + uint8 action; /**< action field */ + uint8 token; /**< token number */ + uint8 follow_token; /**< following token number */ + uint16 index; /**< index of the packet */ + uint16 tof_cmd; /**< M_TOF_CMD */ + uint16 tof_rsp; /**< M_TOF_RSP */ + uint16 tof_avb_rxl; /**< M_TOF_AVB_RX_L */ + uint16 tof_avb_rxh; /**< M_TOF_AVB_RX_H */ + uint16 tof_avb_txl; /**< M_TOF_AVB_TX_L */ + uint16 tof_avb_txh; /**< M_TOF_AVB_TX_H */ + uint16 tof_id; /**< M_TOF_ID */ + uint16 tof_status0; /**< M_TOF_STATUS_0 */ + uint16 tof_status2; /**< M_TOF_STATUS_2 */ + uint16 tof_chsm0; /**< M_TOF_CHNSM_0 */ + uint16 tof_phyctl0; /**< M_TOF_PHYCTL0 */ + uint16 tof_phyctl1; /**< M_TOF_PHYCTL1 */ + uint16 tof_phyctl2; /**< M_TOF_PHYCTL2 */ + uint16 tof_lsig; /**< M_TOF_LSIG */ + uint16 tof_vhta0; /**< M_TOF_VHTA0 */ + uint16 tof_vhta1; /**< M_TOF_VHTA1 */ + uint16 tof_vhta2; /**< M_TOF_VHTA2 */ + uint16 tof_vhtb0; /**< M_TOF_VHTB0 */ + uint16 tof_vhtb1; /**< M_TOF_VHTB1 */ + uint16 tof_apmductl; /**< M_TOF_AMPDU_CTL */ + uint16 tof_apmdudlim; /**< M_TOF_AMPDU_DLIM */ + uint16 tof_apmdulen; /**< M_TOF_AMPDU_LEN */ +} wl_proxd_debug_data_t; + +/** version of the wl_wsec_info structure */ +#define WL_WSEC_INFO_VERSION 0x01 + +/** start enum value for BSS properties */ +#define WL_WSEC_INFO_BSS_BASE 0x0100 + +/** size of len and type fields of wl_wsec_info_tlv_t struct */ +#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data) + +/** Allowed wl_wsec_info properties; not all of them may be supported. 
*/ +typedef enum { + WL_WSEC_INFO_NONE = 0, + WL_WSEC_INFO_MAX_KEYS = 1, + WL_WSEC_INFO_NUM_KEYS = 2, + WL_WSEC_INFO_NUM_HW_KEYS = 3, + WL_WSEC_INFO_MAX_KEY_IDX = 4, + WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5, + WL_WSEC_INFO_SUPPORTED_ALGOS = 6, + WL_WSEC_INFO_MAX_KEY_LEN = 7, + WL_WSEC_INFO_FLAGS = 8, + /* add global/per-wlc properties above */ + WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1), + WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2), + WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3), + WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4), + WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5), + WL_WSEC_INFO_BSS_ALGOS = (WL_WSEC_INFO_BSS_BASE + 6), + /* add per-BSS properties above */ + WL_WSEC_INFO_MAX = 0xffff +} wl_wsec_info_type_t; + +typedef struct { + uint32 algos; /* set algos to be enabled/disabled */ + uint32 mask; /* algos outside mask unaltered */ +} wl_wsec_info_algos_t; + +/** tlv used to return wl_wsec_info properties */ +typedef struct { + uint16 type; + uint16 len; /**< data length */ + uint8 data[1]; /**< data follows */ +} wl_wsec_info_tlv_t; + +/** input/output data type for wsec_info iovar */ +typedef struct wl_wsec_info { + uint8 version; /**< structure version */ + uint8 pad[2]; + uint8 num_tlvs; + wl_wsec_info_tlv_t tlvs[1]; /**< tlv data follows */ +} wl_wsec_info_t; + +/* + * randmac definitions + */ +#define WL_RANDMAC_MODULE "randmac" +#define WL_RANDMAC_API_VERSION 0x0100 /**< version 1.0 */ +#define WL_RANDMAC_API_MIN_VERSION 0x0100 /**< version 1.0 */ + +/** subcommands that can apply to randmac */ +enum { + WL_RANDMAC_SUBCMD_NONE = 0, + WL_RANDMAC_SUBCMD_GET_VERSION = 1, + WL_RANDMAC_SUBCMD_ENABLE = 2, + WL_RANDMAC_SUBCMD_DISABLE = 3, + WL_RANDMAC_SUBCMD_CONFIG = 4, + WL_RANDMAC_SUBCMD_STATS = 5, + WL_RANDMAC_SUBCMD_CLEAR_STATS = 6, + + WL_RANDMAC_SUBCMD_MAX +}; +typedef int16 wl_randmac_subcmd_t; + +/* Common IOVAR struct */ +typedef struct wl_randmac { + uint16 version; + uint16 len; /* total length */ + 
wl_randmac_subcmd_t subcmd_id; /* subcommand id */ + uint8 data[0]; /* subcommand data */ +} wl_randmac_t; + +#define WL_RANDMAC_IOV_HDR_SIZE OFFSETOF(wl_randmac_t, data) + +/* randmac version subcommand */ +typedef struct wl_randmac_version { + uint16 version; /* Randmac method version info */ + uint8 pad[2]; /* Align on 4 byte boundary */ +} wl_randmac_version_t; + +/* + * Bitmask for methods supporting MAC randomization feature + */ +#define WL_RANDMAC_USER_NONE 0x0000 +#define WL_RANDMAC_USER_FTM 0x0001 +#define WL_RANDMAC_USER_NAN 0x0002 +#define WL_RANDMAC_USER_SCAN 0x0004 +#define WL_RANDMAC_USER_ANQP 0x0008 +#define WL_RANDMAC_USER_ALL 0xFFFF +typedef uint16 wl_randmac_method_t; + +enum { + WL_RANDMAC_FLAGS_NONE = 0x00, + WL_RANDMAC_FLAGS_ADDR = 0x01, + WL_RANDMAC_FLAGS_MASK = 0x02, + WL_RANDMAC_FLAGS_METHOD = 0x04, + WL_RANDMAC_FLAGS_ALL = 0xFF +}; +typedef uint8 wl_randmac_flags_t; + +/* randmac statistics subcommand */ +typedef struct wl_randmac_stats { + uint32 set_ok; /* Set random addr success count */ + uint32 set_fail; /* Set random addr failed count */ + uint32 set_reqs; /* Set random addr count */ + uint32 reset_reqs; /* Restore random addr count */ + uint32 restore_ok; /* Restore random addr succes count */ + uint32 restore_fail; /* Restore random addr failed count */ + uint32 events_sent; /* randmac module events count */ + uint32 events_rcvd; /* randmac events received count */ +} wl_randmac_stats_t; + +/* randmac config subcommand */ +typedef struct wl_randmac_config { + struct ether_addr addr; /* Randomized MAC address */ + struct ether_addr addr_mask; /* bitmask for randomization */ + wl_randmac_method_t method; /* Enabled methods */ + wl_randmac_flags_t flags; /* What config info changed */ + uint8 PAD; +} wl_randmac_config_t; + +enum { + WL_RANDMAC_EVENT_NONE = 0, /**< not an event, reserved */ + WL_RANDMAC_EVENT_BSSCFG_ADDR_SET = 1, /* bsscfg addr randomized */ + WL_RANDMAC_EVENT_BSSCFG_ADDR_RESTORE = 2, /* bsscfg addr restored */ + 
WL_RANDMAC_EVENT_ENABLED = 3, /* randmac module enabled */ + WL_RANDMAC_EVENT_DISABLE = 4, /* randmac module disabled */ + WL_RANDMAC_EVENT_BSSCFG_STATUS = 5, /* bsscfg enable/disable */ + + WL_RANDMAC_EVENT_MAX +}; +typedef int16 wl_randmac_event_type_t; +typedef int32 wl_randmac_status_t; +typedef uint32 wl_randmac_event_mask_t; + +#define WL_RANDMAC_EVENT_MASK_ALL 0xfffffffe +#define WL_RANDMAC_EVENT_MASK_EVENT(_event_type) (1 << (_event_type)) +#define WL_RANDMAC_EVENT_ENABLED(_mask, _event_type) (\ + ((_mask) & WL_RANDMAC_EVENT_MASK_EVENT(_event_type)) != 0) + +/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +enum { + WL_RANDMAC_TLV_NONE = 0, + WL_RANDMAC_TLV_METHOD = 1, + WL_RANDMAC_TLV_ADDR = 2, + WL_RANDMAC_TLV_MASK = 3 +}; +typedef uint16 wl_randmac_tlv_id_t; + +typedef struct wl_randmac_tlv { + wl_randmac_tlv_id_t id; + uint16 len; /* Length of variable */ + uint8 data[1]; +} wl_randmac_tlv_t; + +/** randmac event */ +typedef struct wl_randmac_event { + uint16 version; + uint16 len; /* Length of all variables */ + wl_randmac_event_type_t type; + wl_randmac_method_t method; + uint8 pad[2]; + wl_randmac_tlv_t tlvs[1]; /**< variable */ +} wl_randmac_event_t; + +/* + * scan MAC definitions + */ + +/** common iovar struct */ +typedef struct wl_scanmac { + uint16 subcmd_id; /**< subcommand id */ + uint16 len; /**< total length of data[] */ + uint8 data[]; /**< subcommand data */ +} wl_scanmac_t; + +/* subcommand ids */ +#define WL_SCANMAC_SUBCMD_ENABLE 0 +#define WL_SCANMAC_SUBCMD_BSSCFG 1 /**< only GET supported */ +#define WL_SCANMAC_SUBCMD_CONFIG 2 + +/** scanmac enable data struct */ +typedef struct wl_scanmac_enable { + uint8 enable; /**< 1 - enable, 0 - disable */ + uint8 pad[3]; /**< 4-byte struct alignment */ +} wl_scanmac_enable_t; + +/** scanmac bsscfg data struct */ +typedef struct wl_scanmac_bsscfg { + uint32 bsscfg; /**< bsscfg index */ +} wl_scanmac_bsscfg_t; + +/** scanmac config data struct */ +typedef struct 
wl_scanmac_config {
+	struct ether_addr mac;		/**< 6 bytes of MAC address or MAC prefix (i.e. OUI) */
+	struct ether_addr random_mask;	/**< randomized bits on each scan */
+	uint16 scan_bitmap;		/**< scans to use this MAC address */
+	uint8 pad[2];			/**< 4-byte struct alignment */
+} wl_scanmac_config_t;
+
+/* scan bitmap */
+#define WL_SCANMAC_SCAN_UNASSOC		(0x01 << 0)	/**< unassociated scans */
+#define WL_SCANMAC_SCAN_ASSOC_ROAM	(0x01 << 1)	/**< associated roam scans */
+#define WL_SCANMAC_SCAN_ASSOC_PNO	(0x01 << 2)	/**< associated PNO scans */
+#define WL_SCANMAC_SCAN_ASSOC_HOST	(0x01 << 3)	/**< associated host scans */
+
+#define WL_SCAN_EVENT_VERSION		1
+
+#define WL_SCAN_TYPE_ASSOC	0x1	/* Assoc scan */
+#define WL_SCAN_TYPE_ROAM	0x2	/* Roam scan */
+#define WL_SCAN_TYPE_FWSCAN	0x4	/* Other FW scan */
+#define WL_SCAN_TYPE_HOSTSCAN	0x8	/* Host scan */
+
+/* scan event payload: per-slice channel lists follow the fixed header */
+typedef struct scan_event_data {
+	uint32 version;
+	uint32 flags;
+	uint16 num_chan_slice0;
+	uint16 num_chan_slice1;
+	/* Will contain num_chan_slice0 followed by num_chan_slice1 chanspecs */
+	chanspec_t scan_chan_list[];
+} scan_event_data_t;
+
+/*
+ * bonjour dongle offload definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_bdo {
+	uint16 subcmd_id;	/* subcommand id */
+	uint16 len;		/* total length of data[] */
+	uint8 data[];		/* subcommand data */
+} wl_bdo_t;
+
+/* subcommand ids */
+#define WL_BDO_SUBCMD_DOWNLOAD		0	/* Download flattened database */
+#define WL_BDO_SUBCMD_ENABLE		1	/* Start bonjour after download */
+#define WL_BDO_SUBCMD_MAX_DOWNLOAD	2	/* Get the max download size */
+
+/* maximum fragment size */
+#define BDO_MAX_FRAGMENT_SIZE	1024
+
+/* download flattened database
+ *
+ * BDO must be disabled before database download else fail.
+ *
+ * If database size is within BDO_MAX_FRAGMENT_SIZE then only a single fragment
+ * is required (i.e. frag_num = 0, total_size = frag_size).
+ * If database size exceeds BDO_MAX_FRAGMENT_SIZE then multiple fragments are required.
+ */
+typedef struct wl_bdo_download {
+	uint16 total_size;	/* total database size */
+	uint16 frag_num;	/* fragment number, 0 for first fragment, N-1 for last fragment */
+	uint16 frag_size;	/* size of fragment (max BDO_MAX_FRAGMENT_SIZE) */
+	uint8 pad[2];		/* 4-byte struct alignment */
+	uint8 fragment[BDO_MAX_FRAGMENT_SIZE];	/* fragment data */
+} wl_bdo_download_t;
+
+/* enable
+ *
+ * Enable requires a downloaded database else fail.
+ */
+typedef struct wl_bdo_enable {
+	uint8 enable;	/* 1 - enable, 0 - disable */
+	uint8 pad[3];	/* 4-byte struct alignment */
+} wl_bdo_enable_t;
+
+/*
+ * Get the max download size for Bonjour Offload.
+ */
+typedef struct wl_bdo_max_download {
+	uint16 size;	/* Max download size in bytes */
+	uint8 pad[2];	/* 4-byte struct alignment */
+} wl_bdo_max_download_t;
+
+/*
+ * TCP keepalive offload definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_tko {
+	uint16 subcmd_id;	/* subcommand id */
+	uint16 len;		/* total length of data[] */
+	uint8 data[];		/* subcommand data */
+} wl_tko_t;
+
+/* subcommand ids */
+#define WL_TKO_SUBCMD_MAX_TCP	0	/* max TCP connections supported */
+#define WL_TKO_SUBCMD_PARAM	1	/* configure offload common parameters */
+#define WL_TKO_SUBCMD_CONNECT	2	/* TCP connection info */
+#define WL_TKO_SUBCMD_ENABLE	3	/* enable/disable */
+#define WL_TKO_SUBCMD_STATUS	4	/* TCP connection status */
+
+/* WL_TKO_SUBCMD_MAX_CONNECT subcommand data */
+typedef struct wl_tko_max_tcp {
+	uint8 max;	/* max TCP connections supported */
+	uint8 pad[3];	/* 4-byte struct alignment */
+} wl_tko_max_tcp_t;
+
+/* WL_TKO_SUBCMD_PARAM subcommand data */
+typedef struct wl_tko_param {
+	uint16 interval;	/* keepalive tx interval (secs) */
+	uint16 retry_interval;	/* keepalive retry interval (secs) */
+	uint16 retry_count;	/* retry_count */
+	uint8 pad[2];		/* 4-byte struct alignment */
+} wl_tko_param_t;
+
+/* WL_TKO_SUBCMD_CONNECT subcommand data
+ * invoke with unique 'index' for each TCP connection
+ */
+typedef struct wl_tko_connect {
+	uint8 index;		/* TCP connection index, 0 to max-1 */
+	uint8 ip_addr_type;	/* 0 - IPv4, 1 - IPv6 */
+	uint16 local_port;	/* local port */
+	uint16 remote_port;	/* remote port */
+	uint16 PAD;
+	uint32 local_seq;	/* local sequence number */
+	uint32 remote_seq;	/* remote sequence number */
+	uint16 request_len;	/* TCP keepalive request packet length */
+	uint16 response_len;	/* TCP keepalive response packet length */
+	uint8 data[];		/* variable length field containing local/remote IPv4/IPv6,
+				 * TCP keepalive request packet, TCP keepalive response packet
+				 * For IPv4, length is 4 * 2 + request_length + response_length
+				 *   offset 0 - local IPv4
+				 *   offset 4 - remote IPv4
+				 *   offset 8 - TCP keepalive request packet
+				 *   offset 8+request_length - TCP keepalive response packet
+				 * For IPv6, length is 16 * 2 + request_length + response_length
+				 *   offset 0 - local IPv6
+				 *   offset 16 - remote IPv6
+				 *   offset 32 - TCP keepalive request packet
+				 *   offset 32+request_length - TCP keepalive response packet
+				 */
+} wl_tko_connect_t;
+
+/* WL_TKO_SUBCMD_CONNECT subcommand data to GET configured info for specific index */
+typedef struct wl_tko_get_connect {
+	uint8 index;	/* TCP connection index, 0 to max-1 */
+	uint8 pad[3];	/* 4-byte struct alignment */
+} wl_tko_get_connect_t;
+
+/* WL_TKO_SUBCMD_ENABLE subcommand data */
+typedef struct wl_tko_enable {
+	uint8 enable;	/* 1 - enable, 0 - disable */
+	uint8 pad[3];	/* 4-byte struct alignment */
+} wl_tko_enable_t;
+
+/* WL_TKO_SUBCMD_STATUS subcommand data */
+/* must be invoked before tko is disabled else status is unavailable */
+typedef struct wl_tko_status {
+	uint8 count;		/* number of status entries (i.e. equals
+				 * max TCP connections supported)
+				 */
+	uint8 status[1];	/* variable length field containing status for
+				 * each TCP connection index
+				 */
+} wl_tko_status_t;
+
+/* per-connection TCP keepalive offload status codes */
+typedef enum {
+	TKO_STATUS_NORMAL			= 0,	/* TCP connection normal, no error */
+	TKO_STATUS_NO_RESPONSE			= 1,	/* no response to TCP keepalive */
+	TKO_STATUS_NO_TCP_ACK_FLAG		= 2,	/* TCP ACK flag not set */
+	TKO_STATUS_UNEXPECT_TCP_FLAG		= 3,	/* unexpected TCP flags set other than ACK */
+	TKO_STATUS_SEQ_NUM_INVALID		= 4,	/* ACK != sequence number */
+	TKO_STATUS_REMOTE_SEQ_NUM_INVALID	= 5,	/* SEQ > remote sequence number */
+	TKO_STATUS_TCP_DATA			= 6,	/* TCP data available */
+	TKO_STATUS_UNAVAILABLE			= 255,	/* not used/configured */
+} tko_status_t;
+
+/* proximity detection (RSSI/TOF) state-machine codes */
+enum rssi_reason {
+	RSSI_REASON_UNKNOW = 0,
+	RSSI_REASON_LOWRSSI = 1,
+	RSSI_REASON_NSYC = 2,
+	RSSI_REASON_TIMEOUT = 3
+};
+
+enum tof_reason {
+	TOF_REASON_OK = 0,
+	TOF_REASON_REQEND = 1,
+	TOF_REASON_TIMEOUT = 2,
+	TOF_REASON_NOACK = 3,
+	TOF_REASON_INVALIDAVB = 4,
+	TOF_REASON_INITIAL = 5,
+	TOF_REASON_ABORT = 6
+};
+
+enum rssi_state {
+	RSSI_STATE_POLL = 0,
+	RSSI_STATE_TPAIRING = 1,
+	RSSI_STATE_IPAIRING = 2,
+	RSSI_STATE_THANDSHAKE = 3,
+	RSSI_STATE_IHANDSHAKE = 4,
+	RSSI_STATE_CONFIRMED = 5,
+	RSSI_STATE_PIPELINE = 6,
+	RSSI_STATE_NEGMODE = 7,
+	RSSI_STATE_MONITOR = 8,
+	RSSI_STATE_LAST = 9
+};
+
+enum tof_state {
+	TOF_STATE_IDLE = 0,
+	TOF_STATE_IWAITM = 1,
+	TOF_STATE_TWAITM = 2,
+	TOF_STATE_ILEGACY = 3,
+	TOF_STATE_IWAITCL = 4,
+	TOF_STATE_TWAITCL = 5,
+	TOF_STATE_ICONFIRM = 6,
+	TOF_STATE_IREPORT = 7
+};
+
+enum tof_mode_type {
+	TOF_LEGACY_UNKNOWN = 0,
+	TOF_LEGACY_AP = 1,
+	TOF_NONLEGACY_AP = 2
+};
+
+enum tof_way_type {
+	TOF_TYPE_ONE_WAY = 0,
+	TOF_TYPE_TWO_WAY = 1,
+	TOF_TYPE_REPORT = 2
+};
+
+enum tof_rate_type {
+	TOF_FRAME_RATE_VHT = 0,
+	TOF_FRAME_RATE_LEGACY = 1
+};
+
+#define TOF_ADJ_TYPE_NUM	4	/**< number of assisted timestamp adjustment */
+enum tof_adj_mode {
+	TOF_ADJ_SOFTWARE = 0,
+	TOF_ADJ_HARDWARE = 1,
+	TOF_ADJ_SEQ = 2,
+	TOF_ADJ_NONE = 3
+};
+
+#define FRAME_TYPE_NUM	4	/**< number of frame type */
+enum frame_type {
+	FRAME_TYPE_CCK = 0,
+	FRAME_TYPE_OFDM = 1,
+	FRAME_TYPE_11N = 2,
+	FRAME_TYPE_11AC = 3
+};
+
+/* proxd status iovar payload (note: PAD is a macro generating unique pad names) */
+typedef struct wl_proxd_status_iovar {
+	uint16 method;			/**< method */
+	uint8 mode;			/**< mode */
+	uint8 peermode;			/**< peer mode */
+	uint8 state;			/**< state */
+	uint8 reason;			/**< reason code */
+	uint8 PAD[2];
+	uint32 distance;		/**< distance */
+	uint32 txcnt;			/**< tx pkt counter */
+	uint32 rxcnt;			/**< rx pkt counter */
+	struct ether_addr peer;		/**< peer mac address */
+	int8 avg_rssi;			/**< average rssi */
+	int8 hi_rssi;			/**< highest rssi */
+	int8 low_rssi;			/**< lowest rssi */
+	uint8 PAD[3];
+	uint32 dbgstatus;		/**< debug status */
+	uint16 frame_type_cnt[FRAME_TYPE_NUM];	/**< frame types */
+	uint8 adj_type_cnt[TOF_ADJ_TYPE_NUM];	/**< adj types HW/SW */
+} wl_proxd_status_iovar_t;
+
+/* ifdef NET_DETECT */
+typedef struct net_detect_adapter_features {
+	uint8 wowl_enabled;
+	uint8 net_detect_enabled;
+	uint8 nlo_enabled;
+} net_detect_adapter_features_t;
+
+typedef enum net_detect_bss_type {
+	nd_bss_any = 0,
+	nd_ibss,
+	nd_ess
+} net_detect_bss_type_t;
+
+typedef struct net_detect_profile {
+	wlc_ssid_t ssid;
+	net_detect_bss_type_t bss_type;	/**< Ignore for now since Phase 1 is only for ESS */
+	uint32 cipher_type;	/**< DOT11_CIPHER_ALGORITHM enumeration values */
+	uint32 auth_type;	/**< DOT11_AUTH_ALGORITHM enumeration values */
+} net_detect_profile_t;
+
+typedef struct net_detect_profile_list {
+	uint32 num_nd_profiles;
+	net_detect_profile_t nd_profile[];
+} net_detect_profile_list_t;
+
+typedef struct net_detect_config {
+	uint8 nd_enabled;
+	uint8 PAD[3];
+	uint32 scan_interval;
+	uint32 wait_period;
+	uint8 wake_if_connected;
+	uint8 wake_if_disconnected;
+	uint8 PAD[2];
+	net_detect_profile_list_t nd_profile_list;
+} net_detect_config_t;
+
+typedef enum net_detect_wake_reason {
+	nd_reason_unknown,
+	nd_net_detected,
+	nd_wowl_event,
+	nd_ucode_error
+} net_detect_wake_reason_t;
+
+typedef struct net_detect_wake_data {
+	net_detect_wake_reason_t nd_wake_reason;
+	uint32 nd_wake_date_length;
+	uint8 nd_wake_data[0];	/**< Wake data (currently unused) */
+} net_detect_wake_data_t;
+
+/* endif NET_DETECT */
+
+/* 802.11k beacon measurement request (unversioned, deprecated) */
+typedef struct bcnreq {
+	uint8 bcn_mode;
+	uint8 PAD[3];
+	int32 dur;
+	int32 channel;
+	struct ether_addr da;
+	uint16 random_int;
+	wlc_ssid_t ssid;
+	uint16 reps;
+	uint8 PAD[2];
+} bcnreq_t;
+
+#define WL_RRM_BCN_REQ_VER	1
+typedef struct bcn_req {
+	uint8 version;
+	uint8 bcn_mode;
+	uint8 pad_1[2];
+	int32 dur;
+	int32 channel;
+	struct ether_addr da;
+	uint16 random_int;
+	wlc_ssid_t ssid;
+	uint16 reps;
+	uint8 req_elements;
+	uint8 pad_2;
+	chanspec_list_t chspec_list;
+} bcn_req_t;
+
+typedef struct rrmreq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	uint16 reps;
+} rrmreq_t;
+
+typedef struct framereq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	struct ether_addr ta;
+	uint16 reps;
+} framereq_t;
+
+typedef struct statreq {
+	struct ether_addr da;
+	struct ether_addr peer;
+	uint16 random_int;
+	uint16 dur;
+	uint8 group_id;
+	uint8 PAD;
+	uint16 reps;
+} statreq_t;
+
+typedef struct txstrmreq {
+	struct ether_addr da;	/* Destination address */
+	uint16 random_int;	/* Random interval for measurement start */
+	uint16 dur;		/* Measurement duration */
+	uint16 reps;		/* number of repetitions */
+	struct ether_addr peer;	/* Peer MAC address */
+	uint8 tid;		/* Traffic ID */
+	uint8 bin0_range;	/* Delay range of the first bin */
+} txstrmreq_t;
+
+typedef struct lcireq {
+	struct ether_addr da;	/* Destination address */
+	uint16 reps;		/* number of repetitions */
+	uint8 subj;		/* Local/Remote/Third party */
+	uint8 lat_res;		/* Latitude requested Resolution */
+	uint8 lon_res;		/* Longitude requested Resolution */
+	uint8 alt_res;		/* Altitude requested Resolution */
+} lcireq_t;
+
+typedef struct civicreq {
+	struct ether_addr da;	/* Destination address */
+	uint16 reps;		/* number of repetitions */
+	uint8 subj;		/* Local/Remote/Third party */
+	uint8 civloc_type;	/* Format of location info */
+	uint8 siu;		/* Unit of Location service interval */
+	uint8 pad;
+	uint16 si;		/* Location service interval */
+} civicreq_t;
+
+typedef struct locidreq {
+	struct ether_addr da;	/* Destination address */
+	uint16 reps;		/* number of repetitions */
+	uint8 subj;		/* Local/Remote/Third party */
+	uint8 siu;		/* Unit of Location service interval */
+	uint16 si;		/* Location service interval */
+} locidreq_t;
+
+/* "rrm_config" iovar container */
+typedef struct wl_rrm_config_ioc {
+	uint16 version;		/* command version */
+	uint16 id;		/* subiovar cmd ID */
+	uint16 len;		/* total length of all bytes in data[] */
+	uint16 pad;		/* 4-byte boundary padding */
+	uint8 data[1];		/* payload */
+} wl_rrm_config_ioc_t;
+
+/* wl_rrm_config_ioc_t 'id' values */
+enum {
+	WL_RRM_CONFIG_NONE	= 0,	/* reserved */
+	WL_RRM_CONFIG_GET_LCI	= 1,	/* get LCI */
+	WL_RRM_CONFIG_SET_LCI	= 2,	/* set LCI */
+	WL_RRM_CONFIG_GET_CIVIC	= 3,	/* get civic location */
+	WL_RRM_CONFIG_SET_CIVIC	= 4,	/* set civic location */
+	WL_RRM_CONFIG_GET_LOCID	= 5,	/* get location identifier */
+	WL_RRM_CONFIG_SET_LOCID	= 6,	/* set location identifier */
+	WL_RRM_CONFIG_MAX	= 7
+};
+
+#define WL_RRM_CONFIG_NAME	"rrm_config"
+#define WL_RRM_CONFIG_MIN_LENGTH	OFFSETOF(wl_rrm_config_ioc_t, data)
+
+enum {
+	WL_RRM_EVENT_NONE	= 0,	/* not an event, reserved */
+	WL_RRM_EVENT_FRNG_REQ	= 1,	/* Receipt of FRNG request frame */
+	WL_RRM_EVENT_FRNG_REP	= 2,	/* Receipt of FRNG report frame */
+
+	WL_RRM_EVENT_MAX
+};
+typedef int16 wl_rrm_event_type_t;
+
+typedef struct frngreq_target {
+	uint32 bssid_info;
+	uint8 channel;
+	uint8 phytype;
+	uint8 reg;
+	uint8 pad;
+	struct ether_addr bssid;
+	chanspec_t chanspec;
+	uint32 sid;
+} frngreq_target_t;
+
+typedef struct frngreq {
+	wl_rrm_event_type_t event;	/* RRM event type */
+	struct ether_addr da;
+	uint16 max_init_delay;		/* Upper bound of random delay, in TUs */
+	uint8 min_ap_count;		/* Min FTM ranges requested (1-15) */
+	uint8 num_aps;			/* Number of APs to range, at least min_ap_count */
+	uint16 max_age;			/* Max elapsed time before FTM request, 0xFFFF = any */
+	uint16 reps;			/* Number of repetitions of this measurement type */
+	frngreq_target_t targets[1];	/* Target BSSIDs to range */
+} frngreq_t;
+
+typedef struct frngrep_range {
+	uint32 start_tsf;		/* 4 lsb of tsf */
+	struct ether_addr bssid;
+	uint8 pad[2];
+	uint32 range;
+	uint32 max_err;
+	uint8 rsvd;
+	uint8 pad2[3];
+} frngrep_range_t;
+
+typedef struct frngrep_error {
+	uint32 start_tsf;		/* 4 lsb of tsf */
+	struct ether_addr bssid;
+	uint8 code;
+	uint8 pad[1];
+} frngrep_error_t;
+
+typedef struct frngrep {
+	wl_rrm_event_type_t event;	/* RRM event type */
+	struct ether_addr da;
+	uint8 range_entry_count;
+	uint8 error_entry_count;
+	uint16 dialog_token;		/* dialog token */
+	frngrep_range_t range_entries[DOT11_FTM_RANGE_ENTRY_MAX_COUNT];
+	frngrep_error_t error_entries[DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT];
+} frngrep_t;
+
+/* "rrm_frng" iovar container */
+typedef struct wl_rrm_frng_ioc {
+	uint16 version;		/* command version */
+	uint16 id;		/* subiovar cmd ID */
+	uint16 len;		/* total length of all bytes in data[] */
+	uint16 pad;		/* 4-byte boundary padding */
+	uint8 data[];		/* payload */
+} wl_rrm_frng_ioc_t;
+
+enum {
+	WL_RRM_FRNG_NONE	= 0,	/* reserved */
+	WL_RRM_FRNG_SET_REQ	= 1,	/* send ftm ranging request */
+	WL_RRM_FRNG_MAX		= 2
+};
+
+#define WL_RRM_FRNG_NAME	"rrm_frng"
+#define WL_RRM_FRNG_MIN_LENGTH	OFFSETOF(wl_rrm_frng_ioc_t, data)
+
+#define WL_RRM_RPT_VER		0
+#define WL_RRM_RPT_MAX_PAYLOAD	256
+#define WL_RRM_RPT_MIN_PAYLOAD	7
+#define WL_RRM_RPT_FALG_ERR	0
+#define WL_RRM_RPT_FALG_GRP_ID_PROPR	(1 << 0)
+#define WL_RRM_RPT_FALG_GRP_ID_0	(1 << 1)
+/* statistics report */
+typedef struct {
+	uint16 ver;		/**< version */
+	struct ether_addr addr;	/**< STA MAC addr */
+	uint32 timestamp;	/**< timestamp of the report */
+	uint16 flag;		/**< flag */
+	uint16 len;		/**< length of payload data */
+	uint8 data[WL_RRM_RPT_MAX_PAYLOAD];
+} statrpt_t;
+
+typedef struct wlc_dwds_config {
+	uint32 enable;
+	uint32 mode;		/**< STA/AP interface */
+	struct ether_addr ea;
+	uint8 PAD[2];
+} wlc_dwds_config_t;
+
+typedef struct wl_el_set_params_s {
+	uint8 set;	/**< Set number */
+	uint8 PAD[3];
+	uint32 size;	/**< Size to make/expand */
+} wl_el_set_params_t;
+
+typedef struct wl_el_tag_params_s {
+	uint16 tag;
+	uint8 set;
+	uint8 flags;
+} wl_el_tag_params_t;
+
+#define EVENT_LOG_SET_TYPE_CURRENT_VERSION 0
+typedef struct wl_el_set_type_s {
+	uint16 version;
+	uint16 len;
+	uint8 set;	/* Set number */
+	uint8 type;	/* Type- EVENT_LOG_SET_TYPE_DEFAULT or EVENT_LOG_SET_TYPE_PRSRV */
+	uint16 PAD;
+} wl_el_set_type_t;
+
+typedef struct wl_staprio_cfg {
+	struct ether_addr ea;	/**< mac addr */
+	uint8 prio;		/**< scb priority */
+} wl_staprio_cfg_t;
+
+#define STAMON_STACONFIG_VER	1
+/* size of struct wlc_stamon_sta_config_t elements */
+#define STAMON_STACONFIG_LENGTH	20
+
+typedef enum wl_stamon_cfg_cmd_type {
+	STAMON_CFG_CMD_DEL	= 0,
+	STAMON_CFG_CMD_ADD	= 1,
+	STAMON_CFG_CMD_ENB	= 2,
+	STAMON_CFG_CMD_DSB	= 3,
+	STAMON_CFG_CMD_CNT	= 4,
+	STAMON_CFG_CMD_RSTCNT	= 5,
+	STAMON_CFG_CMD_GET_STATS = 6,
+	STAMON_CFG_CMD_SET_MONTIME = 7
+} wl_stamon_cfg_cmd_type_t;
+
+typedef struct wlc_stamon_sta_config {
+	wl_stamon_cfg_cmd_type_t cmd;	/**< 0 - delete, 1 - add */
+	struct ether_addr ea;
+	uint16 version;		/* Command structure version */
+	uint16 length;		/* Command structure length */
+	uint8 pad[2];
+	/* Time (ms) for which STA's are monitored. Value ZERO indicates no time limit */
+	uint32 monitor_time;
+} wlc_stamon_sta_config_t;
+
+/* ifdef SR_DEBUG */
+typedef struct /* pmu_reg */ {
+	uint32 pmu_control;
+	uint32 pmu_capabilities;
+	uint32 pmu_status;
+	uint32 res_state;
+	uint32 res_pending;
+	uint32 pmu_timer1;
+	uint32 min_res_mask;
+	uint32 max_res_mask;
+	uint32 pmu_chipcontrol1[4];
+	uint32 pmu_regcontrol[5];
+	uint32 pmu_pllcontrol[5];
+	uint32 pmu_rsrc_up_down_timer[31];
+	uint32 rsrc_dep_mask[31];
+} pmu_reg_t;
+/* endif SR_DEBUG */
+
+typedef struct wl_taf_define {
+	struct ether_addr ea;	/**< STA MAC or 0xFF... */
+	uint16 version;		/**< version */
+	uint32 sch;		/**< method index */
+	uint32 prio;		/**< priority */
+	uint32 misc;		/**< used for return value */
+	uint8 text[];		/**< used to pass and return ascii text */
+} wl_taf_define_t;
+
+/** Received Beacons lengths information */
+#define WL_LAST_BCNS_INFO_FIXED_LEN OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring)
+typedef struct wlc_bcn_len_hist {
+	uint16 ver;		/**< version field */
+	uint16 cur_index;	/**< current pointed index in ring buffer */
+	uint32 max_bcnlen;	/**< Max beacon length received */
+	uint32 min_bcnlen;	/**< Min beacon length received */
+	uint32 ringbuff_len;	/**< Length of the ring buffer 'bcnlen_ring' */
+	uint32 bcnlen_ring[1];	/**< ring buffer storing received beacon lengths */
+} wlc_bcn_len_hist_t;
+
+/* WDS net interface types */
+#define WL_WDSIFTYPE_NONE	0x0	/**< The interface type is neither WDS nor DWDS. */
+#define WL_WDSIFTYPE_WDS	0x1	/**< The interface is WDS type. */
+#define WL_WDSIFTYPE_DWDS	0x2	/**< The interface is DWDS type. */
+
+typedef struct wl_bssload_static {
+	uint8 is_static;
+	uint8 PAD;
+	uint16 sta_count;
+	uint8 chan_util;
+	uint8 PAD;
+	uint16 aac;
+} wl_bssload_static_t;
+
+/* Buffer of size WLC_SAMPLECOLLECT_MAXLEN (=10240 for 4345a0 ACPHY)
+ * gets copied to this, multiple times
+ */
+typedef enum wl_gpaio_option {
+	GPAIO_PMU_AFELDO,
+	GPAIO_PMU_TXLDO,
+	GPAIO_PMU_VCOLDO,
+	GPAIO_PMU_LNALDO,
+	GPAIO_PMU_ADCLDO,
+	GPAIO_ICTAT_CAL,
+	GPAIO_PMU_CLEAR,
+	GPAIO_OFF,
+	GPAIO_PMU_LOGENLDO,
+	GPAIO_PMU_RXLDO2G,
+	GPAIO_PMU_RXLDO5G,
+	GPAIO_PMU_LPFTXLDO,
+	GPAIO_PMU_LDO1P6,
+	GPAIO_RCAL,
+	GPAIO_IQDAC_BUF_DC_MEAS,
+	GPAIO_IQDAC_BUF_DC_CLEAR,
+	GPAIO_DAC_IQ_DC_RDBK,
+	GPAIO_DAC_IQ_DC_RDBK_CLEAR,
+	GPAIO_AFE_LDO_FOR_DAC_DC,
+	GPAIO_PA5G_VCAS_SOURCE
+} wl_gpaio_option_t;
+
+/** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */
+typedef enum wlc_ap_iov_bss_operation {
+	WLC_AP_IOV_OP_DELETE                   = -1,
+	WLC_AP_IOV_OP_DISABLE                  = 0,
+	WLC_AP_IOV_OP_ENABLE                   = 1,
+	WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE  = 2,
+	WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3,
+	WLC_AP_IOV_OP_MOVE                     = 4
+} wlc_ap_iov_bss_oper_t;
+
+/* LTE coex info */
+/* Analogue of HCI Set MWS Signaling cmd */
+typedef struct {
+	int16 mws_rx_assert_offset;
+	int16 mws_rx_assert_jitter;
+	int16 mws_rx_deassert_offset;
+	int16 mws_rx_deassert_jitter;
+	int16 mws_tx_assert_offset;
+	int16 mws_tx_assert_jitter;
+	int16 mws_tx_deassert_offset;
+	int16 mws_tx_deassert_jitter;
+	int16 mws_pattern_assert_offset;
+	int16 mws_pattern_assert_jitter;
+	int16 mws_inact_dur_assert_offset;
+	int16 mws_inact_dur_assert_jitter;
+	int16 mws_scan_freq_assert_offset;
+	int16 mws_scan_freq_assert_jitter;
+	int16 mws_prio_assert_offset_req;
+} wci2_config_t;
+
+/** Analogue of HCI MWS Channel Params */
+typedef struct {
+	uint16 mws_rx_center_freq;	/**< MHz */
+	uint16 mws_tx_center_freq;
+	uint16 mws_rx_channel_bw;	/**< KHz */
+	uint16 mws_tx_channel_bw;
+	uint8 mws_channel_en;
+	uint8 mws_channel_type;	/**< Don't care for WLAN? */
+} mws_params_t;
+
+#define LTECX_MAX_NUM_PERIOD_TYPES 7
+
+/* LTE Frame params */
+typedef struct {
+	uint16 mws_frame_dur;
+	int16 mws_framesync_assert_offset;
+	uint16 mws_framesync_assert_jitter;
+	uint16 mws_period_dur[LTECX_MAX_NUM_PERIOD_TYPES];
+	uint8 mws_period_type[LTECX_MAX_NUM_PERIOD_TYPES];
+	uint8 mws_num_periods;
+} mws_frame_config_t;
+
+/** MWS wci2 message */
+typedef struct {
+	uint8 mws_wci2_data;		/**< BT-SIG msg */
+	uint8 PAD;
+	uint16 mws_wci2_interval;	/**< Interval in us */
+	uint16 mws_wci2_repeat;		/**< No of msgs to send */
+} mws_wci2_msg_t;
+/* MWS ANT map */
+typedef struct {
+	uint16 combo1;	/* mws ant selection 1 */
+	uint16 combo2;	/* mws ant selection 2 */
+	uint16 combo3;	/* mws ant selection 3 */
+	uint16 combo4;	/* mws ant selection 4 */
+} mws_ant_map_t;
+
+/* MWS ANT map 2nd generation */
+typedef struct {
+	uint16 combo[16];	/* mws ant selection 2nd */
+} mws_ant_map_t_2nd;
+
+/* MWS SCAN_REQ Bitmap */
+typedef struct mws_scanreq_params {
+	uint16 idx;
+	uint16 bm_2g;
+	uint16 bm_5g_lo;
+	uint16 bm_5g_mid;
+	uint16 bm_5g_hi;
+} mws_scanreq_params_t;
+
+typedef struct {
+	uint32 config;	/**< MODE: AUTO (-1), Disable (0), Enable (1) */
+	uint32 status;	/**< Current state: Disabled (0), Enabled (1) */
+} wl_config_t;
+
+#define WLC_RSDB_MODE_AUTO_MASK 0x80
+#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
+
+typedef struct {
+	uint16 request;		/* type of sensor hub request */
+	uint16 enable;		/* enable/disable response for specified request */
+	uint16 interval;	/* interval between responses to the request */
+} shub_req_t;
+
+#define WL_IF_STATS_T_VERSION 1	/**< current version of wl_if_stats structure */
+
+/** per interface counters */
+typedef struct wl_if_stats {
+	uint16	version;	/**< version of the structure */
+	uint16	length;		/**< length of the entire structure */
+	uint32	PAD;		/**< padding */
+
+	/* transmit stat counters */
+	uint64	txframe;	/**< tx data frames */
+	uint64	txbyte;		/**< tx data bytes */
+	uint64	txerror;	/**< tx data errors (derived: sum of others) */
+	uint64	txnobuf;	/**< tx out of buffer errors */
+	uint64	txrunt;		/**< tx runt frames */
+	uint64	txfail;		/**< tx failed frames */
+	uint64	txretry;	/**< tx retry frames */
+	uint64	txretrie;	/**< tx multiple retry frames */
+	uint64	txfrmsnt;	/**< tx sent frames */
+	uint64	txmulti;	/**< tx multicast sent frames */
+	uint64	txfrag;		/**< tx fragments sent */
+
+	/* receive stat counters */
+	uint64	rxframe;	/**< rx data frames */
+	uint64	rxbyte;		/**< rx data bytes */
+	uint64	rxerror;	/**< rx data errors (derived: sum of others) */
+	uint64	rxnobuf;	/**< rx out of buffer errors */
+	uint64	rxrunt;		/**< rx runt frames */
+	uint64	rxfragerr;	/**< rx fragment errors */
+	uint64	rxmulti;	/**< rx multicast frames */
+
+	uint64	txexptime;	/* DATA Tx frames suppressed due to timer expiration */
+	uint64	txrts;		/* RTS/CTS succeeded count */
+	uint64	txnocts;	/* RTS/CTS failed count */
+
+	uint64	txretrans;	/* Number of frame retransmissions */
+}
+wl_if_stats_t;
+
+typedef struct wl_band {
+	uint16 bandtype;	/**< WL_BAND_2G, WL_BAND_5G */
+	uint16 bandunit;	/**< bandstate[] index */
+	uint16 phytype;		/**< phytype */
+	uint16 phyrev;
+}
+wl_band_t;
+
+#define WL_WLC_VERSION_T_VERSION 1	/**< current version of wlc_version structure */
+
+/** wlc interface version */
+typedef struct wl_wlc_version {
+	uint16	version;	/**< version of the structure */
+	uint16	length;		/**< length of the entire structure */
+
+	/* epi version numbers */
+	uint16	epi_ver_major;	/**< epi major version number */
+	uint16	epi_ver_minor;	/**< epi minor version number */
+	uint16	epi_rc_num;	/**< epi RC number */
+	uint16	epi_incr_num;	/**< epi increment number */
+
+	/* wlc interface version numbers */
+	uint16	wlc_ver_major;	/**< wlc interface major version number */
+	uint16	wlc_ver_minor;	/**< wlc interface minor version number */
+}
+wl_wlc_version_t;
+
+/* Highest version of WLC_API_VERSION supported */
+#define WLC_API_VERSION_MAJOR_MAX 8 +#define WLC_API_VERSION_MINOR_MAX 0 + +/* begin proxd definitions */ +#include + +#define WL_PROXD_API_VERSION 0x0300 /**< version 3.0 */ + +/** Minimum supported API version */ +#define WL_PROXD_API_MIN_VERSION 0x0300 + +/** proximity detection methods */ +enum { + WL_PROXD_METHOD_NONE = 0, + WL_PROXD_METHOD_RSVD1 = 1, /**< backward compatibility - RSSI, not supported */ + WL_PROXD_METHOD_TOF = 2, + WL_PROXD_METHOD_RSVD2 = 3, /**< 11v only - if needed */ + WL_PROXD_METHOD_FTM = 4, /**< IEEE rev mc/2014 */ + WL_PROXD_METHOD_MAX +}; +typedef int16 wl_proxd_method_t; + +/** global and method configuration flags */ +enum { + WL_PROXD_FLAG_NONE = 0x00000000, + WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /**< respond to requests, per bss */ + WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /**< 11mc range requests enabled */ + WL_PROXD_FLAG_TX_LCI = 0x00000004, /**< tx lci, if known */ + WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /**< tx civic, if known */ + WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /**< auto respond w/o host action */ + WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /**< continue tx w/o host action */ + WL_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /**< publish availability */ + WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /**< schedule using availability */ + WL_PROXD_FLAG_ASAP_CAPABLE = 0x00000100, /* ASAP capable */ + WL_PROXD_FLAG_MBURST_FOLLOWUP = 0x00000200, /* new multi-burst algorithm */ + WL_PROXD_FLAG_SECURE = 0x00000400, /* per bsscfg option */ + WL_PROXD_FLAG_NO_TSF_SYNC = 0x00000800, /* disable tsf sync */ + WL_PROXD_FLAG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_flags_t; + +#define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \ + WL_PROXD_FLAG_AVAIL_SCHEDULE) + +/** session flags */ +enum { + WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /**< no flags */ + WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /**< local device is initiator */ + WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /**< local device is target */ + 
WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /**< (initiated) 1-way rtt */ + WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /**< created w/ rx_auto_burst */ + WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /**< good until cancelled */ + WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /**< rtt detail in results */ + WL_PROXD_SESSION_FLAG_SECURE = 0x00000040, /**< sessionis secure */ + WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /**< AOA along w/ RTT */ + WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /**< Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /**< Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /**< Use NAN BSS, if applicable */ + WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /**< e.g. FTM1 - ASAP-capable */ + WL_PROXD_SESSION_FLAG_REPORT_FAILURE = 0x00002000, /**< report failure to target */ + WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /**< report distance to target */ + WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, + WL_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /**< netrual mode */ + WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /**< Toast */ + WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /**< no param override from target */ + WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /**< ASAP session */ + WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /**< transmit LCI req */ + WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /**< transmit civic loc req */ + WL_PROXD_SESSION_FLAG_PRE_SCAN = 0x00400000, /* enable pre-scan for asap=1 */ + WL_PROXD_SESSION_FLAG_AUTO_VHTACK = 0x00800000, /* use vhtack based on brcm ie */ + WL_PROXD_SESSION_FLAG_VHTACK = 0x01000000, /* vht ack is in use - output only */ + WL_PROXD_SESSION_FLAG_BDUR_NOPREF = 0x02000000, /* burst-duration: no preference */ + WL_PROXD_SESSION_FLAG_NUM_FTM_NOPREF = 0x04000000, /* num of FTM frames: no preference */ + WL_PROXD_SESSION_FLAG_FTM_SEP_NOPREF = 0x08000000, /* time btw FTM frams: no pref */ + WL_PROXD_SESSION_FLAG_NUM_BURST_NOPREF = 0x10000000, /* num of 
bursts: no pref */ + WL_PROXD_SESSION_FLAG_BURST_PERIOD_NOPREF = 0x20000000, /* burst period: no pref */ + WL_PROXD_SESSION_FLAG_MBURST_FOLLOWUP = 0x40000000, /* new mburst algo - reserved */ + WL_PROXD_SESSION_FLAG_MBURST_NODELAY = 0x80000000, /**< good until cancelled */ + WL_PROXD_SESSION_FLAG_ALL = 0xffffffff + +}; +typedef uint32 wl_proxd_session_flags_t; + +/** time units - mc supports up to 0.1ns resolution */ +enum { + WL_PROXD_TMU_TU = 0, /**< 1024us */ + WL_PROXD_TMU_SEC = 1, + WL_PROXD_TMU_MILLI_SEC = 2, + WL_PROXD_TMU_MICRO_SEC = 3, + WL_PROXD_TMU_NANO_SEC = 4, + WL_PROXD_TMU_PICO_SEC = 5 +}; +typedef int16 wl_proxd_tmu_t; + +/** time interval e.g. 10ns */ +typedef struct wl_proxd_intvl { + uint32 intvl; + wl_proxd_tmu_t tmu; + uint8 pad[2]; +} wl_proxd_intvl_t; + +/** commands that can apply to proxd, method or a session */ +enum { + WL_PROXD_CMD_NONE = 0, + WL_PROXD_CMD_GET_VERSION = 1, + WL_PROXD_CMD_ENABLE = 2, + WL_PROXD_CMD_DISABLE = 3, + WL_PROXD_CMD_CONFIG = 4, + WL_PROXD_CMD_START_SESSION = 5, + WL_PROXD_CMD_BURST_REQUEST = 6, + WL_PROXD_CMD_STOP_SESSION = 7, + WL_PROXD_CMD_DELETE_SESSION = 8, + WL_PROXD_CMD_GET_RESULT = 9, + WL_PROXD_CMD_GET_INFO = 10, + WL_PROXD_CMD_GET_STATUS = 11, + WL_PROXD_CMD_GET_SESSIONS = 12, + WL_PROXD_CMD_GET_COUNTERS = 13, + WL_PROXD_CMD_CLEAR_COUNTERS = 14, + WL_PROXD_CMD_COLLECT = 15, /* not supported, see 'wl proxd_collect' */ + WL_PROXD_CMD_TUNE = 16, /* not supported, see 'wl proxd_tune' */ + WL_PROXD_CMD_DUMP = 17, + WL_PROXD_CMD_START_RANGING = 18, + WL_PROXD_CMD_STOP_RANGING = 19, + WL_PROXD_CMD_GET_RANGING_INFO = 20, + WL_PROXD_CMD_IS_TLV_SUPPORTED = 21, + + WL_PROXD_CMD_MAX +}; +typedef int16 wl_proxd_cmd_t; + +/* session ids: + * id 0 is reserved + * ids 1..0x7fff - allocated by host/app + * 0x8000-0xffff - allocated by firmware, used for auto/rx + */ +enum { + WL_PROXD_SESSION_ID_GLOBAL = 0 +}; + +/* Externally allocated sids */ +#define WL_PROXD_SID_EXT_MAX 0x7fff +#define WL_PROXD_SID_EXT_ALLOC(_sid) 
((_sid) > 0 && (_sid) <= WL_PROXD_SID_EXT_MAX) + +/* block size for reserved sid blocks */ +#define WL_PROXD_SID_EXT_BLKSZ 256 +#define WL_PROXD_SID_EXT_BLK_START(_i) (WL_PROXD_SID_EXT_MAX - (_i) * WL_PROXD_SID_EXT_BLKSZ + 1) +#define WL_PROXD_SID_EXT_BLK_END(_start) ((_start) + WL_PROXD_SID_EXT_BLKSZ - 1) + +/* rrm block */ +#define WL_PROXD_SID_RRM_START WL_PROXD_SID_EXT_BLK_START(1) +#define WL_PROXD_SID_RRM_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_RRM_START) + +/* nan block */ +#define WL_PROXD_SID_NAN_START WL_PROXD_SID_EXT_BLK_START(2) +#define WL_PROXD_SID_NAN_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_NAN_START) + +/** maximum number sessions that can be allocated, may be less if tunable */ +#define WL_PROXD_MAX_SESSIONS 16 + +typedef uint16 wl_proxd_session_id_t; + +/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */ +enum { + WL_PROXD_E_LAST = -1056, + WL_PROXD_E_NOAVAIL = -1056, + WL_PROXD_E_EXT_SCHED = -1055, + WL_PROXD_E_NOT_BCM = -1054, + WL_PROXD_E_FRAME_TYPE = -1053, + WL_PROXD_E_VERNOSUPPORT = -1052, + WL_PROXD_E_SEC_NOKEY = -1051, + WL_PROXD_E_SEC_POLICY = -1050, + WL_PROXD_E_SCAN_INPROCESS = -1049, + WL_PROXD_E_BAD_PARTIAL_TSF = -1048, + WL_PROXD_E_SCANFAIL = -1047, + WL_PROXD_E_NOTSF = -1046, + WL_PROXD_E_POLICY = -1045, + WL_PROXD_E_INCOMPLETE = -1044, + WL_PROXD_E_OVERRIDDEN = -1043, + WL_PROXD_E_ASAP_FAILED = -1042, + WL_PROXD_E_NOTSTARTED = -1041, + WL_PROXD_E_INVALIDMEAS = -1040, + WL_PROXD_E_INCAPABLE = -1039, + WL_PROXD_E_MISMATCH = -1038, + WL_PROXD_E_DUP_SESSION = -1037, + WL_PROXD_E_REMOTE_FAIL = -1036, + WL_PROXD_E_REMOTE_INCAPABLE = -1035, + WL_PROXD_E_SCHED_FAIL = -1034, + WL_PROXD_E_PROTO = -1033, + WL_PROXD_E_EXPIRED = -1032, + WL_PROXD_E_TIMEOUT = -1031, + WL_PROXD_E_NOACK = -1030, + WL_PROXD_E_DEFERRED = -1029, + WL_PROXD_E_INVALID_SID = -1028, + WL_PROXD_E_REMOTE_CANCEL = -1027, + WL_PROXD_E_CANCELED = -1026, /**< local */ + WL_PROXD_E_INVALID_SESSION = -1025, + WL_PROXD_E_BAD_STATE = -1024, + WL_PROXD_E_START 
= -1024, + WL_PROXD_E_ERROR = -1, + WL_PROXD_E_OK = 0 +}; +typedef int32 wl_proxd_status_t; + +/* proxd errors from phy */ +#define PROXD_TOF_INIT_ERR_BITS 16 + +enum { + WL_PROXD_PHY_ERR_LB_CORR_THRESH = (1 << 0), /* Loopback Correlation threshold */ + WL_PROXD_PHY_ERR_RX_CORR_THRESH = (1 << 1), /* Received Correlation threshold */ + WL_PROXD_PHY_ERR_LB_PEAK_POWER = (1 << 2), /* Loopback Peak power */ + WL_PROXD_PHY_ERR_RX_PEAK_POWER = (1 << 3), /* Received Peak power */ + WL_PROXD_PHY_ERR_BITFLIP = (1 << 4), /* Bitflips */ + WL_PROXD_PHY_ERR_SNR = (1 << 5), /* SNR */ + WL_PROXD_PHY_RX_STRT_WIN_OFF = (1 << 6), /* Receive start window is off */ + WL_PROXD_PHY_RX_END_WIN_OFF = (1 << 7), /* Receive End window is off */ + WL_PROXD_PHY_ERR_LOW_CONFIDENCE = (1 << 15), /* Low confidence on meas distance */ +}; +typedef uint32 wl_proxd_phy_error_t; + +/** session states */ +enum { + WL_PROXD_SESSION_STATE_NONE = 0, + WL_PROXD_SESSION_STATE_CREATED = 1, + WL_PROXD_SESSION_STATE_CONFIGURED = 2, + WL_PROXD_SESSION_STATE_STARTED = 3, + WL_PROXD_SESSION_STATE_DELAY = 4, + WL_PROXD_SESSION_STATE_USER_WAIT = 5, + WL_PROXD_SESSION_STATE_SCHED_WAIT = 6, + WL_PROXD_SESSION_STATE_BURST = 7, + WL_PROXD_SESSION_STATE_STOPPING = 8, + WL_PROXD_SESSION_STATE_ENDED = 9, + WL_PROXD_SESSION_STATE_START_WAIT = 10, + WL_PROXD_SESSION_STATE_DESTROYING = -1 +}; +typedef int16 wl_proxd_session_state_t; + +/** RTT sample flags */ +enum { + WL_PROXD_RTT_SAMPLE_NONE = 0x00, + WL_PROXD_RTT_SAMPLE_DISCARD = 0x01 +}; +typedef uint8 wl_proxd_rtt_sample_flags_t; +typedef int16 wl_proxd_rssi_t; +typedef uint16 wl_proxd_snr_t; +typedef uint16 wl_proxd_bitflips_t; + +/** result flags */ +enum { + WL_PRXOD_RESULT_FLAG_NONE = 0x0000, + WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /**< LOS - if available */ + WL_PROXD_RESULT_FLAG_LOS = 0x0002, /**< NLOS - if available */ + WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /**< Fatal error during burst */ + WL_PROXD_RESULT_FLAG_VHTACK = 0x0008, /* VHTACK or Legacy ACK used */ + 
WL_PROXD_REQUEST_SENT = 0x0010, /* FTM request was sent */ + WL_PROXD_REQUEST_ACKED = 0x0020, /* FTM request was acked */ + WL_PROXD_LTFSEQ_STARTED = 0x0040, /* LTF sequence started */ + WL_PROXD_RESULT_FLAG_ALL = 0xffff +}; +typedef int16 wl_proxd_result_flags_t; + +#define WL_PROXD_RTT_SAMPLE_VERSION_1 1 +typedef struct wl_proxd_rtt_sample_v1 { + uint8 id; /**< id for the sample - non-zero */ + wl_proxd_rtt_sample_flags_t flags; + wl_proxd_rssi_t rssi; + wl_proxd_intvl_t rtt; /**< round trip time */ + uint32 ratespec; + wl_proxd_snr_t snr; + wl_proxd_bitflips_t bitflips; + wl_proxd_status_t status; + int32 distance; + wl_proxd_phy_error_t tof_phy_error; + wl_proxd_phy_error_t tof_tgt_phy_error; /* target phy error bit map */ + wl_proxd_snr_t tof_tgt_snr; + wl_proxd_bitflips_t tof_tgt_bitflips; + uint8 coreid; + uint8 pad[3]; +} wl_proxd_rtt_sample_v1_t; + +#define WL_PROXD_RTT_RESULT_VERSION_1 1 +/** rtt measurement result */ +typedef struct wl_proxd_rtt_result_v1 { + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; /**< current state */ + union { + wl_proxd_intvl_t retry_after; /* hint for errors */ + wl_proxd_intvl_t burst_duration; /* burst duration */ + } u; + wl_proxd_rtt_sample_v1_t avg_rtt; + uint32 avg_dist; /* 1/256m units */ + uint16 sd_rtt; /* RTT standard deviation */ + uint8 num_valid_rtt; /* valid rtt cnt */ + uint8 num_ftm; /* actual num of ftm cnt (Configured) */ + uint16 burst_num; /* in a session */ + uint16 num_rtt; /* 0 if no detail */ + uint16 num_meas; /* number of ftm frames seen OTA */ + uint8 pad[2]; + wl_proxd_rtt_sample_v1_t rtt[1]; /* variable */ +} wl_proxd_rtt_result_v1_t; + +#define WL_PROXD_RTT_SAMPLE_VERSION_2 2 +typedef struct wl_proxd_rtt_sample_v2 { + uint16 version; + uint16 length; + uint8 id; /**< id for the sample - non-zero */ + wl_proxd_rtt_sample_flags_t flags; + wl_proxd_rssi_t rssi; + wl_proxd_intvl_t rtt; /**< round trip 
time */ + uint32 ratespec; + wl_proxd_snr_t snr; + wl_proxd_bitflips_t bitflips; + wl_proxd_status_t status; + int32 distance; + wl_proxd_phy_error_t tof_phy_error; + wl_proxd_phy_error_t tof_tgt_phy_error; /* target phy error bit map */ + wl_proxd_snr_t tof_tgt_snr; + wl_proxd_bitflips_t tof_tgt_bitflips; + uint8 coreid; + uint8 pad[3]; + uint32 chanspec; +} wl_proxd_rtt_sample_v2_t; + +#define WL_PROXD_RTT_RESULT_VERSION_2 2 +/** rtt measurement result */ +typedef struct wl_proxd_rtt_result_v2 { + uint16 version; + uint16 length; /* up to rtt[] */ + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; /**< current state */ + union { + wl_proxd_intvl_t retry_after; /* hint for errors */ + wl_proxd_intvl_t burst_duration; /* burst duration */ + } u; + uint32 avg_dist; /* 1/256m units */ + uint16 sd_rtt; /* RTT standard deviation */ + uint8 num_valid_rtt; /* valid rtt cnt */ + uint8 num_ftm; /* actual num of ftm cnt (Configured) */ + uint16 burst_num; /* in a session */ + uint16 num_rtt; /* 0 if no detail */ + uint16 num_meas; /* number of ftm frames seen OTA */ + uint8 pad[2]; + wl_proxd_rtt_sample_v2_t rtt[1]; /* variable, first element is avg_rtt */ +} wl_proxd_rtt_result_v2_t; + +/** aoa measurement result */ +typedef struct wl_proxd_aoa_result { + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; + uint16 burst_num; + uint8 pad[2]; + /* wl_proxd_aoa_sample_t sample_avg; TBD */ +} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t; +#include + +/** global stats */ +typedef struct wl_proxd_counters { + uint32 tx; /* tx frame count */ + uint32 rx; /* rx frame count */ + uint32 burst; /* total number of burst */ + uint32 sessions; /* total number of sessions */ + uint32 max_sessions; /* max concurrency */ + uint32 sched_fail; /* scheduling failures */ + uint32 timeouts; /* timeouts 
*/ + uint32 protoerr; /* protocol errors */ + uint32 noack; /* tx w/o ack */ + uint32 txfail; /* any tx falure */ + uint32 lci_req_tx; /* tx LCI requests */ + uint32 lci_req_rx; /* rx LCI requests */ + uint32 lci_rep_tx; /* tx LCI reports */ + uint32 lci_rep_rx; /* rx LCI reports */ + uint32 civic_req_tx; /* tx civic requests */ + uint32 civic_req_rx; /* rx civic requests */ + uint32 civic_rep_tx; /* tx civic reports */ + uint32 civic_rep_rx; /* rx civic reports */ + uint32 rctx; /* ranging contexts created */ + uint32 rctx_done; /* count of ranging done */ + uint32 publish_err; /* availability publishing errors */ + uint32 on_chan; /* count of scheduler onchan */ + uint32 off_chan; /* count of scheduler offchan */ + uint32 tsf_lo; /* local tsf or session tsf */ + uint32 tsf_hi; + uint32 num_meas; +} wl_proxd_counters_t; + +typedef struct wl_proxd_counters wl_proxd_session_counters_t; + +enum { + WL_PROXD_CAP_NONE = 0x0000, + WL_PROXD_CAP_ALL = 0xffff +}; +typedef int16 wl_proxd_caps_t; + +/** method capabilities */ +enum { + WL_PROXD_FTM_CAP_NONE = 0x0000, + WL_PROXD_FTM_CAP_FTM1 = 0x0001 +}; +typedef uint16 wl_proxd_ftm_caps_t; + +typedef struct wl_proxd_tlv_id_list { + uint16 num_ids; + uint16 ids[1]; +} wl_proxd_tlv_id_list_t; + +typedef struct wl_proxd_session_id_list { + uint16 num_ids; + wl_proxd_session_id_t ids[1]; +} wl_proxd_session_id_list_t; + +typedef struct wl_proxd_tpk { + struct ether_addr peer; + uint8 tpk[TPK_FTM_LEN]; +} wl_proxd_tpk_t; + +/* tlvs returned for get_info on ftm method + * configuration: + * proxd flags + * event mask + * debug mask + * session defaults (session tlvs) + * status tlv - not supported for ftm method + * info tlv + */ +typedef struct wl_proxd_ftm_info { + wl_proxd_ftm_caps_t caps; + uint16 max_sessions; + uint16 num_sessions; + uint16 rx_max_burst; +} wl_proxd_ftm_info_t; + +enum { + WL_PROXD_WAIT_NONE = 0x0000, + WL_PROXD_WAIT_KEY = 0x0001, + WL_PROXD_WAIT_SCHED = 0x0002, + WL_PROXD_WAIT_TSF = 0x0004 +}; +typedef 
int16 wl_proxd_wait_reason_t; + +/* tlvs returned for get_info on session + * session config (tlvs) + * session info tlv + */ +typedef struct wl_proxd_ftm_session_info { + uint16 sid; + uint8 bss_index; + uint8 pad; + struct ether_addr bssid; + wl_proxd_session_state_t state; + wl_proxd_status_t status; + uint16 burst_num; + wl_proxd_wait_reason_t wait_reason; + uint32 meas_start_lo; /* sn tsf of 1st meas for cur/prev burst */ + uint32 meas_start_hi; +} wl_proxd_ftm_session_info_t; + +typedef struct wl_proxd_ftm_session_status { + uint16 sid; + wl_proxd_session_state_t state; + wl_proxd_status_t status; + uint16 burst_num; + uint16 pad; +} wl_proxd_ftm_session_status_t; + +/** rrm range request */ +typedef struct wl_proxd_range_req { + uint16 num_repeat; + uint16 init_delay_range; /**< in TUs */ + uint8 pad; + uint8 num_nbr; /**< number of (possible) neighbors */ + nbr_element_t nbr[1]; +} wl_proxd_range_req_t; + +#define WL_PROXD_LCI_LAT_OFF 0 +#define WL_PROXD_LCI_LONG_OFF 5 +#define WL_PROXD_LCI_ALT_OFF 10 + +#define WL_PROXD_LCI_GET_LAT(_lci, _lat, _lat_err) { \ + unsigned _off = WL_PROXD_LCI_LAT_OFF; \ + _lat_err = (_lci)->data[(_off)] & 0x3f; \ + _lat = (_lci)->data[(_off)+1]; \ + _lat |= (_lci)->data[(_off)+2] << 8; \ + _lat |= (_lci)->data[_(_off)+3] << 16; \ + _lat |= (_lci)->data[(_off)+4] << 24; \ + _lat <<= 2; \ + _lat |= (_lci)->data[(_off)] >> 6; \ +} + +#define WL_PROXD_LCI_GET_LONG(_lci, _lcilong, _long_err) { \ + unsigned _off = WL_PROXD_LCI_LONG_OFF; \ + _long_err = (_lci)->data[(_off)] & 0x3f; \ + _lcilong = (_lci)->data[(_off)+1]; \ + _lcilong |= (_lci)->data[(_off)+2] << 8; \ + _lcilong |= (_lci)->data[_(_off)+3] << 16; \ + _lcilong |= (_lci)->data[(_off)+4] << 24; \ + __lcilong <<= 2; \ + _lcilong |= (_lci)->data[(_off)] >> 6; \ +} + +#define WL_PROXD_LCI_GET_ALT(_lci, _alt_type, _alt, _alt_err) { \ + unsigned _off = WL_PROXD_LCI_ALT_OFF; \ + _alt_type = (_lci)->data[_off] & 0x0f; \ + _alt_err = (_lci)->data[(_off)] >> 4; \ + _alt_err |= 
((_lci)->data[(_off)+1] & 0x03) << 4; \ + _alt = (_lci)->data[(_off)+2]; \ + _alt |= (_lci)->data[(_off)+3] << 8; \ + _alt |= (_lci)->data[_(_off)+4] << 16; \ + _alt <<= 6; \ + _alt |= (_lci)->data[(_off) + 1] >> 2; \ +} + +#define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6) + +/* availability. advertising mechanism bss specific */ +/** availablity flags */ +enum { + WL_PROXD_AVAIL_NONE = 0, + WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001, + WL_PROXD_AVAIL_SCHEDULED = 0x0002 /**< scheduled by proxd */ +}; +typedef int16 wl_proxd_avail_flags_t; + +/** time reference */ +enum { + WL_PROXD_TREF_NONE = 0, + WL_PROXD_TREF_DEV_TSF = 1, + WL_PROXD_TREF_NAN_DW = 2, + WL_PROXD_TREF_TBTT = 3, + WL_PROXD_TREF_MAX /* last entry */ +}; +typedef int16 wl_proxd_time_ref_t; + +/** proxd channel-time slot */ +typedef struct { + wl_proxd_intvl_t start; /**< from ref */ + wl_proxd_intvl_t duration; /**< from start */ + uint32 chanspec; +} wl_proxd_time_slot_t; + +typedef struct wl_proxd_avail24 { + wl_proxd_avail_flags_t flags; /**< for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /**< for query only */ + uint16 num_slots; + wl_proxd_time_slot_t slots[1]; /**< ROM compat - not used */ + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t ts0[1]; +} wl_proxd_avail24_t; +#define WL_PROXD_AVAIL24_TIMESLOT(_avail24, _i) (&(_avail24)->ts0[(_i)]) +#define WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) OFFSETOF(wl_proxd_avail24_t, ts0) +#define WL_PROXD_AVAIL24_TIMESLOTS(_avail24) WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0) +#define WL_PROXD_AVAIL24_SIZE(_avail24, _num_slots) (\ + WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0))) + +typedef struct wl_proxd_avail { + wl_proxd_avail_flags_t flags; /**< for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /**< for query only */ + uint16 num_slots; + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t slots[1]; +} wl_proxd_avail_t; +#define 
WL_PROXD_AVAIL_TIMESLOT(_avail, _i) (&(_avail)->slots[(_i)]) +#define WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) OFFSETOF(wl_proxd_avail_t, slots) + +#define WL_PROXD_AVAIL_TIMESLOTS(_avail) WL_PROXD_AVAIL_TIMESLOT(_avail, 0) +#define WL_PROXD_AVAIL_SIZE(_avail, _num_slots) (\ + WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL_TIMESLOT(_avail, 0))) + +/* collect support TBD */ + +/** debugging */ +enum { + WL_PROXD_DEBUG_NONE = 0x00000000, + WL_PROXD_DEBUG_LOG = 0x00000001, + WL_PROXD_DEBUG_IOV = 0x00000002, + WL_PROXD_DEBUG_EVENT = 0x00000004, + WL_PROXD_DEBUG_SESSION = 0x00000008, + WL_PROXD_DEBUG_PROTO = 0x00000010, + WL_PROXD_DEBUG_SCHED = 0x00000020, + WL_PROXD_DEBUG_RANGING = 0x00000040, + WL_PROXD_DEBUG_NAN = 0x00000080, + WL_PROXD_DEBUG_PKT = 0x00000100, + WL_PROXD_DEBUG_SEC = 0x00000200, + WL_PROXD_DEBUG_EVENTLOG = 0x80000000, /* map/enable EVNET_LOG_TAG_PROXD_INFO */ + WL_PROXD_DEBUG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_debug_mask_t; + +/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +enum { + WL_PROXD_TLV_ID_NONE = 0, + WL_PROXD_TLV_ID_METHOD = 1, + WL_PROXD_TLV_ID_FLAGS = 2, + WL_PROXD_TLV_ID_CHANSPEC = 3, /**< note: uint32 */ + WL_PROXD_TLV_ID_TX_POWER = 4, + WL_PROXD_TLV_ID_RATESPEC = 5, + WL_PROXD_TLV_ID_BURST_DURATION = 6, /**< intvl - length of burst */ + WL_PROXD_TLV_ID_BURST_PERIOD = 7, /**< intvl - between bursts */ + WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /**< intvl - between FTMs */ + WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /**< uint16 - per burst */ + WL_PROXD_TLV_ID_NUM_BURST = 10, /**< uint16 */ + WL_PROXD_TLV_ID_FTM_RETRIES = 11, /**< uint16 at FTM level */ + WL_PROXD_TLV_ID_BSS_INDEX = 12, /**< uint8 */ + WL_PROXD_TLV_ID_BSSID = 13, + WL_PROXD_TLV_ID_INIT_DELAY = 14, /**< intvl - optional,non-standalone only */ + WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /**< expect response within - intvl */ + WL_PROXD_TLV_ID_EVENT_MASK = 16, /**< interested events - in/out */ + 
WL_PROXD_TLV_ID_FLAGS_MASK = 17, /**< interested flags - in only */ + WL_PROXD_TLV_ID_PEER_MAC = 18, /**< mac address of peer */ + WL_PROXD_TLV_ID_FTM_REQ = 19, /**< dot11_ftm_req */ + WL_PROXD_TLV_ID_LCI_REQ = 20, + WL_PROXD_TLV_ID_LCI = 21, + WL_PROXD_TLV_ID_CIVIC_REQ = 22, + WL_PROXD_TLV_ID_CIVIC = 23, + WL_PROXD_TLV_ID_AVAIL24 = 24, /**< ROM compatibility */ + WL_PROXD_TLV_ID_SESSION_FLAGS = 25, + WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /**< in only */ + WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /**< uint16 - limit bursts per session */ + WL_PROXD_TLV_ID_RANGING_INFO = 28, /**< ranging info */ + WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /**< uint16 */ + WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /**< uint16, in only */ + WL_PROXD_TLV_ID_NAN_MAP_ID = 31, + WL_PROXD_TLV_ID_DEV_ADDR = 32, + WL_PROXD_TLV_ID_AVAIL = 33, /**< wl_proxd_avail_t */ + WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */ + WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */ + WL_PROXD_TLV_ID_TPK = 36, /* 32byte TPK */ + WL_PROXD_TLV_ID_RI_RR = 36, /* RI_RR */ + WL_PROXD_TLV_ID_TUNE = 37, /* wl_proxd_pararms_tof_tune_t */ + WL_PROXD_TLV_ID_CUR_ETHER_ADDR = 38, /* Source Address used for Tx */ + + /* output - 512 + x */ + WL_PROXD_TLV_ID_STATUS = 512, + WL_PROXD_TLV_ID_COUNTERS = 513, + WL_PROXD_TLV_ID_INFO = 514, + WL_PROXD_TLV_ID_RTT_RESULT = 515, + WL_PROXD_TLV_ID_AOA_RESULT = 516, + WL_PROXD_TLV_ID_SESSION_INFO = 517, + WL_PROXD_TLV_ID_SESSION_STATUS = 518, + WL_PROXD_TLV_ID_SESSION_ID_LIST = 519, + WL_PROXD_TLV_ID_RTT_RESULT_V2 = 520, + + /* debug tlvs can be added starting 1024 */ + WL_PROXD_TLV_ID_DEBUG_MASK = 1024, + WL_PROXD_TLV_ID_COLLECT = 1025, /**< output only */ + WL_PROXD_TLV_ID_STRBUF = 1026, + + WL_PROXD_TLV_ID_COLLECT_HEADER = 1025, /* wl_proxd_collect_header_t */ + WL_PROXD_TLV_ID_COLLECT_INFO = 1028, /* wl_proxd_collect_info_t */ + WL_PROXD_TLV_ID_COLLECT_DATA = 1029, /* wl_proxd_collect_data_t */ + WL_PROXD_TLV_ID_COLLECT_CHAN_DATA = 1030, /* wl_proxd_collect_data_t */ 
+ WL_PROXD_TLV_ID_MF_STATS_DATA = 1031, /* mf_stats_buffer */ + + WL_PROXD_TLV_ID_MAX +}; + +typedef struct wl_proxd_tlv { + uint16 id; + uint16 len; + uint8 data[1]; +} wl_proxd_tlv_t; + +/** proxd iovar - applies to proxd, method or session */ +typedef struct wl_proxd_iov { + uint16 version; + uint16 len; + wl_proxd_cmd_t cmd; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 PAD[2]; + wl_proxd_tlv_t tlvs[1]; /**< variable */ +} wl_proxd_iov_t; + +#define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs) + +/* The following event definitions may move to bcmevent.h, but sharing proxd types + * across needs more invasive changes unrelated to proxd + */ +enum { + WL_PROXD_EVENT_NONE = 0, /**< not an event, reserved */ + WL_PROXD_EVENT_SESSION_CREATE = 1, + WL_PROXD_EVENT_SESSION_START = 2, + WL_PROXD_EVENT_FTM_REQ = 3, + WL_PROXD_EVENT_BURST_START = 4, + WL_PROXD_EVENT_BURST_END = 5, + WL_PROXD_EVENT_SESSION_END = 6, + WL_PROXD_EVENT_SESSION_RESTART = 7, + WL_PROXD_EVENT_BURST_RESCHED = 8, /**< burst rescheduled-e.g. 
partial TSF */ + WL_PROXD_EVENT_SESSION_DESTROY = 9, + WL_PROXD_EVENT_RANGE_REQ = 10, + WL_PROXD_EVENT_FTM_FRAME = 11, + WL_PROXD_EVENT_DELAY = 12, + WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /**< (target) rx initiator-report */ + WL_PROXD_EVENT_RANGING = 14, + WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */ + WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */ + WL_PROXD_EVENT_COLLECT = 17, + WL_PROXD_EVENT_START_WAIT = 18, /* waiting to start */ + WL_PROXD_EVENT_MF_STATS = 19, /* mf stats event */ + + WL_PROXD_EVENT_MAX +}; +typedef int16 wl_proxd_event_type_t; + +/** proxd event mask - upto 32 events for now */ +typedef uint32 wl_proxd_event_mask_t; + +#define WL_PROXD_EVENT_MASK_ALL 0xfffffffe +#define WL_PROXD_EVENT_MASK_EVENT(_event_type) (1 << (_event_type)) +#define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\ + ((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0) + +/** proxd event - applies to proxd, method or session */ +typedef struct wl_proxd_event { + uint16 version; + uint16 len; + wl_proxd_event_type_t type; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 pad[2]; + wl_proxd_tlv_t tlvs[1]; /**< variable */ +} wl_proxd_event_t; + +enum { + WL_PROXD_RANGING_STATE_NONE = 0, + WL_PROXD_RANGING_STATE_NOTSTARTED = 1, + WL_PROXD_RANGING_STATE_INPROGRESS = 2, + WL_PROXD_RANGING_STATE_DONE = 3 +}; +typedef int16 wl_proxd_ranging_state_t; + +/** proxd ranging flags */ +enum { + WL_PROXD_RANGING_FLAG_NONE = 0x0000, /**< no flags */ + WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001, + WL_PROXD_RANGING_FLAG_ALL = 0xffff +}; +typedef uint16 wl_proxd_ranging_flags_t; + +struct wl_proxd_ranging_info { + wl_proxd_status_t status; + wl_proxd_ranging_state_t state; + wl_proxd_ranging_flags_t flags; + uint16 num_sids; + uint16 num_done; +}; +typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t; + +#include +/* Legacy platform i.e. 
43342/43430 */ +#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_1 1 +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data_v1 { + uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint8 ri_rr[FTM_TPK_LEN]; + wl_proxd_phy_error_t phy_err_mask; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_v1_t; + +/* Secured 2.0 supoorted devices i.e. 4364 */ +#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_2 2 +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data_v2 { + uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0]; + wl_proxd_phy_error_t phy_err_mask; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_v2_t; +#include + +#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_3 3 +typedef struct wl_proxd_collect_event_data_v3 { + uint16 version; + uint16 length; + uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0]; + wl_proxd_phy_error_t phy_err_mask; +} wl_proxd_collect_event_data_v3_t; + +#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_MAX WL_PROXD_COLLECT_EVENT_DATA_VERSION_3 + +/** Data returned by the bssload_report iovar. This is also the WLC_E_BSS_LOAD event data */ +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_bssload { + uint16 sta_count; /**< station count */ + uint16 aac; /**< available admission capacity */ + uint8 chan_util; /**< channel utilization */ +} BWL_POST_PACKED_STRUCT wl_bssload_t; +#include + +/** + * Maximum number of configurable BSS Load levels. The number of BSS Load + * ranges is always 1 more than the number of configured levels. eg. if + * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges: + * 0-10, 11-20, 21-30, 31-255. A WLC_E_BSS_LOAD event is generated each time + * the utilization level crosses into another range, subject to the rate limit. 
+ */ +#define MAX_BSSLOAD_LEVELS 8 +#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1) + +/** BSS Load event notification configuration. */ +typedef struct wl_bssload_cfg { + uint32 rate_limit_msec; /**< # of events posted to application will be limited to + * one per specified period (0 to disable rate limit). + */ + uint8 num_util_levels; /**< Number of entries in util_levels[] below */ + uint8 util_levels[MAX_BSSLOAD_LEVELS]; + /**< Variable number of BSS Load utilization levels in + * low to high order. An event will be posted each time + * a received beacon's BSS Load IE channel utilization + * value crosses a level. + */ + uint8 PAD[3]; +} wl_bssload_cfg_t; + +/** Multiple roaming profile suport */ +#define WL_MAX_ROAM_PROF_BRACKETS 4 + +#define WL_ROAM_PROF_VER_0 0 +#define WL_ROAM_PROF_VER_1 1 +#define WL_ROAM_PROF_VER_2 2 +#define WL_MAX_ROAM_PROF_VER WL_ROAM_PROF_VER_1 + +#define WL_ROAM_PROF_NONE (0 << 0) +#define WL_ROAM_PROF_LAZY (1 << 0) +#define WL_ROAM_PROF_NO_CI (1 << 1) +#define WL_ROAM_PROF_SUSPEND (1 << 2) +#define WL_ROAM_PROF_SYNC_DTIM (1 << 6) +#define WL_ROAM_PROF_DEFAULT (1 << 7) /**< backward compatible single default profile */ + +#define WL_FACTOR_TABLE_MAX_LIMIT 5 + +#define WL_CU_2G_ROAM_TRIGGER (-60) +#define WL_CU_5G_ROAM_TRIGGER (-70) + +#define WL_CU_SCORE_DELTA_DEFAULT 20 + +#define WL_MAX_CHANNEL_USAGE 0x0FF +#define WL_CU_PERCENTAGE_DISABLE 0 +#define WL_CU_PERCENTAGE_DEFAULT 70 +#define WL_CU_PERCENTAGE_MAX 100 +#define WL_CU_CALC_DURATION_DEFAULT 10 /* seconds */ +#define WL_CU_CALC_DURATION_MAX 60 /* seconds */ + +#define WL_ESTM_LOW_TRIGGER_DISABLE 0 +#define WL_ESTM_LOW_TRIGGER_DEFAULT 5 /* Mbps */ +#define WL_ESTM_LOW_TRIGGER_MAX 250 /* Mbps */ +#define WL_ESTM_ROAM_DELTA_DEFAULT 10 + +typedef struct wl_roam_prof_v3 { + int8 roam_flags; /**< bit flags */ + int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */ + int8 rssi_lower; + int8 roam_delta; + + /* if channel_usage if zero, roam_delta is rssi delta 
required for new AP */ + /* if channel_usage if non-zero, roam_delta is score delta(%) required for new AP */ + int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */ + int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */ + uint16 nfscan; /**< number of full scan to start with */ + uint16 fullscan_period; + uint16 init_scan_period; + uint16 backoff_multiplier; + uint16 max_scan_period; + uint8 channel_usage; + uint8 cu_avg_calc_dur; + uint16 estm_low_trigger; /**< ESTM low throughput roam trigger */ + int8 estm_roam_delta; /**< ESTM low throughput roam delta */ + uint8 pad; +} wl_roam_prof_v3_t; + +typedef struct wl_roam_prof_v2 { + int8 roam_flags; /**< bit flags */ + int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */ + int8 rssi_lower; + int8 roam_delta; + + /* if channel_usage if zero, roam_delta is rssi delta required for new AP */ + /* if channel_usage if non-zero, roam_delta is score delta(%) required for new AP */ + int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */ + int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */ + uint16 nfscan; /**< number of full scan to start with */ + uint16 fullscan_period; + uint16 init_scan_period; + uint16 backoff_multiplier; + uint16 max_scan_period; + uint8 channel_usage; + uint8 cu_avg_calc_dur; + uint8 pad[2]; +} wl_roam_prof_v2_t; + +typedef struct wl_roam_prof_v1 { + int8 roam_flags; /**< bit flags */ + int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */ + int8 rssi_lower; + int8 roam_delta; + + /* if channel_usage if zero, roam_delta is rssi delta required for new AP */ + /* if channel_usage if non-zero, roam_delta is score delta(%) required for new AP */ + int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */ + int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */ + uint16 nfscan; /**< number of full scan to start with */ + uint16 fullscan_period; + uint16 init_scan_period; + uint16 
backoff_multiplier; + uint16 max_scan_period; +} wl_roam_prof_v1_t; + +typedef struct wl_roam_prof_band_v3 { + uint32 band; /**< Must be just one band */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + wl_roam_prof_v3_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; +} wl_roam_prof_band_v3_t; + +typedef struct wl_roam_prof_band_v2 { + uint32 band; /**< Must be just one band */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + wl_roam_prof_v2_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; +} wl_roam_prof_band_v2_t; + +typedef struct wl_roam_prof_band_v1 { + uint32 band; /**< Must be just one band */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + wl_roam_prof_v1_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; +} wl_roam_prof_band_v1_t; + +#define BSS_MAXTABLE_SIZE 10 +#define WNM_BSS_SELECT_FACTOR_VERSION 1 +typedef struct wnm_bss_select_factor_params { + uint8 low; + uint8 high; + uint8 factor; + uint8 pad; +} wnm_bss_select_factor_params_t; + +#define WNM_BSS_SELECT_FIXED_SIZE OFFSETOF(wnm_bss_select_factor_cfg_t, params) +typedef struct wnm_bss_select_factor_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 pad; + uint16 count; + wnm_bss_select_factor_params_t params[1]; +} wnm_bss_select_factor_cfg_t; + +#define WNM_BSS_SELECT_WEIGHT_VERSION 1 +typedef struct wnm_bss_select_weight_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 weight; /* weightage for each type between 0 to 100 */ +} wnm_bss_select_weight_cfg_t; + +/* For branches before koala .. wbtext is part + * of wnm need to use below type only + */ +typedef struct wnm_btm_default_score_cfg { + uint32 default_score; /* default score */ + uint8 band; +} wnm_btm_default_score_cfg_t; + +/* For branches from koala and above .. 
wbtext is + * seperate module..need to use below type only + */ +typedef struct wbtext_btm_default_score_cfg { + uint32 default_score; /* default score */ + uint8 band; +} wbtext_btm_default_score_cfg_t; + +#define WNM_BSS_SELECT_TYPE_RSSI 0 +#define WNM_BSS_SELECT_TYPE_CU 1 +#define WNM_BSS_SELECT_TYPE_ESTM_DL 2 + +#define WNM_BSSLOAD_MONITOR_VERSION 1 +typedef struct wnm_bssload_monitor_cfg { + uint8 version; + uint8 band; + uint8 duration; /* duration between 1 to 20sec */ +} wnm_bssload_monitor_cfg_t; + +#define WNM_ROAM_TRIGGER_VERSION 1 +typedef struct wnm_roam_trigger_cfg { + uint8 version; + uint8 band; + uint16 type; + int16 trigger; /* trigger for each type in new roam algorithm */ +} wnm_roam_trigger_cfg_t; + +/* Data structures for Interface Create/Remove */ + +#define WL_INTERFACE_CREATE_VER (0) +#define WL_INTERFACE_CREATE_VER_1 1 +#define WL_INTERFACE_CREATE_VER_2 2 +#define WL_INTERFACE_CREATE_VER_3 3 + +/* + * The flags filed of the wl_interface_create is designed to be + * a Bit Mask. As of now only Bit 0 and Bit 1 are used as mentioned below. + * The rest of the bits can be used, incase we have to provide + * more information to the dongle + */ + +/* + * Bit 0 of flags field is used to inform whether the interface requested to + * be created is STA or AP. + * 0 - Create a STA interface + * 1 - Create an AP interface + * NOTE: This Bit 0 is applicable for the WL_INTERFACE_CREATE_VER < 2 + */ +#define WL_INTERFACE_CREATE_STA (0 << 0) +#define WL_INTERFACE_CREATE_AP (1 << 0) + +/* + * From revision >= 2 Bit 0 of flags field will not used be for STA or AP interface creation. + * "iftype" field shall be used for identifying the interface type. 
+ */ +typedef enum wl_interface_type { + WL_INTERFACE_TYPE_STA = 0, + WL_INTERFACE_TYPE_AP = 1, + WL_INTERFACE_TYPE_AWDL = 2, + WL_INTERFACE_TYPE_NAN = 3, + WL_INTERFACE_TYPE_P2P_GO = 4, + WL_INTERFACE_TYPE_P2P_GC = 5, + WL_INTERFACE_TYPE_P2P_DISC = 6, + WL_INTERFACE_TYPE_IBSS = 7, + WL_INTERFACE_TYPE_MAX +} wl_interface_type_t; + +/* + * Bit 1 of flags field is used to inform whether MAC is present in the + * data structure or not. + * 0 - Ignore mac_addr field + * 1 - Use the mac_addr field + */ +#define WL_INTERFACE_MAC_DONT_USE (0 << 1) +#define WL_INTERFACE_MAC_USE (1 << 1) + +/* + * Bit 2 of flags field is used to inform whether core or wlc index + * is present in the data structure or not. + * 0 - Ignore wlc_index field + * 1 - Use the wlc_index field + */ +#define WL_INTERFACE_WLC_INDEX_DONT_USE (0 << 2) +#define WL_INTERFACE_WLC_INDEX_USE (1 << 2) + +/* + * Bit 3 of flags field is used to create interface on the host requested interface index + * 0 - Ignore if_index field + * 1 - Use the if_index field + */ +#define WL_INTERFACE_IF_INDEX_USE (1 << 3) + +/* + * Bit 4 of flags field is used to assign BSSID + * 0 - Ignore bssid field + * 1 - Use the bssid field + */ +#define WL_INTERFACE_BSSID_INDEX_USE (1 << 4) + +typedef struct wl_interface_create { + uint16 ver; /* version of this struct */ + uint32 flags; /* flags that defines the operation */ + struct ether_addr mac_addr; /* Optional Mac address */ +} wl_interface_create_t; + +typedef struct wl_interface_create_v1 { + uint16 ver; /**< version of this struct */ + uint8 pad1[2]; /**< Padding bytes */ + uint32 flags; /**< flags that defines the operation */ + struct ether_addr mac_addr; /**< Optional Mac address */ + uint8 pad2[2]; /**< Padding bytes */ + uint32 wlc_index; /**< Optional wlc index */ +} wl_interface_create_v1_t; + +typedef struct wl_interface_create_v2 { + uint16 ver; /**< version of this struct */ + uint8 pad1[2]; /**< Padding bytes */ + uint32 flags; /**< flags that defines the operation 
*/ + struct ether_addr mac_addr; /**< Optional Mac address */ + uint8 iftype; /**< Type of interface created */ + uint8 pad2; /**< Padding bytes */ + uint32 wlc_index; /**< Optional wlc index */ +} wl_interface_create_v2_t; + +typedef struct wl_interface_create_v3 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length of whole structure including variable length */ + uint16 fixed_len; /**< Fixed length of this structure excluding data[] */ + uint8 iftype; /**< Type of interface created */ + uint8 wlc_index; /**< Optional wlc index */ + uint32 flags; /**< flags that defines the operation */ + struct ether_addr mac_addr; /**< Optional Mac address */ + struct ether_addr bssid; /**< Optional BSSID */ + uint8 if_index; /**< interface index requested by Host */ + uint8 pad[3]; /**< Padding bytes to ensure data[] is at 32 bit aligned */ + uint8 data[]; /**< Optional application/Module specific data */ +} wl_interface_create_v3_t; + +#define WL_INTERFACE_INFO_VER_1 1 +#define WL_INTERFACE_INFO_VER_2 2 + +typedef struct wl_interface_info_v1 { + uint16 ver; /**< version of this struct */ + struct ether_addr mac_addr; /**< MAC address of the interface */ + char ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */ + uint8 bsscfgidx; /**< source bsscfg index */ + uint8 PAD; +} wl_interface_info_v1_t; + +typedef struct wl_interface_info_v2 { + uint16 ver; /**< version of this struct */ + uint16 length; /**< length of the whole structure */ + struct ether_addr mac_addr; /**< MAC address of the interface */ + uint8 bsscfgidx; /**< source bsscfg index */ + uint8 if_index; /**< Interface index allocated by FW */ + char ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */ +} wl_interface_info_v2_t; + +#define PHY_RXIQEST_AVERAGING_DELAY 10 + +typedef struct wl_iqest_params { + uint32 rxiq; + uint8 niter; + uint8 delay; + uint8 PAD[2]; +} wl_iqest_params_t; + +typedef struct wl_iqest_sweep_params { + wl_iqest_params_t params; + uint8 nchannels; + uint8 channel[3]; 
/** variable */ +} wl_iqest_sweep_params_t; + +typedef struct wl_iqest_value { + uint8 channel; + uint8 PAD[3]; + uint32 rxiq; +} wl_iqest_value_t; + +typedef struct wl_iqest_result { + uint8 nvalues; + uint8 PAD[3]; + wl_iqest_value_t value[1]; +} wl_iqest_result_t; + +#define WL_PRIO_ROAM_PROF_V1 (1u) + +typedef struct wl_prio_roam_prof_v1 { + uint16 version; /* Version info */ + uint16 length; /* byte length of this structure */ + uint8 prio_roam_mode; /* Roam mode RCC/RCC+Full Scan */ + uint8 PAD[3]; +} wl_prio_roam_prof_v1_t; + +typedef enum wl_prio_roam_mode { + PRIO_ROAM_MODE_OFF = 0, /* Prio_Roam feature disable */ + PRIO_ROAM_MODE_RCC_ONLY = 1, /* Scan RCC list only */ + PRIO_ROAM_MODE_RCC_FULLSCAN = 2, /* Scan RCC list + Full scan */ + PRIO_ROAM_MODE_FULLSCAN_ONLY = 3 /* Full Scan only */ +} wl_prio_roam_mode_t; + +/* BTCX AIBSS (Oxygen) Status */ +typedef struct wlc_btc_aibss_info { + uint32 prev_tsf_l; // Lower 32 bits of last read of TSF + uint32 prev_tsf_h; // Higher 32 bits of last read of TSF + uint32 last_btinfo; // Last read of BT info + uint32 local_btinfo; // Local BT INFO BitMap + uint8 bt_out_of_sync_cnt; // BT not in sync with strobe + uint8 esco_off_cnt; // Count incremented when ESCO is off + uint8 strobe_enabled; // Set only in AIBSS mode + uint8 strobe_on; // strobe to BT is on for Oxygen + uint8 local_bt_in_sync; // Sync status of local BT when strobe is on + uint8 other_bt_in_sync; // Sync state of BT in other devices in AIBSS + uint8 local_bt_is_master; // Local BT is master + uint8 sco_prot_on; // eSCO Protection on in local device + uint8 other_esco_present; // eSCO status in other devices in AIBSS + uint8 rx_agg_change; // Indicates Rx Agg size needs to change + uint8 rx_agg_modified; // Rx Agg size modified + uint8 acl_grant_set; // ACL grants on for speeding up sync + uint8 write_ie_err_cnt; // BTCX Ie write error cnt + uint8 parse_ie_err_cnt; // BTCX IE parse error cnt + uint8 wci2_fail_cnt; // WCI2 init failure cnt + uint8 
strobe_enable_err_cnt; // Strobe enable err cnt + uint8 strobe_init_err_cnt; // Strobe init err cnt + uint8 tsf_jump_cnt; // TSF jump cnt + uint8 acl_grant_cnt; // ALC grant cnt + uint8 pad1; + uint16 ibss_tsf_shm; // SHM address of strobe TSF + uint16 pad2; +} wlc_btc_aibss_info_t; + +#define WLC_BTC_AIBSS_STATUS_VER 1 +#define WLC_BTC_AIBSS_STATUS_LEN (sizeof(wlc_btc_aibss_status_t) - 2 * (sizeof(uint16))) + +typedef struct wlc_btc_aibss_status { + uint16 version; // Version # + uint16 len; // Length of the structure(excluding len & version) + int32 mode; // Current value of btc_mode + uint16 bth_period; // bt coex period. read from shm. + uint16 agg_off_bm; // AGG OFF BM read from SHM + uint8 bth_active; // bt active session + uint8 pad[3]; + wlc_btc_aibss_info_t aibss_info; // Structure definition above +} wlc_btc_aibss_status_t; + +typedef enum { + STATE_NONE = 0, + + /* WLAN -> BT */ + W2B_DATA_SET = 21, + B2W_ACK_SET = 22, + W2B_DATA_CLEAR = 23, + B2W_ACK_CLEAR = 24, + + /* BT -> WLAN */ + B2W_DATA_SET = 31, + W2B_ACK_SET = 32, + B2W_DATA_CLEAR = 33, + W2B_ACK_CLEAR = 34 +} bwte_gci_intstate_t; + +#define WL_BWTE_STATS_VERSION 1 /* version of bwte_stats_t */ +typedef struct { + uint32 version; + + bwte_gci_intstate_t inttobt; + bwte_gci_intstate_t intfrombt; + + uint32 bt2wl_intrcnt; /* bt->wlan interrrupt count */ + uint32 wl2bt_intrcnt; /* wlan->bt interrupt count */ + + uint32 wl2bt_dset_cnt; + uint32 wl2bt_dclear_cnt; + uint32 wl2bt_aset_cnt; + uint32 wl2bt_aclear_cnt; + + uint32 bt2wl_dset_cnt; + uint32 bt2wl_dclear_cnt; + uint32 bt2wl_aset_cnt; + uint32 bt2wl_aclear_cnt; + + uint32 state_error_1; + uint32 state_error_2; + uint32 state_error_3; + uint32 state_error_4; +} bwte_stats_t; + +#define TBOW_MAX_SSID_LEN 32 +#define TBOW_MAX_PASSPHRASE_LEN 63 + +#define WL_TBOW_SETUPINFO_T_VERSION 1 /* version of tbow_setup_netinfo_t */ +typedef struct tbow_setup_netinfo { + uint32 version; + uint8 opmode; + uint8 pad; + uint8 macaddr[ETHER_ADDR_LEN]; + uint32 
ssid_len; + uint8 ssid[TBOW_MAX_SSID_LEN]; + uint8 passphrase_len; + uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN]; + chanspec_t chanspec; + uint8 PAD[2]; + uint32 channel; +} tbow_setup_netinfo_t; + +typedef enum tbow_ho_opmode { + TBOW_HO_MODE_START_GO = 0, + TBOW_HO_MODE_START_STA, + TBOW_HO_MODE_START_GC, + TBOW_HO_MODE_TEST_GO, + TBOW_HO_MODE_STOP_GO = 0x10, + TBOW_HO_MODE_STOP_STA, + TBOW_HO_MODE_STOP_GC, + TBOW_HO_MODE_TEARDOWN +} tbow_ho_opmode_t; + +/* Beacon trim feature statistics */ +/* configuration */ +#define BCNTRIMST_PER 0 /* Number of beacons to trim (0: disable) */ +#define BCNTRIMST_TIMEND 1 /* Number of bytes till TIM IE */ +#define BCNTRIMST_TSFLMT 2 /* TSF tolerance value (usecs) */ +/* internal use */ +#define BCNTRIMST_CUR 3 /* PSM's local beacon trim counter */ +#define BCNTRIMST_PREVLEN 4 /* Beacon length excluding the TIM IE */ +#define BCNTRIMST_TIMLEN 5 /* TIM IE Length */ +#define BCNTRIMST_RSSI 6 /* Partial beacon RSSI */ +#define BCNTRIMST_CHAN 7 /* Partial beacon channel */ +/* debug stat (off by default) */ +#define BCNTRIMST_DUR 8 /* RX duration until beacon trimmed */ +#define BCNTRIMST_RXMBSS 9 /* MYBSSID beacon received */ +#define BCNTRIMST_CANTRIM 10 /* # beacons which were trimmed */ +#define BCNTRIMST_LENCHG 11 /* # beacons not trimmed due to length change */ +#define BCNTRIMST_TSFDRF 12 /* # beacons not trimmed due to large TSF delta */ +#define BCNTRIMST_NOTIM 13 /* # beacons not trimmed due to TIM missing */ + +#define BCNTRIMST_NUM 14 + +#define WL_BCNTRIM_STATUS_VERSION_1 1 +typedef struct wl_bcntrim_status_query_v1 { + uint16 version; + uint16 len; /* Total length includes fixed fields */ + uint8 reset; /* reset after reading the stats */ + uint8 pad[3]; /* 4-byte alignment */ +} wl_bcntrim_status_query_v1_t; + +typedef struct wl_bcntrim_status_v1 { + uint16 version; + uint16 len; /* Total length includes fixed fields and variable data[] */ + uint8 curr_slice_id; /* slice index of the interface */ + uint8 applied_cfg; /* 
applied bcntrim N threshold */ + uint8 pad[2]; /* 4-byte alignment */ + uint32 fw_status; /* Bits representing bcntrim disable reason in FW */ + uint32 total_disable_dur; /* total duration (msec) bcntrim remains + disabled due to FW disable reasons + */ + uint32 data[]; /* variable length data containing stats */ +} wl_bcntrim_status_v1_t; + +#define BCNTRIM_STATS_MAX 10 /* Total stats part of the status data[] */ + +/* Bits for FW status */ +#define WL_BCNTRIM_DISABLE_HOST 0x1 /* Host disabled bcntrim through bcntrim IOVar */ +#define WL_BCNTRIM_DISABLE_PHY_RATE 0x2 /* bcntrim disabled because beacon rx rate is + * higher than phy_rate_thresh + */ +#define WL_BCNTRIM_DISABLE_QUIET_IE 0x4 /* bcntrim disable when Quiet IE present */ +#define WL_BCNTRIM_DISABLE_QBSSLOAD_IE 0x8 /* bcntrim disable when QBSS Load IE present */ +#define WL_BCNTRIM_DISABLE_OPERMODE_IE 0x10 /* bcntrim dsiable when opermode IE is present */ +#define WL_BCNTRIM_DISABLE_CSA_IE 0x20 /* bcntrim dsiable when CSA IE is present */ + +#define BCNTRIM_DISABLE_THRESHOLD_TIME 1000 * 10 /* enable bcntrim after a threshold (10sec) + * when disabled due to above mentioned IE's + */ +#define WL_BCNTRIM_CFG_VERSION_1 1 +/* Common IOVAR struct */ +typedef struct wl_bcntrim_cfg_v1 { + uint16 version; + uint16 len; /* Total length includes fixed fields and variable data[] */ + uint16 subcmd_id; /* subcommand id */ + uint16 pad; /* pad/reserved */ + uint8 data[]; /* subcommand data; could be empty */ +} wl_bcntrim_cfg_v1_t; + +/* subcommands ids */ +enum { + WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_THRESH = 0, /* PHY rate threshold above + which bcntrim is not applied + */ + WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1, /* Override bcntrim disable reasons */ + WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT = 2 /* TSF drift limit to consider bcntrim */ +}; + +#define BCNTRIM_MAX_PHY_RATE 48 /* in 500Kbps */ +#define BCNTRIM_MAX_TSF_DRIFT 65535 /* in usec */ +#define WL_BCNTRIM_OVERRIDE_DISABLE_MASK \ + 
(WL_BCNTRIM_DISABLE_QUIET_IE | WL_BCNTRIM_DISABLE_QBSSLOAD_IE) + +/* WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_TRESH */ +typedef struct wl_bcntrim_cfg_phy_rate_thresh { + uint32 rate; /* beacon rate (in 500kbps units) */ +} wl_bcntrim_cfg_phy_rate_thresh_t; + +/* WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */ +typedef struct wl_bcntrim_cfg_override_disable_mask { + uint32 mask; /* bits representing individual disable reason to override */ +} wl_bcntrim_cfg_override_disable_mask_t; + +/* WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT */ +typedef struct wl_bcntrim_cfg_tsf_drift_limit { + uint16 drift; /* tsf drift limit specified in usec */ + uint8 pad[2]; /* 4-byte alignment */ +} wl_bcntrim_cfg_tsf_drift_limit_t; + +/* -------------- TX Power Cap --------------- */ +#define TXPWRCAP_MAX_NUM_CORES 8 +#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2) + +#define TXPWRCAP_MAX_NUM_CORES_V3 4 +#define TXPWRCAP_MAX_NUM_ANTENNAS_V3 (TXPWRCAP_MAX_NUM_CORES_V3 * 2) + +#define TXPWRCAP_NUM_SUBBANDS 5 +#define TXPWRCAP_MAX_NUM_SUBGRPS 10 + +/* IOVAR txcapconfig enum's */ +#define TXPWRCAPCONFIG_WCI2 0u +#define TXPWRCAPCONFIG_HOST 1u +#define TXPWRCAPCONFIG_WCI2_AND_HOST 2u +#define TXPWRCAPCONFIG_NONE 0xFFu + +/* IOVAR txcapstate enum's */ +#define TXPWRCAPSTATE_LOW_CAP 0 +#define TXPWRCAPSTATE_HIGH_CAP 1 +#define TXPWRCAPSTATE_HOST_LOW_WCI2_LOW_CAP 0 +#define TXPWRCAPSTATE_HOST_LOW_WCI2_HIGH_CAP 1 +#define TXPWRCAPSTATE_HOST_HIGH_WCI2_LOW_CAP 2 +#define TXPWRCAPSTATE_HOST_HIGH_WCI2_HIGH_CAP 3 + +/* IOVAR txcapconfig and txcapstate structure is shared: SET and GET */ +#define TXPWRCAPCTL_VERSION 2 +#define TXPWRCAPCTL_VERSION_3 3 + +typedef struct wl_txpwrcap_ctl { + uint8 version; + uint8 ctl[TXPWRCAP_NUM_SUBBANDS]; +} wl_txpwrcap_ctl_t; + +typedef struct wl_txpwrcap_ctl_v3 { + uint8 version; + uint8 ctl[TXPWRCAP_MAX_NUM_SUBGRPS]; +} wl_txpwrcap_ctl_v3_t; + +/* IOVAR txcapdump structure: GET only */ +#define TXPWRCAP_DUMP_VERSION 2 +typedef struct wl_txpwrcap_dump { + uint8 
version; + uint8 pad0; + uint8 current_country[2]; + uint32 current_channel; + uint8 config[TXPWRCAP_NUM_SUBBANDS]; + uint8 state[TXPWRCAP_NUM_SUBBANDS]; + uint8 high_cap_state_enabled; + uint8 wci2_cell_status_last; + uint8 download_present; + uint8 num_subbands; + uint8 num_antennas; + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + uint8 num_cc_groups; + uint8 current_country_cc_group_info_index; + int8 low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + uint8 PAD[3]; +} wl_txpwrcap_dump_t; + +typedef struct wl_txpwrcap_dump_v3 { + uint8 version; + uint8 pad0; + uint8 current_country[2]; + uint32 current_channel; + uint8 config[TXPWRCAP_NUM_SUBBANDS]; + uint8 state[TXPWRCAP_NUM_SUBBANDS]; + uint8 high_cap_state_enabled; + uint8 wci2_cell_status_last; + uint8 download_present; + uint8 num_subbands; + uint8 num_antennas; + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + uint8 num_cc_groups; + uint8 current_country_cc_group_info_index; + uint8 cap_states_per_cc_group; + int8 host_low_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 host_low_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 host_high_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 host_high_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + uint8 PAD[2]; +} wl_txpwrcap_dump_v3_t; + +/* +* Capability flag for wl_txpwrcap_tbl_v2_t and wl_txpwrcap_t +* The index into pwrs will be: 0: onbody-cck, 1: onbody-ofdm, 2:offbody-cck, 3:offbody-ofdm +* +* For 5G power in SDB case as well as for non-SDB case, the value of flag will be: CAP_ONOFF_BODY +* The index into pwrs will be: 0: onbody, 1: offbody-ofdm +*/ + +#define CAP_ONOFF_BODY (0x1) /* on/off body only */ +#define CAP_CCK_OFDM (0x2) /* cck/ofdm capability only */ +#define CAP_LTE_CELL (0x4) /* cell on/off capability; required for iOS builds */ +#define CAP_HEAD_BODY (0x8) /* head/body 
capability */ +#define CAP_2G_DEPON_5G (0x10) /* 2G pwr caps depend on other slice 5G subband */ +#define CAP_SISO_MIMO (0x20) /* Siso/Mimo Separate Power Caps */ +#define CAP_ANT_TX (0x40) /* Separate Power Caps based on cell ant tx value */ +#define CAP_ONOFF_BODY_CCK_OFDM (CAP_ONOFF_BODY | CAP_CCK_OFDM) +#define CAP_TXPWR_ALL (CAP_ONOFF_BODY|CAP_CCK_OFDM|CAP_LTE_CELL|\ + CAP_SISO_MIMO|CAP_HEAD_BODY|CAP_ANT_TX) + +#define TXHDR_SEC_MAX 5u /* Deprecated. Kept till removed in all branches */ +#define TXPWRCAP_MAX_STATES 4u +#define TXPWRCAP_MAX_STATES_V3 10u +#define TXPWRCAP_CCKOFDM_ONOFFBODY_MAX_STATES 4u +#define TXPWRCAP_ONOFFBODY_MAX_STATES 2u +#define TXPWRCAP_ONOFFCELL_MAX_STATES 2u + +#define TXHDR_SEC_NONSDB_MAIN_2G 0 +#define TXHDR_SEC_NONSDB_MAIN_5G 1 +#define TXHDR_SEC_NONSDB_AUX_2G 2 +#define TXHDR_SEC_NONSDB_AUX_5G 3 +#define TXHDR_SEC_SDB_MAIN_2G 4 +#define TXHDR_SEC_SDB_MAIN_5G 5 +#define TXHDR_SEC_SDB_AUX_2G 6 +#define TXHDR_SEC_SDB_AUX_5G 7 +#define TXHDR_MAX_SECTION 8 + +#define WL_TXPWRCAP_MAX_SLICES 2 +#define WL_TXPWRCAPDUMP_VER 4 + +#define WL_TXPWRCAP_VERSION_2 2 +#define WL_TXPWRCAP_VERSION_3 3 + +typedef struct wl_txpwrcap { + uint8 capability; + uint8 num_cap_states; + uint8 section; /* Index from above,eg. TXHDR_SEC_NONSDB */ + int8 pwrs[][TXPWRCAP_NUM_SUBBANDS][TXPWRCAP_MAX_NUM_CORES]; +} wl_txpwrcap_t; + +typedef struct { + uint8 capability; + uint8 num_cap_states; + uint8 num_subgrps; + uint8 section; /* Index from above,eg. 
TXHDR_SEC_NONSDB */ + int8 pwrs[][TXPWRCAP_MAX_NUM_SUBGRPS][TXPWRCAP_MAX_NUM_ANTENNAS_V3]; +} wl_txpwrcap_v2_t; + +#define TXPWRCAP_DUMP_VERSION_4 4 +#define TXPWRCAP_DUMP_VERSION_5 5 + +typedef struct wl_txpwrcap_dump_v4 { + uint8 version; + uint8 num_pwrcap; + uint8 current_country[2]; + uint32 current_channel; + uint8 download_present; + uint8 num_cores; /* number cores on slice */ + uint8 num_cc_groups; /* number cc groups */ + uint8 current_country_cc_group_info_index; + /* first power cap always exist + * On main,-non-sdb follows by sdb2g and then sdb5g + * On aux slice - aux2g then aux5g. + */ + wl_txpwrcap_t pwrcap; /* first power cap */ +} wl_txpwrcap_dump_v4_t; + +typedef struct wl_txpwrcap_dump_v5 { + uint8 version; + uint8 num_pwrcap; + uint8 current_country[2]; + uint8 current_channel; + uint8 high_cap_state_enabled; + uint8 reserved[2]; + uint8 download_present; + uint8 num_ants; /* number antenna slice */ + uint8 num_cc_groups; /* number cc groups */ + uint8 current_country_cc_group_info_index; + uint8 ant_tx; /* current value of ant_tx */ + uint8 cell_status; /* current value of cell status */ + int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */ +} wl_txpwrcap_dump_v5_t; + +typedef struct wl_txpwrcap_tbl { + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + /* Stores values for valid antennas */ + int8 pwrcap_cell_on[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ + int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ +} wl_txpwrcap_tbl_t; + +typedef struct wl_txpwrcap_tbl_v2 { + uint8 version; + uint8 length; /* size of entire structure, including the pwrs */ + uint8 capability; /* capability bitmap */ + uint8 num_cores; /* number of cores i.e. entries in each cap state row */ + /* + * pwrs array has TXPWRCAP_MAX_STATES rows - one for each cap state. + * Each row has up to TXPWRCAP_MAX_NUM_CORES entries - one for each core. 
+ */ + uint8 pwrs[][TXPWRCAP_MAX_NUM_CORES]; /* qdBm units */ +} wl_txpwrcap_tbl_v2_t; + +typedef struct wl_txpwrcap_tbl_v3 { + uint8 version; + uint8 length; /* size of entire structure, including the pwrs */ + uint8 capability; /* capability bitmap */ + uint8 num_cores; /* number of cores */ + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES_V3]; + /* + * pwrs array has TXPWRCAP_MAX_STATES rows - one for each cap state. + * Each row has up to TXPWRCAP_MAX_NUM_ANTENNAS entries - for each antenna. + * Included in the rows of powers are rows for fail safe. + */ + int8 pwrs[][TXPWRCAP_MAX_NUM_ANTENNAS_V3]; /* qdBm units */ +} wl_txpwrcap_tbl_v3_t; + +/* ##### Ecounters section ##### */ +#define ECOUNTERS_VERSION_1 1 + +/* Input structure for ecounters IOVAR */ +typedef struct ecounters_config_request { + uint16 version; /* config version */ + uint16 set; /* Set where data will go. */ + uint16 size; /* Size of the set. */ + uint16 timeout; /* timeout in seconds. */ + uint16 num_events; /* Number of events to report. */ + uint16 ntypes; /* Number of entries in type array. */ + uint16 type[1]; /* Statistics Types (tags) to retrieve. 
*/ +} ecounters_config_request_t; + +#define ECOUNTERS_EVENTMSGS_VERSION_1 1 +#define ECOUNTERS_TRIGGER_CONFIG_VERSION_1 1 + +#define ECOUNTERS_EVENTMSGS_EXT_MASK_OFFSET \ + OFFSETOF(ecounters_eventmsgs_ext_t, mask[0]) + +#define ECOUNTERS_TRIG_CONFIG_TYPE_OFFSET \ + OFFSETOF(ecounters_trigger_config_t, type[0]) + +typedef struct ecounters_eventmsgs_ext { + uint8 version; + uint8 len; + uint8 mask[1]; +} ecounters_eventmsgs_ext_t; + +typedef struct ecounters_trigger_config { + uint16 version; /* version */ + uint16 set; /* set where data should go */ + uint16 rsvd; /* reserved */ + uint16 pad; /* pad/reserved */ + uint16 ntypes; /* number of types/tags */ + uint16 type[1]; /* list of types */ +} ecounters_trigger_config_t; + +#define ECOUNTERS_TRIGGER_REASON_VERSION_1 1 +typedef enum { + /* Triggered due to timer based ecounters */ + ECOUNTERS_TRIGGER_REASON_TIMER = 0, + /* Triggered due to event based configuration */ + ECOUNTERS_TRIGGER_REASON_EVENTS = 1, + ECOUNTERS_TRIGGER_REASON_D2H_EVENTS = 2, + ECOUNTERS_TRIGGER_REASON_H2D_EVENTS = 3, + ECOUNTERS_TRIGGER_REASON_USER_EVENTS = 4, + ECOUNTERS_TRIGGER_REASON_MAX = 5 +} ecounters_trigger_reasons_list_t; + +typedef struct ecounters_trigger_reason { + uint16 version; /* version */ + uint16 trigger_reason; /* trigger reason */ + uint32 sub_reason_code; /* sub reason code */ + uint32 trigger_time_now; /* time in ms at trigger */ + uint32 host_ref_time; /* host ref time */ +} ecounters_trigger_reason_t; + +#define WL_LQM_VERSION_1 1 + +/* For wl_lqm_t flags field */ +#define WL_LQM_CURRENT_BSS_VALID 0x1 +#define WL_LQM_TARGET_BSS_VALID 0x2 + +#define WL_PERIODIC_COMPACT_CNTRS_VER_1 (1) +#define WL_PERIODIC_TXBF_CNTRS_VER_1 (1) +typedef struct { + uint16 version; + uint16 pad; + /* taken from wl_wlc_cnt_t */ + uint32 txfail; + /* taken from wl_cnt_ge40mcst_v1_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txback; /**< blockack txcnt */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 txframe; /**< tx data frames */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txpspoll; /**< Number of TX PS-poll */ + + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxhlovfl; /**< number of length / header fifo overflows */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxback; /**< blockack rxcnt */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to 
the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ + uint32 rxtoolate; /**< receive too late */ + uint32 rxframe; /**< rx data frames */ + uint32 lqcm_report; /**< lqcm metric tx/rx idx */ + uint32 tx_toss_cnt; /* number of tx packets tossed */ + uint32 rx_toss_cnt; /* number of rx packets tossed */ + uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */ + uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */ + uint32 txbcnfrm; /**< beacons transmitted */ +} wl_periodic_compact_cntrs_v1_t; + +#define WL_PERIODIC_COMPACT_CNTRS_VER_2 (2) +typedef struct { + uint16 version; + uint16 pad; + /* taken from wl_wlc_cnt_t */ + uint32 txfail; + /* taken from wl_cnt_ge40mcst_v1_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txback; /**< blockack txcnt */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 txframe; /**< tx data frames */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txpspoll; /**< Number of TX PS-poll */ + + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxhlovfl; /**< number of length / header fifo overflows */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxback; /**< blockack rxcnt */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to 
the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ + uint32 rxtoolate; /**< receive too late */ + uint32 rxframe; /**< rx data frames */ + uint32 lqcm_report; /**< lqcm metric tx/rx idx */ + uint32 tx_toss_cnt; /* number of tx packets tossed */ + uint32 rx_toss_cnt; /* number of rx packets tossed */ + uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */ + uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 rxretry; /* Number of rx packets received after retry */ + uint32 rxdup; /* Number of dump packet. Indicates whether peer is receiving ack */ + uint32 chswitch_cnt; /* Number of channel switches */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ +} wl_periodic_compact_cntrs_v2_t; + +#define WL_PERIODIC_COMPACT_CNTRS_VER_3 (3) +typedef struct { + uint16 version; + uint16 pad; + /* taken from wl_wlc_cnt_t */ + uint32 txfail; + /* taken from wl_cnt_ge40mcst_v1_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txback; /**< blockack txcnt */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 txframe; /**< tx data frames */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txpspoll; /**< Number of TX PS-poll */ + + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 rxstrt; /**< number of received frames with a good PLCP */ + uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxhlovfl; /**< number of length / header fifo overflows */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxback; /**< blockack rxcnt */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxdtucastobss; /**< number of unicast frames addressed to 
the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ + uint32 rxtoolate; /**< receive too late */ + uint32 rxframe; /**< rx data frames */ + uint32 lqcm_report; /**< lqcm metric tx/rx idx */ + uint32 tx_toss_cnt; /* number of tx packets tossed */ + uint32 rx_toss_cnt; /* number of rx packets tossed */ + uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */ + uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 rxretry; /* Number of rx packets received after retry */ + uint32 rxdup; /* Number of dump packet. Indicates whether peer is receiving ack */ + uint32 chswitch_cnt; /* Number of channel switches */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 rxholes; /* Count of missed packets from peer */ +} wl_periodic_compact_cntrs_v3_t; + +#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_1 (1) +typedef struct { + uint16 version; + uint16 len; + uint32 he_rxtrig_rand; + uint32 he_colormiss_cnt; + uint32 he_txmtid_back; + uint32 he_rxmtid_back; + uint32 he_rxmsta_back; + uint32 he_rxtrig_basic; + uint32 he_rxtrig_murts; + uint32 he_rxtrig_bsrp; + uint32 he_rxdlmu; + uint32 he_physu_rx; + uint32 he_txtbppdu; +} wl_compact_he_cnt_wlc_v1_t; + +typedef struct { + uint16 version; + uint16 coreup; + uint32 txndpa; + uint32 txndp; + uint32 rxsf; + uint32 txbfm; + uint32 rxndpa_u; + uint32 rxndpa_m; + uint32 bferpt; + uint32 rxbfpoll; + uint32 txsf; +} wl_periodic_txbf_cntrs_v1_t; + +typedef struct { + struct ether_addr BSSID; + chanspec_t chanspec; + int32 rssi; + int32 snr; +} wl_rx_signal_metric_t; + 
+typedef struct { + uint8 version; + uint8 flags; + uint16 pad; + int32 noise_level; /* current noise level */ + wl_rx_signal_metric_t current_bss; + wl_rx_signal_metric_t target_bss; +} wl_lqm_t; + +#define WL_PERIODIC_IF_STATE_VER_1 (1) +typedef struct wl_if_state_compact { + uint8 version; + uint8 assoc_state; + uint8 antenna_count; /**< number of valid antenna rssi */ + int8 noise_level; /**< noise right after tx (in dBm) */ + int8 snr; /* current noise level */ + int8 rssi_sum; /**< summed rssi across all antennas */ + uint16 pad16; + int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */ + struct ether_addr BSSID; + chanspec_t chanspec; +} wl_if_state_compact_t; + +#define WL_EVENT_STATISTICS_VER_1 (1) +/* Event based statistics ecounters */ +typedef struct { + uint16 version; + uint16 pad; + struct ether_addr BSSID; /* BSSID of the BSS */ + uint32 txdeauthivalclass; +} wl_event_based_statistics_v1_t; + +/* ##### Ecounters v2 section ##### */ + +#define ECOUNTERS_VERSION_2 2 + +/* Enumeration of various ecounters request types. This namespace is different from + * global reportable stats namespace. +*/ +enum { + WL_ECOUNTERS_XTLV_REPORT_REQ = 1 +}; + +/* Input structure for ecounters IOVAR */ +typedef struct ecounters_config_request_v2 { + uint16 version; /* config version */ + uint16 len; /* Length of this struct including variable len */ + uint16 logset; /* Set where data will go. */ + uint16 reporting_period; /* reporting_period */ + uint16 num_reports; /* Number of timer expirations to report on */ + uint8 pad[2]; /* Reserved for future use */ + uint8 ecounters_xtlvs[]; /* Statistics Types (tags) to retrieve. 
*/ +} ecounters_config_request_v2_t; + +#define ECOUNTERS_STATS_TYPES_FLAG_SLICE 0x1 +#define ECOUNTERS_STATS_TYPES_FLAG_IFACE 0x2 +#define ECOUNTERS_STATS_TYPES_FLAG_GLOBAL 0x4 +#define ECOUNTERS_STATS_TYPES_DEFAULT 0x8 + +/* Slice mask bits */ +#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0 0x1 +#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE1 0x2 + +typedef struct ecounters_stats_types_report_req { + /* flags: bit0 = slice, bit1 = iface, bit2 = global, + * rest reserved + */ + uint16 flags; + uint16 if_index; /* host interface index */ + uint16 slice_mask; /* bit0 = slice0, bit1=slice1, rest reserved */ + uint8 pad[2]; /* padding */ + uint8 stats_types_req[]; /* XTLVs of requested types */ +} ecounters_stats_types_report_req_t; + +/* ##### Ecounters_Eventmsgs v2 section ##### */ + +#define ECOUNTERS_EVENTMSGS_VERSION_2 2 + +typedef struct event_ecounters_config_request_v2 { + uint16 version; /* config version */ + uint16 len; /* Length of this struct including variable len */ + uint16 logset; /* Set where data will go. */ + uint16 event_id; /* Event id for which this config is meant for */ + uint8 flags; /* Config flags */ + uint8 pad[3]; /* Reserved for future use */ + uint8 ecounters_xtlvs[]; /* Statistics Types (tags) to retrieve. 
*/ +} event_ecounters_config_request_v2_t; + +#define EVENT_ECOUNTERS_FLAGS_ADD (1 << 0) /* Add configuration for the event_id if set */ +#define EVENT_ECOUNTERS_FLAGS_DEL (1 << 1) /* Delete configuration for event_id if set */ +#define EVENT_ECOUNTERS_FLAGS_ANYIF (1 << 2) /* Interface filtering disable / off bit */ +#define EVENT_ECOUNTERS_FLAGS_BE (1 << 3) /* If cleared report stats of + * one event log buffer + */ +#define EVENT_ECOUNTERS_FLAGS_DEL_ALL (1 << 4) /* Delete all the configurations of + * event ecounters if set + */ + +#define EVENT_ECOUNTERS_FLAGS_BUS (1 << 5) /* Add configuration for the bus events */ +#define EVENT_ECOUNTERS_FLAGS_BUS_H2D (1 << 6) /* Add configuration for the bus direction + * 0 - D2H and 1 - H2D + */ + +#define EVENT_ECOUNTERS_FLAGS_DELAYED_FLUSH (1 << 7) /* Flush only when half of the total size + * of blocks gets filled. This is to avoid + * many interrupts to host. + */ +#define EVENT_ECOUNTERS_FLAGS_USER (1 << 6) /* Add configuration for user defined events + * Reuse the same flag as H2D + */ + +/* Ecounters suspend resume */ +#define ECOUNTERS_SUSPEND_VERSION_V1 1 +/* To be used in populating suspend_mask and suspend_bitmap */ +#define ECOUNTERS_SUSPEND_TIMER (1 << ECOUNTERS_TRIGGER_REASON_TIMER) +#define ECOUNTERS_SUSPEND_EVENTS (1 << ECOUNTERS_TRIGGER_REASON_EVENTS) + +typedef struct ecounters_suspend { + uint16 version; + uint16 len; + uint32 suspend_bitmap; /* type of ecounter reporting to be suspended */ + uint32 suspend_mask; /* type of ecounter reporting to be suspended */ +} ecounters_suspend_t; + +/* -------------- dynamic BTCOEX --------------- */ +#define DCTL_TROWS 2 /**< currently practical number of rows */ +#define DCTL_TROWS_MAX 4 /**< 2 extra rows RFU */ +/* DYNCTL profile flags */ +#define DCTL_FLAGS_DISABLED 0 /**< default value: all features disabled */ +#define DCTL_FLAGS_DYNCTL (1 << 0) /**< 1 - enabled, 0 - legacy only */ +#define DCTL_FLAGS_DESENSE (1 << 1) /**< auto desense is enabled */ +#define 
DCTL_FLAGS_MSWITCH (1 << 2) /**< mode switching is enabled */ +#define DCTL_FLAGS_PWRCTRL (1 << 3) /**< Tx power control is enabled */ +/* for now AGG on/off is handled separately */ +#define DCTL_FLAGS_TX_AGG_OFF (1 << 4) /**< TBD: allow TX agg Off */ +#define DCTL_FLAGS_RX_AGG_OFF (1 << 5) /**< TBD: allow RX agg Off */ +/* used for dry run testing only */ +#define DCTL_FLAGS_DRYRUN (1 << 7) /**< Enables dynctl dry run mode */ +#define IS_DYNCTL_ON(prof) ((prof->flags & DCTL_FLAGS_DYNCTL) != 0) +#define IS_DESENSE_ON(prof) ((prof->flags & DCTL_FLAGS_DESENSE) != 0) +#define IS_MSWITCH_ON(prof) ((prof->flags & DCTL_FLAGS_MSWITCH) != 0) +#define IS_PWRCTRL_ON(prof) ((prof->flags & DCTL_FLAGS_PWRCTRL) != 0) +/* desense level currently in use */ +#define DESENSE_OFF 0 +#define DFLT_DESENSE_MID 12 +#define DFLT_DESENSE_HIGH 2 + +/** + * dynctl data points(a set of btpwr & wlrssi thresholds) + * for mode & desense switching + */ +typedef struct btc_thr_data { + int8 mode; /**< used by desense sw */ + int8 bt_pwr; /**< BT tx power threshold */ + int8 bt_rssi; /**< BT rssi threshold */ + /* wl rssi range when mode or desense change may be needed */ + int8 wl_rssi_high; + int8 wl_rssi_low; +} btc_thr_data_t; + +/* dynctl. 
profile data structure */ +#define DCTL_PROFILE_VER 0x01 +#include +typedef BWL_PRE_PACKED_STRUCT struct dctl_prof { + uint8 version; /**< dynctl profile version */ + /* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */ + uint8 flags; /**< bit[6:3] reserved, bit7 - Dryrun (sim) - On */ + /** wl desense levels to apply */ + uint8 dflt_dsns_level; + uint8 low_dsns_level; + uint8 mid_dsns_level; + uint8 high_dsns_level; + /** mode switching hysteresis in dBm */ + int8 msw_btrssi_hyster; + /** default btcoex mode */ + uint8 default_btc_mode; + /** num of active rows in mode switching table */ + uint8 msw_rows; + /** num of rows in desense table */ + uint8 dsns_rows; + /** dynctl mode switching data table */ + btc_thr_data_t msw_data[DCTL_TROWS_MAX]; + /** dynctl desense switching data table */ + btc_thr_data_t dsns_data[DCTL_TROWS_MAX]; +} BWL_POST_PACKED_STRUCT dctl_prof_t; +#include + +/** dynctl status info */ +#include +typedef BWL_PRE_PACKED_STRUCT struct dynctl_status { + uint8 sim_on; /**< true if simulation is On */ + uint16 bt_pwr_shm; /**< BT per/task power as read from ucode */ + int8 bt_pwr; /**< BT pwr extracted & converted to dBm */ + int8 bt_rssi; /**< BT rssi in dBm */ + int8 wl_rssi; /**< last wl rssi reading used by btcoex */ + uint8 dsns_level; /**< current desense level */ + uint8 btc_mode; /**< current btcoex mode */ + /* add more status items if needed, pad to 4 BB if needed */ +} BWL_POST_PACKED_STRUCT dynctl_status_t; +#include + +/** dynctl simulation (dryrun data) */ +#include +typedef BWL_PRE_PACKED_STRUCT struct dynctl_sim { + uint8 sim_on; /**< simulation mode on/off */ + int8 btpwr; /**< simulated BT power in dBm */ + int8 btrssi; /**< simulated BT rssi in dBm */ + int8 wlrssi; /**< simulated WL rssi in dBm */ +} BWL_POST_PACKED_STRUCT dynctl_sim_t; +/* no default structure packing */ +#include + +/** PTK key maintained per SCB */ +#define RSN_TEMP_ENCR_KEY_LEN 16 +typedef struct wpa_ptk { + uint8 
kck[RSN_KCK_LENGTH]; /**< EAPOL-Key Key Confirmation Key (KCK) */ + uint8 kek[RSN_KEK_LENGTH]; /**< EAPOL-Key Key Encryption Key (KEK) */ + uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 1 (TK1) */ + uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 2 (TK2) */ +} wpa_ptk_t; + +/** GTK key maintained per SCB */ +typedef struct wpa_gtk { + uint32 idx; + uint32 key_len; + uint8 key[DOT11_MAX_KEY_SIZE]; +} wpa_gtk_t; + +/** FBT Auth Response Data structure */ +typedef struct wlc_fbt_auth_resp { + uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */ + uint8 pad[2]; + uint8 pmk_r1_name[WPA2_PMKID_LEN]; + wpa_ptk_t ptk; /**< pairwise key */ + wpa_gtk_t gtk; /**< group key */ + uint32 ie_len; + uint8 status; /**< Status of parsing FBT authentication + Request in application + */ + uint8 ies[1]; /**< IEs contains MDIE, RSNIE, + FBTIE (ANonce, SNonce,R0KH-ID, R1KH-ID) + */ +} wlc_fbt_auth_resp_t; + +/** FBT Action Response frame */ +typedef struct wlc_fbt_action_resp { + uint16 version; /**< structure version */ + uint16 length; /**< length of structure */ + uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */ + uint8 data_len; /**< len of ie from Category */ + uint8 data[1]; /**< data contains category, action, sta address, target ap, + status code,fbt response frame body + */ +} wlc_fbt_action_resp_t; + +#define MACDBG_PMAC_ADDR_INPUT_MAXNUM 16 +#define MACDBG_PMAC_OBJ_TYPE_LEN 8 + +typedef struct _wl_macdbg_pmac_param_t { + char type[MACDBG_PMAC_OBJ_TYPE_LEN]; + uint8 step; + uint8 w_en; + uint16 num; + uint32 bitmap; + uint8 addr_raw; + uint8 addr_num; + uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM]; + uint8 pad0[2]; + uint32 w_val; +} wl_macdbg_pmac_param_t; + +/** IOVAR 'svmp_sampcol' parameter. 
Used to set and read SVMP_SAMPLE_COLLECT's setting */ +typedef struct wl_svmp_sampcol_param { + uint32 version; /* version */ + uint8 enable; + uint8 trigger_mode; /* SVMP_SAMPCOL_TRIGGER */ + uint8 trigger_mode_s[2]; /* SVMP_SAMPCOL_PKTPROC */ + uint8 data_samplerate; /* SVMP_SAMPCOL_SAMPLERATE */ + uint8 data_sel_phy1; /* SVMP_SAMPCOL_PHY1MUX */ + uint8 data_sel_rx1; /* SVMP_SAMPCOL_RX1MUX without iqCompOut */ + uint8 data_sel_dualcap; /* SVMP_SAMPCOL_RX1MUX */ + uint8 pack_mode; /* SVMP_SAMPCOL_PACK */ + uint8 pack_order; + uint8 pack_cfix_fmt; + uint8 pack_1core_sel; + uint16 waitcnt; + uint16 caplen; + uint32 buff_addr_start; /* in word-size (2-bytes) */ + uint32 buff_addr_end; /* note: Tcl in byte-size, HW in vector-size (8-bytes) */ + uint8 int2vasip; + uint8 PAD; + uint16 status; +} wl_svmp_sampcol_t; + +#define WL_SVMP_SAMPCOL_PARAMS_VERSION 1 + +enum { + SVMP_SAMPCOL_TRIGGER_PKTPROC_TRANSITION = 0, + SVMP_SAMPCOL_TRIGGER_FORCE_IMMEDIATE, + SVMP_SAMPCOL_TRIGGER_RADAR_DET +}; + +enum { + SVMP_SAMPCOL_PHY1MUX_GPIOOUT = 0, + SVMP_SAMPCOL_PHY1MUX_FFT, + SVMP_SAMPCOL_PHY1MUX_DBGHX, + SVMP_SAMPCOL_PHY1MUX_RX1MUX +}; + +enum { + SVMP_SAMPCOL_RX1MUX_FARROWOUT = 4, + SVMP_SAMPCOL_RX1MUX_IQCOMPOUT, + SVMP_SAMPCOL_RX1MUX_DCFILTEROUT, + SVMP_SAMPCOL_RX1MUX_RXFILTEROUT, + SVMP_SAMPCOL_RX1MUX_ACIFILTEROUT +}; + +enum { + SVMP_SAMPCOL_SAMPLERATE_1XBW = 0, + SVMP_SAMPCOL_SAMPLERATE_2XBW +}; + +enum { + SVMP_SAMPCOL_PACK_DUALCAP = 0, + SVMP_SAMPCOL_PACK_4CORE, + SVMP_SAMPCOL_PACK_2CORE, + SVMP_SAMPCOL_PACK_1CORE +}; + +enum { + SVMP_SAMPCOL_PKTPROC_RESET = 0, + SVMP_SAMPCOL_PKTPROC_CARRIER_SEARCH, + SVMP_SAMPCOL_PKTPROC_WAIT_FOR_NB_PWR, + SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W1_PWR, + SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W2_PWR, + SVMP_SAMPCOL_PKTPROC_OFDM_PHY, + SVMP_SAMPCOL_PKTPROC_TIMING_SEARCH, + SVMP_SAMPCOL_PKTPROC_CHAN_EST_1, + SVMP_SAMPCOL_PKTPROC_LEG_SIG_DEC, + SVMP_SAMPCOL_PKTPROC_SIG_DECODE_1, + SVMP_SAMPCOL_PKTPROC_SIG_DECODE_2, + SVMP_SAMPCOL_PKTPROC_HT_AGC, + 
SVMP_SAMPCOL_PKTPROC_CHAN_EST_2, + SVMP_SAMPCOL_PKTPROC_PAY_DECODE, + SVMP_SAMPCOL_PKTPROC_DSSS_CCK_PHY, + SVMP_SAMPCOL_PKTPROC_WAIT_ENERGY_DROP, + SVMP_SAMPCOL_PKTPROC_WAIT_NCLKS, + SVMP_SAMPCOL_PKTPROC_PAY_DEC_EXT, + SVMP_SAMPCOL_PKTPROC_SIG_FAIL_DELAY, + SVMP_SAMPCOL_PKTPROC_RIFS_SEARCH, + SVMP_SAMPCOL_PKTPROC_BOARD_SWITCH_DIV_SEARCH, + SVMP_SAMPCOL_PKTPROC_DSSS_CCK_BOARD_SWITCH_DIV_SEARCH, + SVMP_SAMPCOL_PKTPROC_CHAN_EST_3, + SVMP_SAMPCOL_PKTPROC_CHAN_EST_4, + SVMP_SAMPCOL_PKTPROC_FINE_TIMING_SEARCH, + SVMP_SAMPCOL_PKTPROC_SET_CLIP_GAIN, + SVMP_SAMPCOL_PKTPROC_NAP, + SVMP_SAMPCOL_PKTPROC_VHT_SIGA_DEC, + SVMP_SAMPCOL_PKTPROC_VHT_SIGB_DEC, + SVMP_SAMPCOL_PKTPROC_PKT_ABORT, + SVMP_SAMPCOL_PKTPROC_DCCAL +}; + +/** IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */ +typedef struct svmp_mem { + uint32 addr; /**< offset to read svmp memory from vasip base address */ + uint16 len; /**< length in count of uint16's */ + uint16 val; /**< set the range of addr/len with a value */ +} svmp_mem_t; + +/** IOVAR 'mu_rate' parameter. read/set mu rate for upto four users */ +#define MU_RATE_CFG_VERSION 1 +typedef struct mu_rate { + uint16 version; /**< version of the structure as defined by MU_RATE_CFG_VERSION */ + uint16 length; /**< length of entire structure */ + uint8 auto_rate; /**< enable/disable auto rate */ + uint8 PAD; + uint16 rate_user[4]; /**< rate per each of four users, set to -1 for no change */ +} mu_rate_t; + +/** IOVAR 'mu_group' parameter. 
Used to set and read MU group recommendation setting */ +#define WL_MU_GROUP_AUTO_COMMAND -1 +#define WL_MU_GROUP_PARAMS_VERSION 3 +#define WL_MU_GROUP_METHOD_NAMELEN 64 +#define WL_MU_GROUP_NGROUP_MAX 15 +#define WL_MU_GROUP_NUSER_MAX 4 +#define WL_MU_GROUP_METHOD_MIN 0 +#define WL_MU_GROUP_NUMBER_AUTO_MIN 1 +#define WL_MU_GROUP_NUMBER_AUTO_MAX 15 +#define WL_MU_GROUP_NUMBER_FORCED_MAX 8 +#define WL_MU_GROUP_METHOD_OLD 0 +#define WL_MU_GROUP_MODE_AUTO 0 +#define WL_MU_GROUP_MODE_FORCED 1 +#define WL_MU_GROUP_FORCED_1GROUP 1 +#define WL_MU_GROUP_ENTRY_EMPTY -1 +typedef struct mu_group { + uint32 version; /* version */ + int16 forced; /* forced group recommendation */ + int16 forced_group_mcs; /* forced group with mcs */ + int16 forced_group_num; /* forced group number */ + int16 group_option[WL_MU_GROUP_NGROUP_MAX][WL_MU_GROUP_NUSER_MAX]; + /* set mode for forced grouping and read mode for auto grouping */ + int16 group_GID[WL_MU_GROUP_NGROUP_MAX]; + int16 group_method; /* methof for VASIP group recommendation */ + int16 group_number; /* requested number for VASIP group recommendation */ + int16 auto_group_num; /* exact number from VASIP group recommendation */ + int8 group_method_name[WL_MU_GROUP_METHOD_NAMELEN]; + uint8 PAD[2]; +} mu_group_t; + +typedef struct mupkteng_sta { + struct ether_addr ea; + uint8 PAD[2]; + int32 nrxchain; + int32 idx; +} mupkteng_sta_t; + +typedef struct mupkteng_client { + int32 rspec; + int32 idx; + int32 flen; + int32 nframes; +} mupkteng_client_t; + +typedef struct mupkteng_tx { + mupkteng_client_t client[8]; + int32 nclients; + int32 ntx; +} mupkteng_tx_t; + +/* + * MU Packet engine interface. + * The following two definitions will go into + * wlioctl_defs.h + * when wl utility changes are merged to EAGLE TOB & Trunk + */ + +#define WL_MUPKTENG_PER_TX_START 0x10 +#define WL_MUPKTENG_PER_TX_STOP 0x20 + +/** IOVAR 'mu_policy' parameter. 
Used to configure MU admission control policies */ +#define WL_MU_POLICY_PARAMS_VERSION 1 +#define WL_MU_POLICY_SCHED_DEFAULT 60 +#define WL_MU_POLICY_DISABLED 0 +#define WL_MU_POLICY_ENABLED 1 +#define WL_MU_POLICY_NRX_MIN 1 +#define WL_MU_POLICY_NRX_MAX 2 +typedef struct mu_policy { + uint16 version; + uint16 length; + uint32 sched_timer; + uint32 pfmon; + uint32 pfmon_gpos; + uint32 samebw; + uint32 nrx; + uint32 max_muclients; +} mu_policy_t; + +#define WL_NAN_BAND_STR_SIZE 5 /* sizeof ("auto") */ + +/** Definitions of different NAN Bands */ +/* do not change the order */ +enum { + NAN_BAND_B = 0, + NAN_BAND_A, + NAN_BAND_AUTO, + NAN_BAND_INVALID = 0xFF +}; + +/* ifdef WL11ULB */ +/* ULB Mode configured via "ulb_mode" IOVAR */ +enum { + ULB_MODE_DISABLED = 0, + ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */ + ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */ + /* Add all other enums before this */ + MAX_SUPP_ULB_MODES +}; + +/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only. 
+ * Values of this enumeration are also used to specify 'Current Operational Bandwidth' + * and 'Primary Operational Bandwidth' sub-fields in 'ULB Operations' field (used in + * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute) + */ +typedef enum { + ULB_BW_DISABLED = 0, + ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */ + ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */ + ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */ + /* Add all other enums before this */ + MAX_SUPP_ULB_BW +} ulb_bw_type_t; +/* endif WL11ULB */ + +#define WL_MESH_IOCTL_VERSION 1 +#define MESH_IOC_BUFSZ 512 /* sufficient ioc buff size for mesh */ +/* container for mesh iovtls & events */ +typedef struct wl_mesh_ioc { + uint16 version; /* interface command or event version */ + uint16 id; /* mesh ioctl cmd ID */ + uint16 len; /* total length of all tlv records in data[] */ + uint16 pad; /* pad to be 32 bit aligment */ + uint8 data[]; /* var len payload of bcm_xtlv_t type */ +} wl_mesh_ioc_t; + +enum wl_mesh_cmds { + WL_MESH_CMD_ENABLE = 1, + WL_MESH_CMD_JOIN = 2, + WL_MESH_CMD_PEER_STATUS = 3, + WL_MESH_CMD_ADD_ROUTE = 4, + WL_MESH_CMD_DEL_ROUTE = 5, + WL_MESH_CMD_ADD_FILTER = 6, + WL_MESH_CMD_ENAB_AL_METRIC = 7 +}; + +enum wl_mesh_cmd_xtlv_id { + WL_MESH_XTLV_ENABLE = 1, + WL_MESH_XTLV_JOIN = 2, + WL_MESH_XTLV_STATUS = 3, + WL_MESH_XTLV_ADD_ROUTE = 4, + WL_MESH_XTLV_DEL_ROUTE = 5, + WL_MESH_XTLV_ADD_FILTER = 6, + WL_MESH_XTLV_ENAB_AIRLINK = 7 +}; +/* endif WLMESH */ + +/* Fast BSS Transition parameter configuration */ +#define FBT_PARAM_CURRENT_VERSION 0 + +typedef struct _wl_fbt_params { + uint16 version; /* version of the structure + * as defined by FBT_PARAM_CURRENT_VERSION + */ + uint16 length; /* length of the entire structure */ + + uint16 param_type; /* type of parameter defined below */ + uint16 param_len; /* length of the param_value */ + uint8 param_value[1]; /* variable length */ +} wl_fbt_params_t; + +#define WL_FBT_PARAM_TYPE_RSNIE 0 +#define 
WL_FBT_PARAM_TYPE_FTIE 0x1 +#define WL_FBT_PARAM_TYPE_SNONCE 0x2 +#define WL_FBT_PARAM_TYPE_MDE 0x3 +#define WL_FBT_PARAM_TYPE_PMK_R0_NAME 0x4 +#define WL_FBT_PARAM_TYPE_R0_KHID 0x5 +#define WL_FBT_PARAM_TYPE_R1_KHID 0x6 +#define WL_FBT_PARAM_TYPE_FIRST_INVALID 0x7 + +/* Assoc Mgr commands for fine control of assoc */ +#define WL_ASSOC_MGR_CURRENT_VERSION 0x0 + +typedef struct { + uint16 version; /* version of the structure as + * defined by WL_ASSOC_MGR_CURRENT_VERSION + */ + uint16 length; /* length of the entire structure */ + + uint16 cmd; + uint16 params; +} wl_assoc_mgr_cmd_t; + +#define WL_ASSOC_MGR_CMD_PAUSE_ON_EVT 0 /* have assoc pause on certain events */ +#define WL_ASSOC_MGR_CMD_ABORT_ASSOC 1 + +#define WL_ASSOC_MGR_PARAMS_EVENT_NONE 0 /* use this to resume as well as clear */ +#define WL_ASSOC_MGR_PARAMS_PAUSE_EVENT_AUTH_RESP 1 + +#define WL_WINVER_STRUCT_VER_1 (1) + +typedef struct wl_winver { + + /* Version and length of this structure. Length includes all fields in wl_winver_t */ + uint16 struct_version; + uint16 struct_length; + + /* Windows operating system version info (Microsoft provided) */ + struct { + uint32 major_ver; + uint32 minor_ver; + uint32 build; + } os_runtime; + + /* NDIS runtime version (Microsoft provided) */ + struct { + uint16 major_ver; + uint16 minor_ver; + } ndis_runtime; + + /* NDIS Driver version (Broadcom provided) */ + struct { + uint16 major_ver; + uint16 minor_ver; + } ndis_driver; + + /* WDI Upper Edge (UE) Driver version (Microsoft provided) */ + struct { + uint8 major_ver; + uint8 minor_ver; + uint8 suffix; + } wdi_ue; + + /* WDI Lower Edge (LE) Driver version (Broadcom provided) */ + struct { + uint8 major_ver; + uint8 minor_ver; + uint8 suffix; + } wdi_le; + uint8 PAD[2]; +} wl_winver_t; + +/* defined(WLRCC) || defined(ROAM_CHANNEL_CACHE) */ +#define MAX_ROAM_CHANNEL 20 +typedef struct { + int32 n; + chanspec_t channels[MAX_ROAM_CHANNEL]; +} wl_roam_channel_list_t; +/* endif RCC || ROAM_CHANNEL_CACHE */ + +/* 
values for IOV_MFP arg */ +enum { + WL_MFP_NONE = 0, + WL_MFP_CAPABLE, + WL_MFP_REQUIRED +}; + +typedef enum { + CHANSW_UNKNOWN = 0, /* channel switch due to unknown reason */ + CHANSW_SCAN = 1, /* channel switch due to scan */ + CHANSW_PHYCAL = 2, /* channel switch due to phy calibration */ + CHANSW_INIT = 3, /* channel set at WLC up time */ + CHANSW_ASSOC = 4, /* channel switch due to association */ + CHANSW_ROAM = 5, /* channel switch due to roam */ + CHANSW_MCHAN = 6, /* channel switch triggered by mchan module */ + CHANSW_IOVAR = 7, /* channel switch due to IOVAR */ + CHANSW_CSA_DFS = 8, /* channel switch due to chan switch announcement from AP */ + CHANSW_APCS = 9, /* Channel switch from AP channel select module */ + CHANSW_AWDL = 10, /* channel switch due to AWDL */ + CHANSW_FBT = 11, /* Channel switch from FBT module for action frame response */ + CHANSW_UPDBW = 12, /* channel switch at update bandwidth */ + CHANSW_ULB = 13, /* channel switch at ULB */ + CHANSW_LAST = 14 /* last channel switch reason */ +} chansw_reason_t; + +/* + * WOWL unassociated mode power save pattern.
+ */ +typedef struct wowl_radio_duty_cycle { + uint16 wake_interval; + uint16 sleep_interval; +} wowl_radio_duty_cycle_t; + +typedef struct nd_ra_ol_limits { + uint16 version; /* version of the iovar buffer */ + uint16 type; /* type of data provided */ + uint16 length; /* length of the entire structure */ + uint16 pad1; /* pad union to 4 byte boundary */ + union { + struct { + uint16 min_time; /* seconds, min time for RA offload hold */ + uint16 lifetime_percent; + /* percent, lifetime percentage for offload hold time */ + } lifetime_relative; + struct { + uint16 hold_time; /* seconds, RA offload hold time */ + uint16 pad2; /* unused */ + } fixed; + } limits; +} nd_ra_ol_limits_t; + +#define ND_RA_OL_LIMITS_VER 1 + +/* nd_ra_ol_limits sub-types */ +#define ND_RA_OL_LIMITS_REL_TYPE 0 /* relative, percent of RA lifetime */ +#define ND_RA_OL_LIMITS_FIXED_TYPE 1 /* fixed time */ + +/* buffer lengths for the different nd_ra_ol_limits types */ +#define ND_RA_OL_LIMITS_REL_TYPE_LEN 12 +#define ND_RA_OL_LIMITS_FIXED_TYPE_LEN 10 + +/* + * Temperature Throttling control mode + */ +typedef struct wl_temp_control { + uint8 enable; + uint8 PAD; + uint16 control_bit; +} wl_temp_control_t; + +/* SensorHub Interworking mode */ + +#define SHUB_CONTROL_VERSION 1 +#define SHUB_CONTROL_LEN 12 + +typedef struct { + uint16 verison; + uint16 length; + uint16 cmd; + uint16 op_mode; + uint16 interval; + uint16 enable; +} shub_control_t; + +/* WLC_MAJOR_VER <= 5 */ +/* Data structures for non-TLV format */ + +/* Data structures for rsdb caps */ +/* + * The flags field of the rsdb_caps_response is designed to be + * a Bit Mask. As of now only Bit 0 is used as mentioned below. + */ + +/* Bit-0 in flags is used to indicate if the cores can operate synchronously +* i.e either as 2x2 MIMO or 2(1x1 SISO). 
This is true only for 4349 variants +* 0 - device can operate only in rsdb mode (eg: 4364) +* 1 - device can operate in both rsdb and mimo (eg : 4359 variants) +*/ + +#define WL_RSDB_CAPS_VER 2 +#define SYNCHRONOUS_OPERATION_TRUE (1 << 0) +#define WL_RSDB_CAPS_FIXED_LEN OFFSETOF(rsdb_caps_response_t, num_chains) + +typedef struct rsdb_caps_response { + uint8 ver; /* Version */ + uint8 len; /* length of this structure excluding ver and len */ + uint8 rsdb; /* TRUE for rsdb chip */ + uint8 num_of_cores; /* no of d11 cores */ + uint16 flags; /* Flags to indicate various capabilities */ + uint8 num_chains[1]; /* Tx/Rx chains for each core */ +} rsdb_caps_response_t; + +/* Data structures for rsdb bands */ + +#define WL_RSDB_BANDS_VER 2 +#define WL_RSDB_BANDS_FIXED_LEN OFFSETOF(rsdb_bands_t, band) + +typedef struct rsdb_bands +{ + uint8 ver; + uint8 len; + uint16 num_cores; /* num of D11 cores */ + int16 band[1]; /* The band operating on each of the d11 cores */ +} rsdb_bands_t; + +/* rsdb config */ + +#define WL_RSDB_CONFIG_VER 3 +#define ALLOW_SIB_PARALLEL_SCAN (1 << 0) +#define MAX_BANDS 2 + +#define WL_RSDB_CONFIG_LEN sizeof(rsdb_config_t) + +typedef uint8 rsdb_opmode_t; +typedef uint32 rsdb_flags_t; + +typedef enum rsdb_modes { + WLC_SDB_MODE_NOSDB_MAIN = 1, /* 2X2 or MIMO mode (applicable only for 4355) */ + WLC_SDB_MODE_NOSDB_AUX = 2, + WLC_SDB_MODE_SDB_MAIN = 3, /* This is RSDB mode(default) applicable only for 4364 */ + WLC_SDB_MODE_SDB_AUX = 4, + WLC_SDB_MODE_SDB_AUTO = 5, /* Same as WLC_RSDB_MODE_RSDB(1+1) mode above */ +} rsdb_modes_t; + +typedef struct rsdb_config { + uint8 ver; + uint8 len; + uint16 reserved; + rsdb_opmode_t non_infra_mode; + rsdb_opmode_t infra_mode[MAX_BANDS]; + rsdb_flags_t flags[MAX_BANDS]; + rsdb_opmode_t current_mode; /* Valid only in GET, returns the current mode */ + uint8 pad[3]; +} rsdb_config_t; + +/* WLC_MAJOR_VER > =5 */ +/* TLV definitions and data structures for rsdb subcmds */ + +enum wl_rsdb_cmd_ids { + /* RSDB ioctls */ + 
WL_RSDB_CMD_VER = 0, + WL_RSDB_CMD_CAPS = 1, + WL_RSDB_CMD_BANDS = 2, + WL_RSDB_CMD_CONFIG = 3, + /* Add before this !! */ + WL_RSDB_CMD_LAST +}; +#define WL_RSDB_IOV_VERSION 0x1 + +typedef struct rsdb_caps_response_v1 { + uint8 rsdb; /* TRUE for rsdb chip */ + uint8 num_of_cores; /* no of d11 cores */ + uint16 flags; /* Flags to indicate various capabilities */ + uint8 num_chains[MAX_NUM_D11CORES]; /* Tx/Rx chains for each core */ + uint8 band_cap[MAX_NUM_D11CORES]; /* band cap bitmask per slice */ +} rsdb_caps_response_v1_t; + +typedef struct rsdb_bands_v1 +{ + uint8 num_cores; /* num of D11 cores */ + uint8 pad; /* padding bytes for 4 byte alignment */ + int8 band[MAX_NUM_D11CORES]; /* The band operating on each of the d11 cores */ +} rsdb_bands_v1_t; + +typedef struct rsdb_config_xtlv { + rsdb_opmode_t reserved1; /* Non_infra mode is no more applicable */ + rsdb_opmode_t infra_mode[MAX_BANDS]; /* Target mode for Infra association */ + uint8 pad; /* pad bytes for 4 byte alignment */ + rsdb_flags_t flags[MAX_BANDS]; + rsdb_opmode_t current_mode; /* GET only; has current mode of operation */ + uint8 pad1[3]; +} rsdb_config_xtlv_t; + +/* Definitions for slot_bss chanseq iovar */ +#define WL_SLOT_BSS_VERSION 1 + +/* critical slots max size */ +#define WL_SLOTTED_BSS_CS_BMP_CFG_MAX_SZ 128 /* arbitrary */ + +enum wl_slotted_bss_cmd_id { + WL_SLOTTED_BSS_CMD_VER = 0, + WL_SLOTTED_BSS_CMD_CHANSEQ = 1, + WL_SLOTTED_BSS_CMD_CS_BMP = 2 /* critical slots bitmap */ +}; +typedef uint16 chan_seq_type_t; +enum chan_seq_type { + CHAN_SEQ_TYPE_AWDL = 1, + CHAN_SEQ_TYPE_SLICE = 2, + CHAN_SEQ_TYPE_NAN = 3 +}; +typedef uint8 sched_flag_t; +enum sched_flag { + NO_SDB_SCHED = 0x1, + SDB_TDM_SCHED = 0x2, + SDB_SPLIT_BAND_SCHED = 0x4, /* default mode for 4357 */ + MAIN_ONLY = 0x8, + AUX_ONLY = 0x10, + SDB_DUAL_TIME = (MAIN_ONLY | AUX_ONLY), + NO_SDB_MAIN_ONLY = (NO_SDB_SCHED | MAIN_ONLY), /* default mode for 4364 */ + SDB_TDM_SCHED_MAIN = (SDB_TDM_SCHED | MAIN_ONLY), + SDB_TDM_SCHED_AUX 
= (SDB_TDM_SCHED | AUX_ONLY), + SDB_TDM_SCHED_DUAL_TIME = (SDB_TDM_SCHED | SDB_DUAL_TIME), + SDB_SPLIT_BAND_SCHED_DUAL_TIME = (SDB_SPLIT_BAND_SCHED | SDB_DUAL_TIME) +}; + +typedef struct chan_seq_tlv_data { + uint32 flags; + uint8 data[1]; +} chan_seq_tlv_data_t; + +typedef struct chan_seq_tlv { + chan_seq_type_t type; + uint16 len; + chan_seq_tlv_data_t chanseq_data[1]; +} chan_seq_tlv_t; + +typedef struct sb_channel_sequence { + sched_flag_t sched_flags; /* (sdb-tdm or sdb-sb or Dual-Time) */ + uint8 num_seq; /* number of chan_seq_tlv following */ + uint16 pad; + chan_seq_tlv_t seq[1]; +} sb_channel_sequence_t; + +typedef struct slice_chan_seq { + uint8 slice_index; /* 0(Main) or 1 (Aux) */ + uint8 num_chanspecs; + uint8 dur; + uint8 pad; + chanspec_t chanspecs[1]; +} slice_chan_seq_t; + +#define SLOT_BSS_SLICE_TYPE_DUR_MAX_RANGE 2u +#define SLOTTED_BSS_AGGR_EN (1 << 0) /* Bitmap of mode */ +#define SLOTTED_BSS_AGGR_LIMIT_DUR (1 << 1) /* Jira 49554 */ + +#define WL_SLICE_CHAN_SEQ_FIXED_LEN OFFSETOF(slice_chan_seq_t, chanspecs) +/* Definitions for slotted_bss stats */ +#define SBSS_STATS_VERSION 1 +#define SBSS_STATS_CURRENT_VERSION SBSS_STATS_VERSION + +#define SBSS_MAX_CHAN_STATS 4 + +typedef struct sbss_core_stats { + uint32 sb_slot_start; + uint32 sb_slot_end; + uint32 sb_slot_skip; + uint32 mismatch_count; +} sbss_core_stats_t; + +typedef struct sbss_chan_stats { + chanspec_t chanspec; + uint32 slot_start; + uint32 slot_end; + uint32 slot_skip; +} sbss_chan_stats_t; + +typedef struct sbss_stats_v1 { + uint16 version; + uint16 length; + sbss_core_stats_t corestats[MAX_NUM_D11CORES]; + sbss_chan_stats_t sbss_chanstats[MAX_NUM_D11CORES][SBSS_MAX_CHAN_STATS]; +} sbss_stats_t; + +/* slotted bss critical slots */ +typedef struct wl_sbss_cs_bmp_s { + uint8 bitmap_len; + uint8 pad[3]; + uint8 bitmap[]; +} wl_sbss_cs_bmp_t; + +typedef struct sim_pm_params { + uint32 enabled; + uint16 cycle; + uint16 up; +} sim_pm_params_t; + +/* Digital napping status */ +#define 
WL_NAP_STATUS_VERSION_1 1 +typedef struct wl_nap_status_v1 { + uint16 version; /* structure version */ + uint16 len; /* length of returned data */ + uint16 fw_status; /* bitmask of FW disable reasons */ + uint8 hw_status; /* bitmask for actual HW state info */ + uint8 slice_index; /* which slice this represents */ + uint32 total_disable_dur; /* total time (ms) disabled for fw_status */ +} wl_nap_status_v1_t; + +/* Bits for fw_status */ +#define NAP_DISABLED_HOST 0x0001 /* Host has disabled through nap_enable */ +#define NAP_DISABLED_RSSI 0x0002 /* Disabled because of nap_rssi_threshold */ +#define NAP_DISABLED_SCAN 0x0004 /* Disabled because of scan */ +#define NAP_DISABLED_ASSOC 0x0008 /* Disabled because of association */ +#define NAP_DISABLED_LTE 0x0010 /* Disabled because of LTE */ +#define NAP_DISABLED_ACI 0x0020 /* Disabled because of ACI mitigation */ + +/* Bits for hw_status */ +#define NAP_HWCFG 0x01 /* State of NAP config bit in phy HW */ +#define NAP_NOCLK 0x80 /* No clock to read HW (e.g. 
core down) */ + +/* ifdef WL_NATOE */ +#define WL_NATOE_IOCTL_VERSION 1 +#define WL_NATOE_IOC_BUFSZ 512 /* sufficient ioc buff size for natoe */ +#define WL_NATOE_DBG_STATS_BUFSZ 2048 +#define NATOE_FLAGS_ENAB_MASK 0x1 +#define NATOE_FLAGS_ACTIVE_MASK 0x2 +#define NATOE_FLAGS_PUBNW_MASK 0x4 +#define NATOE_FLAGS_PVTNW_MASK 0x8 +#define NATOE_FLAGS_ENAB_SHFT_MASK 0 +#define NATOE_FLAGS_ACTIVE_SHFT_MASK 1 +#define NATOE_FLAGS_PUBNW_SHFT_MASK 2 +#define NATOE_FLAGS_PVTNW_SHFT_MASK 3 +#define NATOE_FLAGS_PUB_NW_UP (1 << NATOE_FLAGS_PUBNW_SHFT_MASK) +#define NATOE_FLAGS_PVT_NW_UP (1 << NATOE_FLAGS_PVTNW_SHFT_MASK) + +#define PCIE_FRWDPKT_STATS_VERSION 1 + +/* Module version is 1 for IGUANA */ +#define WL_NATOE_MODULE_VER_1 1 +/* Module version is 2 for Lemur */ +#define WL_NATOE_MODULE_VER_2 2 + +/* WL_NATOE_CMD_MOD_VER */ +typedef uint16 wl_natoe_ver_t; +/* config natoe STA and AP IP's structure */ +typedef struct { + uint32 sta_ip; + uint32 sta_netmask; + uint32 sta_router_ip; + uint32 sta_dnsip; + uint32 ap_ip; + uint32 ap_netmask; +} wl_natoe_config_ips_t; + +/* natoe ports config structure */ +typedef struct { + uint16 start_port_num; + uint16 no_of_ports; +} wl_natoe_ports_config_t; + +/* natoe ports exception info */ +typedef struct { + uint16 sta_port_num; + uint16 dst_port_num; /* for SIP type protocol, dst_port_num info can be ignored by FW */ + uint32 ip; /* for SIP ip is APcli_ip and for port clash it is dst_ip */ + uint8 entry_type; /* Create/Destroy */ + uint8 pad[3]; +} wl_natoe_exception_port_t; + +/* container for natoe ioctls & events */ +typedef struct wl_natoe_ioc { + uint16 version; /* interface command or event version */ + uint16 id; /* natoe ioctl cmd ID */ + uint16 len; /* total length of all tlv records in data[] */ + uint16 pad; /* pad to be 32 bit aligment */ + uint8 data[]; /* var len payload of bcm_xtlv_t type */ +} wl_natoe_ioc_t; + +typedef struct wl_natoe_pool_stats_v1 { + /* For debug purposes */ + uint16 poolreorg_cnt; + uint16 
poolrevert_cnt; + uint16 txfrag_state; + uint16 rxfrag_state; + uint16 txfrag_plen; + uint16 rxfrag_plen; + uint16 tx_pavail; + uint16 rx_pavail; + uint16 txmin_bkup_bufs; + uint16 rxmin_bkup_bufs; + uint16 pktpool_sbuf_alloc; + uint16 pktpool_plen; + uint16 pktpool_pavail; + /* Peak shared buffer count in all iterations */ + uint16 sbuf_peak; + /* Peak shared buffer count in current D3 iteration */ + uint16 sbuf_peak_cur; +} wl_natoe_pool_stats_v1_t; + +typedef struct wl_natoe_arp_entry_v1 { + struct ipv4_addr ip; + struct ether_addr mac_addr; + uint8 lifetime; + uint8 flags; +} wl_natoe_arp_entry_v1_t; + +typedef struct wl_natoe_dbg_arp_tbl_info_v1 { + uint8 valid_arp_entries; + uint8 PAD[3]; + wl_natoe_arp_entry_v1_t arp_ent[]; +} wl_natoe_dbg_arp_tbl_info_v1_t; + +typedef struct wl_natoe_skip_port_entry_v1 { + struct ipv4_addr srcip; + uint16 src_port; + uint16 lifetime; +} wl_natoe_skip_port_entry_v1_t; + +typedef struct wl_natoe_skip_port_info_v1 { + uint8 valid_entries; + uint8 PAD[3]; + wl_natoe_skip_port_entry_v1_t skip_port_ent[]; +} wl_natoe_skip_port_info_v1_t; + +typedef struct wl_natoe_dbg_stats_v1 { + uint16 active_nat_entries; + uint16 active_dns_entries; + uint16 active_icmp_entries; + uint16 valid_arp_entries; + uint16 prev_nat_entries; + uint16 prev_dns_entries; + uint16 tcp_fast_reclaim_cnt; + uint16 mcast_packets; + uint16 bcast_packets; + uint16 port_commands_rcvd; + uint16 unsupported_prot; + uint16 arp_req_sent; + uint16 arp_rsp_rcvd; + uint16 non_ether_frames; + uint16 port_alloc_fail; + uint16 srcip_tbl_full; + uint16 dstip_tbl_full; + uint16 nat_tbl_full; + uint16 icmp_error_cnt; + uint16 pkt_drops_resource; + uint32 frwd_nat_pkt_cnt; + uint32 reverse_nat_pkt_cnt; + uint16 pub_nw_chspec; + uint16 pvt_nw_chspec; + uint8 pubnw_cfg_idx; + uint8 pvtnw_cfg_idx; + uint8 pubnw_cfg_ID; + uint8 pvtnw_cfg_ID; + uint16 natoe_flags; +} wl_natoe_dbg_stats_v1_t; + +typedef struct wl_natoe_exception_port_inf_v1 { + uint16 except_bmap_size; + uint8 
port_except_bmap[]; +} wl_natoe_exception_port_inf_v1_t; + +typedef struct wl_natoe_dstnat_entry_v1 { + struct ipv4_addr clientip; + struct ether_addr client_mac_addr; + uint16 client_listenport; + uint8 opcode; +} wl_natoe_dstnat_entry_v1_t; + +typedef struct wl_pcie_frwd_stats_v1 { + uint16 version; + uint16 len; + uint16 frwd_txfrag_q_cnt; /* no. of txfrags in frwd_txfrag_list */ + /* no. of outstanding lbufs in txpath on if0/ifx */ + uint16 tx_frwd_n_lb_if0; + uint16 tx_frwd_n_lb_ifx; + /* no. of outstanding lfrags in txpath on if0/ifx */ + uint16 tx_frwd_n_lf_if0; + uint16 tx_frwd_n_lf_ifx; + /* no. of pending frwd pkts dropped upon d3 entry */ + uint16 tx_frwd_d3_drop_cnt; + /* Total no. of lbufs frwded in txpath on if0/ifx */ + uint32 tx_frwd_n_lb_if0_cnt; + uint32 tx_frwd_n_lb_ifx_cnt; + /* Total no. of lfrags frwded in txpath on if0/ifx */ + uint32 tx_frwd_n_lf_if0_cnt; + uint32 tx_frwd_n_lf_ifx_cnt; + uint32 frwd_tx_drop_thr_cnt; /* no. of pkts dropped due to txfrag threshold */ + uint32 frwd_tx_drop_err_cnt; /* no. 
of pkts dropped due to txfrags not avail / errors */ +} wl_pcie_frwd_stats_v1_t; + +enum wl_natoe_cmds { + WL_NATOE_CMD_MOD_VER = 0, + WL_NATOE_CMD_ENABLE = 1, + WL_NATOE_CMD_CONFIG_IPS = 2, + WL_NATOE_CMD_CONFIG_PORTS = 3, + WL_NATOE_CMD_DBG_STATS = 4, + WL_NATOE_CMD_EXCEPTION_PORT = 5, + WL_NATOE_CMD_SKIP_PORT = 6, + WL_NATOE_CMD_TBL_CNT = 7, + WL_NATOE_CMD_CONFIG_DSTNAT = 8, + WL_NATOE_CMD_CTRL = 9 +}; + +enum wl_natoe_cmd_xtlv_id { + WL_NATOE_XTLV_MOD_VER = 0, + WL_NATOE_XTLV_ENABLE = 1, + WL_NATOE_XTLV_CONFIG_IPS = 2, + WL_NATOE_XTLV_CONFIG_PORTS = 3, + WL_NATOE_XTLV_DBG_STATS = 4, + WL_NATOE_XTLV_EXCEPTION_PORT = 5, + WL_NATOE_XTLV_SKIP_PORT = 6, + WL_NATOE_XTLV_TBL_CNT = 7, + WL_NATOE_XTLV_ARP_TBL = 8, + WL_NATOE_XTLV_POOLREORG = 9, + WL_NATOE_XTLV_CONFIG_DSTNAT = 10, + WL_NATOE_XTLV_CTRL = 11 +}; + +/* endif WL_NATOE */ + +enum wl_idauth_cmd_ids { + WL_IDAUTH_CMD_CONFIG = 1, + WL_IDAUTH_CMD_PEER_INFO = 2, + WL_IDAUTH_CMD_COUNTERS = 3, + WL_IDAUTH_CMD_LAST +}; +enum wl_idauth_xtlv_id { + WL_IDAUTH_XTLV_AUTH_ENAB = 0x1, + WL_IDAUTH_XTLV_GTK_ROTATION = 0x2, + WL_IDAUTH_XTLV_EAPOL_COUNT = 0x3, + WL_IDAUTH_XTLV_EAPOL_INTRVL = 0x4, + WL_IDAUTH_XTLV_BLKLIST_COUNT = 0x5, + WL_IDAUTH_XTLV_BLKLIST_AGE = 0x6, + WL_IDAUTH_XTLV_PEERS_INFO = 0x7, + WL_IDAUTH_XTLV_COUNTERS = 0x8 +}; +enum wl_idauth_stats { + WL_AUTH_PEER_STATE_AUTHORISED = 0x01, + WL_AUTH_PEER_STATE_BLACKLISTED = 0x02, + WL_AUTH_PEER_STATE_4WAY_HS_ONGOING = 0x03, + WL_AUTH_PEER_STATE_LAST +}; +typedef struct { + uint16 state; /* Peer State: Authorised or Blacklisted */ + struct ether_addr peer_addr; /* peer Address */ + uint32 blklist_end_time; /* Time of blacklist end */ +} auth_peer_t; +typedef struct wl_idauth_counters { + uint32 auth_reqs; /* No of auth req recvd */ + uint32 mic_fail; /* No of mic fails */ + uint32 four_way_hs_fail; /* No of 4-way handshake fails */ +} wl_idauth_counters_t; + +#define WLC_UTRACE_LEN (1024u * 4u) // default length +#define WLC_UTRACE_LEN_AUX (1024u * 3u) // reduced 
length to fit smaller AUX BM +#define WLC_UTRACE_READ_END 0 +#define WLC_UTRACE_MORE_DATA 1 +typedef struct wl_utrace_capture_args_v1 { + uint32 length; + uint32 flag; +} wl_utrace_capture_args_v1_t; + +#define UTRACE_CAPTURE_VER_2 2 +typedef struct wl_utrace_capture_args_v2 { + /* structure control */ + uint16 version; /**< structure version */ + uint16 length; /**< length of the response */ + uint32 flag; /* Indicates if there is more data or not */ +} wl_utrace_capture_args_v2_t; + +/* Signal read end. */ +#define WLC_REGVAL_READ_END 0 +/* Signal more data pending. */ +#define WLC_REGVAL_MORE_DATA 1 +/* Internal read state. */ +#define WLC_REGVAL_READ_CONTINUE 2 + +#define WLC_REGVAL_DUMP_PHYREG 0 +#define WLC_REGVAL_DUMP_RADREG 1 + +#define PHYREGVAL_CAPTURE_BUFFER_LEN 2048 + +typedef struct wl_regval_capture_args { + uint32 control_flag; /* Carries status information. */ +} wl_regval_capture_args_t; + +/* XTLV IDs for the Health Check "hc" iovar top level container */ +enum { + WL_HC_XTLV_ID_CAT_HC = 1, /* category for HC as a whole */ + WL_HC_XTLV_ID_CAT_DATAPATH_TX = 2, /* Datapath Tx */ + WL_HC_XTLV_ID_CAT_DATAPATH_RX = 3, /* Datapath Rx */ + WL_HC_XTLV_ID_CAT_SCAN = 4, /* Scan */ + WL_HC_XTLV_ID_CAT_EVENTMASK = 5, /* Health Check event mask. */ +}; + +/* Health Check: Common XTLV IDs for sub-elements in the top level container + * Number starts at 0x8000 to be out of the way for category specific IDs. 
+ */ +enum { + WL_HC_XTLV_ID_ERR = 0x8000, /* for sub-command err return */ + WL_HC_XTLV_ID_IDLIST = 0x8001, /* container for uint16 IDs */ +}; + +/* Health Check: Datapath TX IDs */ +enum { + WL_HC_TX_XTLV_ID_VAL_STALL_THRESHOLD = 1, /* stall_threshold */ + WL_HC_TX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 2, /* stall_sample_size */ + WL_HC_TX_XTLV_ID_VAL_STALL_TIMEOUT = 3, /* stall_timeout */ + WL_HC_TX_XTLV_ID_VAL_STALL_FORCE = 4, /* stall_force */ + WL_HC_TX_XTLV_ID_VAL_STALL_EXCLUDE = 5, /* stall_exclude */ + WL_HC_TX_XTLV_ID_VAL_FC_TIMEOUT = 6, /* flow ctl timeout */ + WL_HC_TX_XTLV_ID_VAL_FC_FORCE = 7, /* flow ctl force failure */ + WL_HC_TX_XTLV_ID_VAL_DELAY_TO_TRAP = 8, /* delay threshold for forced trap */ + WL_HC_TX_XTLV_ID_VAL_DELAY_TO_RPT = 9, /* delay threshold for event log report */ + WL_HC_TX_XTLV_ID_VAL_FAILURE_TO_RPT = 10, /* threshold for consecutive TX failures */ +}; + +/* Health Check: Datapath RX IDs */ +enum { + WL_HC_RX_XTLV_ID_VAL_DMA_STALL_TIMEOUT = 1, /* dma_stall_timeout */ + WL_HC_RX_XTLV_ID_VAL_DMA_STALL_FORCE = 2, /* dma_stall test trigger */ + WL_HC_RX_XTLV_ID_VAL_STALL_THRESHOLD = 3, /* stall_threshold */ + WL_HC_RX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 4, /* stall_sample_size */ + WL_HC_RX_XTLV_ID_VAL_STALL_FORCE = 5, /* stall test trigger */ +}; + +/* Health Check: Datapath SCAN IDs */ +enum { + WL_HC_XTLV_ID_VAL_SCAN_STALL_THRESHOLD = 1, /* scan stall threshold */ +}; + +/* Health check: PHY IDs */ +/* Needed for iguana 13.35 branch */ +typedef enum { + PHY_HC_DD_ALL = 0, + PHY_HC_DD_TEMPSENSE = 1, + PHY_HC_DD_VCOCAL = 2, + PHY_HC_DD_RX = 3, + PHY_HC_DD_TX = 4, + PHY_HC_DD_LAST /* This must be the last entry */ +} phy_hc_dd_type_t; + +typedef enum { + PHY_HC_DD_TEMP_FAIL = 0, + PHY_HC_DD_VCO_FAIL = 1, + PHY_HC_DD_RXDSN_FAIL = 2, + PHY_HC_DD_TXPOW_FAIL = 3, + PHY_HC_DD_END /* This must be the last entry */ +} phy_hc_dd_type_v2_t; + +/* IDs of Health Check report structures for sub types of health checks within WL */ +typedef enum wl_hc_dd_type 
{ + WL_HC_DD_PCIE = 0, /* PCIe */ + WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */ + WL_HC_DD_RX_STALL = 2, /* RX stall check */ + WL_HC_DD_TX_STALL = 3, /* TX stall check */ + WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */ + WL_HC_DD_PHY = 5, /* PHY health check */ + WL_HC_DD_REINIT = 6, /* Reinit due to other reasons */ + WL_HC_DD_TXQ_STALL = 7, /* TXQ stall */ + WL_HC_DD_MAX +} wl_hc_dd_type_t; + +/* + * Health Check report structures for sub types of health checks within WL + */ + +/* Health Check report structure for Rx DMA Stall check */ +typedef struct { + uint16 type; + uint16 length; + uint16 timeout; + uint16 stalled_dma_bitmap; +} wl_rx_dma_hc_info_t; + +/* Health Check report structure for Tx packet failure check */ +typedef struct { + uint16 type; + uint16 length; + uint32 stall_bitmap; + uint32 stall_bitmap1; + uint32 failure_ac; + uint32 threshold; + uint32 tx_all; + uint32 tx_failure_all; +} wl_tx_hc_info_t; + +/* Health Check report structure for Rx dropped packet failure check */ +typedef struct { + uint16 type; + uint16 length; + uint32 bsscfg_idx; + uint32 rx_hc_pkts; + uint32 rx_hc_dropped_all; + uint32 rx_hc_alert_th; +} wl_rx_hc_info_t; + +/* HE top level command IDs */ +enum { + WL_HE_CMD_ENAB = 0, + WL_HE_CMD_FEATURES = 1, + WL_HE_CMD_TWT_SETUP = 2, + WL_HE_CMD_TWT_TEARDOWN = 3, + WL_HE_CMD_TWT_INFO = 4, + WL_HE_CMD_BSSCOLOR = 5, + WL_HE_CMD_PARTIAL_BSSCOLOR = 6, + WL_HE_CMD_CAP = 7, + WL_HE_CMD_STAID = 8, + WL_HE_CMD_RTSDURTHRESH = 10, + WL_HE_CMD_PEDURATION = 11, + WL_HE_CMD_TESTBED_MODE = 12, + WL_HE_CMD_LAST +}; + +/* TWT top level command IDs */ +enum { + WL_TWT_CMD_ENAB = 0, + WL_TWT_CMD_SETUP = 1, + WL_TWT_CMD_TEARDOWN = 2, + WL_TWT_CMD_INFO = 3, + WL_TWT_CMD_AUTOSCHED = 4, + WL_TWT_CMD_EARLY_TERM_TIME = 6, + WL_TWT_CMD_LAST +}; + +/* TODO: Remove the following after merging TWT changes to trunk */ +#define WL_TWT_CMD_DEF_IN_WLIOCTL 1 + +#define WL_HEB_VER_1 1 + +/* HEB top level command IDs */ +enum { + WL_HEB_CMD_ENAB = 0, +
WL_HEB_CMD_NUM_HEB = 1, + WL_HEB_CMD_COUNTERS = 2, + WL_HEB_CMD_CLEAR_COUNTERS = 3, + WL_HEB_CMD_CONFIG = 4, + WL_HEB_CMD_STATUS = 5, + WL_HEB_CMD_LAST +}; + +/* HEB counters structures */ +typedef struct wl_heb_int_cnt_v1 { + uint16 pre_event; + uint16 start_event; + uint16 end_event; + uint16 missed; +} wl_heb_int_cnt_v1_t; + +typedef struct wl_heb_cnt_v1 { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + wl_heb_int_cnt_v1_t heb_int_cnt[1]; +} wl_heb_cnt_v1_t; + +// struct for configuring HEB +typedef struct wl_config_heb_fill_v1 { + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + uint32 duration; + uint32 periodicity; + uint16 heb_idx; + uint16 preeventtime; + uint8 count; + uint8 PAD[3]; +} wl_config_heb_fill_v1_t; + +typedef struct wl_heb_blk_params_v1 { + /* Don't change the order of following elements. This is as per the HEB HW spec */ + uint32 event_int_val_l; + uint32 event_int_val_h; + uint32 param2; + uint32 param3; + uint32 pre_event_intmsk_bmp; + uint32 start_event_intmsk_bmp; + uint32 end_event_intmsk_bmp; + uint32 event_driver_info; + uint16 param1; + uint8 event_count; + uint8 noa_invert; +} wl_heb_blk_params_v1_t; + +typedef struct wl_heb_int_status_v1 { + uint32 heb_idx; + wl_heb_blk_params_v1_t blk_params; +} wl_heb_reg_status_v1_t; + +typedef struct wl_heb_status_v1 { + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + wl_heb_reg_status_v1_t heb_status[1]; +} wl_heb_status_v1_t; + +/* TWT Setup descriptor */ +typedef struct { + /* Setup Command. */ + uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h, + * valid when bcast_twt is FALSE. 
+ */ + /* Flow attributes */ + uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX below */ + uint8 flow_id; /* must be between 0 and 7 */ + /* Target Wake Time */ + uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */ + uint32 wake_time_h; /* target wake time - BSS TSF (us) */ + uint32 wake_time_l; + uint32 wake_dur; /* target wake duration in unit of microseconds */ + uint32 wake_int; /* target wake interval */ + + uint16 bid; /* must be between 0 and 255. Set 0xFFFF for auto assignment */ + uint16 li; /* Listen interval: Units in number of beacon intervals */ + uint8 channel; /* twt channel */ + uint8 pad[3]; +} wl_twt_sdesc_t; + +/* Flow flags */ +#define WL_TWT_FLOW_FLAG_BROADCAST (1 << 0) +#define WL_TWT_FLOW_FLAG_IMPLICIT (1 << 1) +#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1 << 2) +#define WL_TWT_FLOW_FLAG_TRIGGER (1 << 3) +#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1 << 4) +#define WL_TWT_FLOW_FLAG_REQUEST (1 << 5) + +/* Flow id */ +#define WL_TWT_FLOW_ID_FID 0x07 /* flow id */ +#define WL_TWT_FLOW_ID_GID_MASK 0x70 /* group id - broadcast TWT only */ +#define WL_TWT_FLOW_ID_GID_SHIFT 4 + +#define WL_TWT_INV_BCAST_ID 0xFFFFu +#define WL_TWT_INV_FLOW_ID 0xFFu + +#define WL_TWT_DIALOG_TOKEN_AUTO 0xFFFF + +/* Wake type */ +/* TODO: not yet finalized */ +#define WL_TWT_TIME_TYPE_BSS 0 /* The time specified in wake_time_h/l is + * the BSS TSF time. + */ +#define WL_TWT_TIME_TYPE_OFFSET 1 /* The time specified in wake_time_h/l is an offset + * of the TSF time when the iovar is processed. 
+ */ + +#define WL_TWT_SETUP_VER 0 + +/* HE TWT Setup command */ +typedef struct { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + /* peer address */ + struct ether_addr peer; /* leave it all 0s for AP */ + /* session id */ + uint16 dialog; /* an arbitrary number to identify the session */ + /* setup descriptor */ + wl_twt_sdesc_t desc; +} wl_twt_setup_t; + +#define WL_TWT_TEARDOWN_VER 0 + +/* HE TWT Teardown command */ +typedef struct { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + /* peer address */ + struct ether_addr peer; /* leave it all 0s for AP */ + /* flow attributes */ + uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX above. + * (only BROADCAST is applicable) + */ + uint8 flow_id; /* must be between 0 and 7 */ + uint16 bid; /* must be between 0 and 255 */ +} wl_twt_teardown_t; + +/* twt information descriptor */ +typedef struct { + uint8 flow_flags; /* See WL_TWT_INFO_FLAG_XXX below */ + uint8 flow_id; + uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */ + uint8 pad[1]; + uint32 next_twt_h; + uint32 next_twt_l; +} wl_twt_idesc_t; + +/* Flow flags */ +#define WL_TWT_INFO_FLAG_RESP_REQ (1 << 0) /* Response Requested */ +#define WL_TWT_INFO_FLAG_NEXT_TWT_REQ (1 << 1) /* Next TWT Request */ +#define WL_TWT_INFO_FLAG_BTWT_RESCHED (1 << 2) /* Broadcast Reschedule */ +#define WL_TWT_INFO_FLAG_RESUME (1 << 4) /* 1 is TWT Resume, 0 is TWT Suspend */ + +#define WL_TWT_INFO_VER 0 + +/* HE TWT Information command */ +typedef struct { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + /* peer address */ + struct ether_addr peer; /* leave it all 0s for AP */ + uint8 pad[2]; + /* Temporary change.
to be removed */ + wl_twt_idesc_t desc; + /* information descriptor */ + wl_twt_idesc_t idesc; +} wl_twt_info_t; + +/* Current version for wlc_clm_power_limits_req_t structure and flags */ +#define WLC_CLM_POWER_LIMITS_REQ_VERSION 1 +/* "clm_power_limits" iovar request structure */ +typedef struct wlc_clm_power_limits_req { + /* Input. Structure and flags version */ + uint32 version; + /* Full length of buffer (includes this structure and space for TLV-encoded PPR) */ + uint32 buflen; + /* Input. Flags (see WLC_CLM_POWER_LIMITS_INPUT_FLAG_... below) */ + uint32 input_flags; + /* Input. CC of region whose data is being requested */ + char cc[WLC_CNTRY_BUF_SZ]; + /* Input. Channel/subchannel in chanspec_t format */ + uint32 chanspec; + /* Subchannel encoded as clm_limits_type_t */ + uint32 clm_subchannel; + /* Input. 0-based antenna index */ + uint32 antenna_idx; + /* Output. General flags (see WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_... below) */ + uint32 output_flags; + /* Output. 2.4G country flags, encoded as clm_flags_t enum */ + uint32 clm_country_flags_2g; + /* Output. 5G country flags, encoded as clm_flags_t enum */ + uint32 clm_country_flags_5g; + /* Output. Length of TLV-encoded PPR data that follows this structure */ + uint32 ppr_tlv_size; + /* Output. Beginning of buffer for TLV-encoded PPR data */ + uint8 ppr_tlv[1]; +} wlc_clm_power_limits_req_t; + +/* Input. Do not apply SAR limits */ +#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_SAR 0x00000001 +/* Input. Do not apply board limits */ +#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_BOARD 0x00000002 +/* Output. Limits taken from product-specific country data */ +#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_PRODUCT_LIMITS 0x00000001 +/* Output. Limits taken from product-specific worldwide data */ +#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_WORLDWIDE_LIMITS 0x00000002 +/* Output. 
Limits taken from country-default (all-product) data */ +#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_DEFAULT_COUNTRY_LIMITS 0x00000004 + +#define WL_MBO_IOV_MAJOR_VER 1 +#define WL_MBO_IOV_MINOR_VER 1 +#define WL_MBO_IOV_MAJOR_VER_SHIFT 8 +#define WL_MBO_IOV_VERSION \ + ((WL_MBO_IOV_MAJOR_VER << WL_MBO_IOV_MAJOR_VER_SHIFT)| WL_MBO_IOV_MINOR_VER) + +#define MBO_MAX_CHAN_PREF_ENTRIES 16 + +enum wl_mbo_cmd_ids { + WL_MBO_CMD_ADD_CHAN_PREF = 1, + WL_MBO_CMD_DEL_CHAN_PREF = 2, + WL_MBO_CMD_LIST_CHAN_PREF = 3, + WL_MBO_CMD_CELLULAR_DATA_CAP = 4, + WL_MBO_CMD_DUMP_COUNTERS = 5, + WL_MBO_CMD_CLEAR_COUNTERS = 6, + WL_MBO_CMD_FORCE_ASSOC = 7, + WL_MBO_CMD_BSSTRANS_REJECT = 8, + WL_MBO_CMD_SEND_NOTIF = 9, + /* Unused command, This enum no can be use + * for next new command + */ + WL_MBO_CMD_CLEAR_CHAN_PREF = 10, + WL_MBO_CMD_NBR_INFO_CACHE = 11, + WL_MBO_CMD_ANQPO_SUPPORT = 12, + WL_MBO_CMD_DBG_EVENT_CHECK = 13, + /* Add before this !! */ + WL_MBO_CMD_LAST +}; + +enum wl_mbo_xtlv_id { + WL_MBO_XTLV_OPCLASS = 0x1, + WL_MBO_XTLV_CHAN = 0x2, + WL_MBO_XTLV_PREFERENCE = 0x3, + WL_MBO_XTLV_REASON_CODE = 0x4, + WL_MBO_XTLV_CELL_DATA_CAP = 0x5, + WL_MBO_XTLV_COUNTERS = 0x6, + WL_MBO_XTLV_ENABLE = 0x7, + WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8, + WL_MBO_XTLV_BTQ_TRIG_START_OFFSET = 0x9, + WL_MBO_XTLV_BTQ_TRIG_RSSI_DELTA = 0xa, + WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb +}; + +typedef struct wl_mbo_counters { + /* No of transition req recvd */ + uint16 trans_req_rcvd; + /* No of transition req with disassoc imminent */ + uint16 trans_req_disassoc; + /* No of transition req with BSS Termination */ + uint16 trans_req_bss_term; + /* No of trans req w/ unspecified reason */ + uint16 trans_resn_unspec; + /* No of trans req w/ reason frame loss */ + uint16 trans_resn_frm_loss; + /* No of trans req w/ reason traffic delay */ + uint16 trans_resn_traffic_delay; + /* No of trans req w/ reason insufficient buffer */ + uint16 trans_resn_insuff_bw; + /* No of trans req w/ reason load balance */ + uint16 
trans_resn_load_bal; + /* No of trans req w/ reason low rssi */ + uint16 trans_resn_low_rssi; + /* No of trans req w/ reason excessive retransmission */ + uint16 trans_resn_xcess_retransmn; + /* No of trans req w/ reason gray zone */ + uint16 trans_resn_gray_zone; + /* No of trans req w/ reason switch to premium AP */ + uint16 trans_resn_prem_ap_sw; + /* No of transition rejection sent */ + uint16 trans_rejn_sent; + /* No of trans rejn reason excessive frame loss */ + uint16 trans_rejn_xcess_frm_loss; + /* No of trans rejn reason excessive traffic delay */ + uint16 trans_rejn_xcess_traffic_delay; + /* No of trans rejn reason insufficient QoS capability */ + uint16 trans_rejn_insuffic_qos_cap; + /* No of trans rejn reason low RSSI */ + uint16 trans_rejn_low_rssi; + /* No of trans rejn reason high interference */ + uint16 trans_rejn_high_interference; + /* No of trans rejn reason service unavailable */ + uint16 trans_rejn_service_unavail; + /* No of beacon request rcvd */ + uint16 bcn_req_rcvd; + /* No of beacon report sent */ + uint16 bcn_rep_sent; + /* No of null beacon report sent */ + uint16 null_bcn_rep_sent; + /* No of wifi to cell switch */ + uint16 wifi_to_cell; +} wl_mbo_counters_t; + +#define WL_FILS_IOV_MAJOR_VER 1 +#define WL_FILS_IOV_MINOR_VER 1 +#define WL_FILS_IOV_MAJOR_VER_SHIFT 8 +#define WL_FILS_IOV_VERSION \ + ((WL_FILS_IOV_MAJOR_VER << WL_FILS_IOV_MAJOR_VER_SHIFT)| WL_FILS_IOV_MINOR_VER) + +enum wl_fils_cmd_ids { + WL_FILS_CMD_ADD_IND_IE = 1, + WL_FILS_CMD_ADD_AUTH_DATA = 2, /* Deprecated, kept to prevent ROM invalidation */ + WL_FILS_CMD_ADD_HLP_IE = 3, + WL_FILS_CMD_ADD_CONNECT_PARAMS = 4, + WL_FILS_CMD_GET_CONNECT_PARAMS = 5, + /* Add before this !!
*/ + WL_FILS_CMD_LAST +}; + +enum wl_fils_xtlv_id { + WL_FILS_XTLV_IND_IE = 0x1, + WL_FILS_XTLV_AUTH_DATA = 0x2, /* Deprecated, kept to prevent ROM invalidation */ + WL_FILS_XTLV_HLP_IE = 0x3, + WL_FILS_XTLV_ERP_USERNAME = 0x4, + WL_FILS_XTLV_ERP_REALM = 0x5, + WL_FILS_XTLV_ERP_RRK = 0x6, + WL_FILS_XTLV_ERP_NEXT_SEQ_NUM = 0x7, + WL_FILS_XTLV_KEK = 0x8, + WL_FILS_XTLV_PMK = 0x9, + WL_FILS_XTLV_TK = 0xa, + WL_FILS_XTLV_PMKID = 0xb +}; + +#define WL_OCE_IOV_MAJOR_VER 1 +#define WL_OCE_IOV_MINOR_VER 1 +#define WL_OCE_IOV_MAJOR_VER_SHIFT 8 +#define WL_OCE_IOV_VERSION \ + ((WL_OCE_IOV_MAJOR_VER << WL_OCE_IOV_MAJOR_VER_SHIFT)| WL_OCE_IOV_MINOR_VER) + +enum wl_oce_cmd_ids { + WL_OCE_CMD_ENABLE = 1, + WL_OCE_CMD_PROBE_DEF_TIME = 2, + WL_OCE_CMD_FD_TX_PERIOD = 3, + WL_OCE_CMD_FD_TX_DURATION = 4, + WL_OCE_CMD_RSSI_TH = 5, + WL_OCE_CMD_RWAN_LINKS = 6, + WL_OCE_CMD_CU_TRIGGER = 7, + /* Add before this !! */ + WL_OCE_CMD_LAST +}; + +enum wl_oce_xtlv_id { + WL_OCE_XTLV_ENABLE = 0x1, + WL_OCE_XTLV_PROBE_DEF_TIME = 0x2, + WL_OCE_XTLV_FD_TX_PERIOD = 0x3, + WL_OCE_XTLV_FD_TX_DURATION = 0x4, + WL_OCE_XTLV_RSSI_TH = 0x5, + WL_OCE_XTLV_RWAN_LINKS = 0x6, + WL_OCE_XTLV_CU_TRIGGER = 0x7 +}; + +#define WL_ESP_IOV_MAJOR_VER 1 +#define WL_ESP_IOV_MINOR_VER 1 +#define WL_ESP_IOV_MAJOR_VER_SHIFT 8 +#define WL_ESP_IOV_VERSION \ + ((WL_ESP_IOV_MAJOR_VER << WL_ESP_IOV_MAJOR_VER_SHIFT)| WL_ESP_IOV_MINOR_VER) + +enum wl_esp_cmd_ids { + WL_ESP_CMD_ENABLE = 1, + WL_ESP_CMD_STATIC = 2, + /* Add before this !! 
*/ + WL_ESP_CMD_LAST +}; + +enum wl_esp_xtlv_id { + WL_ESP_XTLV_ENABLE = 0x1, + WL_ESP_XTLV_STATIC_AC = 0x2, /* access category */ + WL_ESP_XTLV_STATIC_TYPE = 0x3, /* data type */ + WL_ESP_XTLV_STATIC_VAL = 0x4 +}; + +/* otpread command */ +#define WL_OTPREAD_VER 1 + +typedef struct { + uint16 version; /* cmd structure version */ + uint16 cmd_len; /* cmd struct len */ + uint32 rdmode; /* otp read mode */ + uint32 rdoffset; /* byte offset into otp to start read */ + uint32 rdsize; /* number of bytes to read */ +} wl_otpread_cmd_t; + +/* "otpecc_rows" command */ +typedef struct { + uint16 version; /* version of this structure */ + uint16 len; /* len in bytes of this structure */ + uint32 cmdtype; /* command type : 0 : read row data, 1 : ECC lock */ + uint32 rowoffset; /* start row offset */ + uint32 numrows; /* number of rows */ + uint8 rowdata[]; /* read rows data */ +} wl_otpecc_rows_t; + +#define WL_OTPECC_ROWS_VER 1 + +#define WL_OTPECC_ROWS_CMD_READ 0 +#define WL_OTPECC_ROWS_CMD_LOCK 1 + +#define WL_OTPECC_ARGIDX_CMDTYPE 0 /* command type */ +#define WL_OTPECC_ARGIDX_ROWOFFSET 1 /* start row offset */ +#define WL_OTPECC_ARGIDX_NUMROWS 2 /* number of rows */ + +/* "otpeccrows" raw data size per row */ +#define WL_ECCDUMP_ROW_SIZE_BYTE 6 /* 4 bytes row data + 2 bytes ECC status */ +#define WL_ECCDUMP_ROW_SIZE_WORD 3 + +/* otpECCstatus */ +#define OTP_ECC_ENAB_SHIFT 13 +#define OTP_ECC_ENAB_MASK 0x7 +#define OTP_ECC_CORR_ST_SHIFT 12 +#define OTP_ECC_CORR_ST_MASK 0x1 +#define OTP_ECC_DBL_ERR_SHIFT 11 +#define OTP_ECC_DBL_ERR_MASK 0x1 +#define OTP_ECC_DED_ST_SHIFT 10 +#define OTP_ECC_DED_ST_MASK 0x1 +#define OTP_ECC_SEC_ST_SHIFT 9 +#define OTP_ECC_SEC_ST_MASK 0x1 +#define OTP_ECC_DATA_SHIFT 0 +#define OTP_ECC_DATA_MASK 0x7f + +/* OTP_ECC_CORR_ST field */ +#define OTP_ECC_MODE 1 +#define OTP_NO_ECC_MODE 0 + +/* OTP_ECC_ENAB field (bit15:13) : + * When 2 or 3 bits are set, + * it indicates that OTP ECC is enabled on the last row read. 
+ * Otherwise, ECC is disabled + */ +#define OTP_ECC_ENAB(val) \ + (bcm_bitcount((uint8 *)&(val), sizeof(uint8)) > 1) + +#define WL_LEAKY_AP_STATS_GT_TYPE 0 +#define WL_LEAKY_AP_STATS_PKT_TYPE 1 +typedef struct wlc_leaked_infra_guard_marker { + /* type field for this TLV: WL_LEAKY_AP_STATS_GT_TYPE */ + uint16 type; + /* length field for this TLV */ + uint16 len; + /* guard sample sequence number; Updated by 1 on every guard sample */ + uint32 seq_number; + /* Guard time start time (tsf; PS indicated and acked) */ + uint32 start_time; + /* tsf timestamp for the GT end event */ + uint32 gt_tsf_l; + /* Guard time period in ms */ + uint16 guard_duration; + /* Number PPDUs in the notification */ + uint16 num_pkts; + /* Flags to indicate some states see below */ + uint8 flag; + /* pad for 32-bit alignment */ + uint8 reserved[3]; +} wlc_leaked_infra_guard_marker_t; + +/* Flag information */ +#define WL_LEAKED_GUARD_TIME_NONE 0 /* Not in any guard time */ +#define WL_LEAKED_GUARD_TIME_FRTS (0x01 << 0) /* Normal FRTS power save */ +#define WL_LEAKED_GUARD_TIME_SCAN (0x01 << 1) /* Channel switch due to scanning */ +#define WL_LEAKED_GUARD_TIME_AWDL_PSF (0x01 << 2) /* Channel switch due to AWDL PSF */ +#define WL_LEAKED_GUARD_TIME_AWDL_AW (0x01 << 3) /* Channel switch due to AWDL AW */ +#define WL_LEAKED_GUARD_TIME_INFRA_STA (0x01 << 4) /* generic type infra sta channel switch */ +#define WL_LEAKED_GUARD_TIME_TERMINATED (0x01 << 7) /* indicate a GT is terminated early */ + +typedef struct wlc_leaked_infra_packet_stat { + uint16 type; /* type field for this TLV: WL_LEAKY_AP_STATS_PKT_TYPE */ + uint16 len; /* length field for this TLV */ + uint16 ppdu_len_bytes; /* PPDU packet length in bytes */ + uint16 num_mpdus; /* number of the MPDUs in the PPDU */ + uint32 ppdu_time; /* PPDU arrival time at the beginning of the guard time */ + uint32 rate; /* PPDU packet rate; Received packet's data rate */ + uint16 seq_number; /* sequence number */ + int8 rssi; /* RSSI */ + uint8 tid; /*
tid */ +} wlc_leaked_infra_packet_stat_t; + +/* Wake timer structure definition */ +#define WAKE_TIMER_VERSION 1 +#define WAKE_TIMER_NOLIMIT 0xFFFF + +typedef struct wake_timer { + uint16 ver; + uint16 len; + uint16 limit; /* number of events to deliver + * 0-disable, 0xffff-indefinite, num_events otherwise + */ + uint16 count; /* number of events delivered since enable (get only) */ + uint16 period; /* timeout/period in milliseconds */ +} wake_timer_t; + +typedef struct wl_desense_restage_gain { + uint16 version; + uint16 length; + uint32 band; + uint8 num_cores; + uint8 desense_array[WL_TX_CHAINS_MAX]; + uint8 PAD[3]; +} wl_desense_restage_gain_t; + +#define MAX_UCM_CHAINS 5 +#define MAX_UCM_PROFILES 10 +#define UCM_PROFILE_VERSION_1 1 + +/* UCM per chain attribute struct */ +typedef struct wlc_btcx_chain_attr { + uint16 length; /* chain attr length, version is same as profile version */ + int8 desense_level; /* per chain desense level */ + int8 ack_pwr_strong_rssi; /* per chain ack power at strong rssi */ + int8 ack_pwr_weak_rssi; /* per chain ack power at weak rssi */ + int8 tx_pwr_strong_rssi; /* per chain tx power at strong rssi */ + int8 tx_pwr_weak_rssi; /* per chain tx power at weak rssi */ + uint8 PAD[1]; /* additional bytes for alignment */ +} wlc_btcx_chain_attr_t; + +typedef struct wlc_btcx_profile_v1 { + uint16 version; /* UCM profile version */ + uint16 length; /* profile size */ + uint16 fixed_length; /* size of the fixed portion of the profile */ + uint8 init; /* profile initialized or not */ + uint8 chain_attr_count; /* Number of elements in chain_attr array */ + uint8 profile_index; /* profile index */ + uint8 mode_strong_wl_bt; /* Mode under strong WLAN and BT RSSI */ + uint8 mode_weak_wl; /* Mode under weak WLAN RSSI */ + uint8 mode_weak_bt; /* Mode under weak BT RSSI */ + uint8 mode_weak_wl_bt; /* Mode under weak BT and WLAN RSSI */ + int8 mode_wl_hi_lo_rssi_thresh; /* Strong to weak WLAN RSSI threshold for mode selection */ + int8 
mode_wl_lo_hi_rssi_thresh; /* Weak to strong WLAN RSSI threshold for mode selection */ + int8 mode_bt_hi_lo_rssi_thresh; /* Strong to weak BT RSSI threshold for mode selection */ + int8 mode_bt_lo_hi_rssi_thresh; /* Weak to strong BT RSSI threshold for mode selection */ + int8 desense_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for desense */ + int8 desense_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for desense */ + int8 ack_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for ACK power */ + int8 ack_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for ACK power */ + int8 tx_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for Tx power */ + int8 tx_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for Tx power */ + uint8 PAD[1]; /* additional bytes for 4 byte alignment */ + wlc_btcx_chain_attr_t chain_attr[]; /* variable length array with chain attributes */ +} wlc_btcx_profile_v1_t; + +#define SSSR_D11_RESET_SEQ_STEPS 5 + +#define SSSR_REG_INFO_VER_0 0u +#define SSSR_REG_INFO_VER_1 1u +#define SSSR_REG_INFO_VER_2 2u + +typedef struct sssr_reg_info_v0 { + uint16 version; + uint16 length; /* length of the structure validated at host */ + struct { + struct { + uint32 pmuintmask0; + uint32 pmuintmask1; + uint32 resreqtimer; + uint32 macresreqtimer; + uint32 macresreqtimer1; + } base_regs; + } pmu_regs; + struct { + struct { + uint32 intmask; + uint32 powerctrl; + uint32 clockcontrolstatus; + uint32 powerctrl_mask; + } base_regs; + } chipcommon_regs; + struct { + struct { + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + } wrapper_regs; + } arm_regs; + struct { + struct { + uint32 ltrstate; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 itopoobb; + } wrapper_regs; + } pcie_regs; + struct { + struct { + uint32 ioctrl; + } wrapper_regs; + uint32 vasip_sr_addr; + uint32 vasip_sr_size; 
+ } vasip_regs; + struct { + struct { + uint32 xmtaddress; + uint32 xmtdata; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + uint32 ioctrl; + uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS]; + } wrapper_regs; + uint32 sr_size; + } mac_regs[MAX_NUM_D11CORES]; +} sssr_reg_info_v0_t; + +typedef struct sssr_reg_info_v1 { + uint16 version; + uint16 length; /* length of the structure validated at host */ + struct { + struct { + uint32 pmuintmask0; + uint32 pmuintmask1; + uint32 resreqtimer; + uint32 macresreqtimer; + uint32 macresreqtimer1; + } base_regs; + } pmu_regs; + struct { + struct { + uint32 intmask; + uint32 powerctrl; + uint32 clockcontrolstatus; + uint32 powerctrl_mask; + } base_regs; + } chipcommon_regs; + struct { + struct { + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + } wrapper_regs; + } arm_regs; + struct { + struct { + uint32 ltrstate; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 itopoobb; + } wrapper_regs; + } pcie_regs; + struct { + struct { + uint32 ioctrl; + } wrapper_regs; + uint32 vasip_sr_addr; + uint32 vasip_sr_size; + } vasip_regs; + struct { + struct { + uint32 xmtaddress; + uint32 xmtdata; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + uint32 ioctrl; + uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS]; + } wrapper_regs; + uint32 sr_size; + } mac_regs[MAX_NUM_D11CORES]; + struct { + uint32 dig_sr_addr; + uint32 dig_sr_size; + } dig_mem_info; +} sssr_reg_info_v1_t; + +#define MAX_NUM_D11_CORES_WITH_SCAN 3u + +typedef struct sssr_reg_info_v2 { + uint16 version; + uint16 length; /* length of the structure validated at host */ + struct { + struct { + uint32 pmuintmask0; + uint32 pmuintmask1; + uint32 resreqtimer; + uint32 macresreqtimer; + uint32 
macresreqtimer1; + uint32 macresreqtimer2; + } base_regs; + } pmu_regs; + struct { + struct { + uint32 intmask; + uint32 powerctrl; + uint32 clockcontrolstatus; + uint32 powerctrl_mask; + } base_regs; + } chipcommon_regs; + struct { + struct { + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + } wrapper_regs; + } arm_regs; + struct { + struct { + uint32 ltrstate; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 itopoobb; + } wrapper_regs; + } pcie_regs; + struct { + struct { + uint32 xmtaddress; + uint32 xmtdata; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + uint32 ioctrl; + uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS]; + } wrapper_regs; + uint32 sr_size; + } mac_regs[MAX_NUM_D11_CORES_WITH_SCAN]; + struct { + uint32 dig_sr_addr; + uint32 dig_sr_size; + } dig_mem_info; +} sssr_reg_info_v2_t; + +#ifndef SSSR_REG_INFO_HAS_ALIAS +typedef sssr_reg_info_v0_t sssr_reg_info_t; +#define SSSR_REG_INFO_VER SSSR_REG_INFO_VER_0 +#endif // endif + +/* ADaptive Power Save(ADPS) structure definition */ +#define WL_ADPS_IOV_MAJOR_VER 1 +#define WL_ADPS_IOV_MINOR_VER 0 +#define WL_ADPS_IOV_MAJOR_VER_SHIFT 8 +#define WL_ADPS_IOV_VER \ + ((WL_ADPS_IOV_MAJOR_VER << WL_ADPS_IOV_MAJOR_VER_SHIFT) | WL_ADPS_IOV_MINOR_VER) + +#define ADPS_NUM_DIR 2 +#define ADPS_RX 0 +#define ADPS_TX 1 + +#define WL_ADPS_IOV_MODE 0x0001 +#define WL_ADPS_IOV_RSSI 0x0002 +#define WL_ADPS_IOV_DUMP 0x0003 +#define WL_ADPS_IOV_DUMP_CLEAR 0x0004 +#define WL_ADPS_IOV_SUSPEND 0x0005 + +#define ADPS_SUMMARY_STEP_NUM 2 +#define ADPS_SUMMARY_STEP_LOW 0 +#define ADPS_SUMMARY_STEP_HIGH 1 + +#define ADPS_SUB_IOV_VERSION_1 1 +#define ADPS_SUB_IOV_VERSION_2 2 + +/* suspend/resume ADPS by wl/private command from host */ +#define ADPS_RESUME 0u +#define ADPS_SUSPEND 1u + +typedef struct wl_adps_params_v1 { + 
uint16 version; + uint16 length; + uint8 band; /* band - 2G or 5G */ + uint8 mode; /* operation mode, default = 0 (ADPS disable) */ + uint16 padding; +} wl_adps_params_v1_t; + +typedef struct wl_adps_rssi { + int32 thresh_hi; /* rssi threshold to resume ADPS operation */ + int32 thresh_lo; /* rssi threshold to suspend ADPS operation */ +} wl_adps_rssi_t; + +typedef struct wl_adps_rssi_params_v1 { + uint16 version; + uint16 length; + uint8 band; + uint8 padding[3]; + wl_adps_rssi_t rssi; +} wl_adps_rssi_params_v1_t; + +typedef struct adps_stat_elem { + uint32 duration; /* each step duration time (mSec) */ + uint32 counts; /* each step hit count number */ +} adps_stat_elem_t; + +typedef struct wl_adps_dump_summary_v1 { + uint16 version; + uint16 length; + uint8 mode; /* operation mode: On/Off */ + uint8 flags; /* restrict flags */ + uint8 current_step; /* current step */ + uint8 padding; + adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM]; /* statistics */ +} wl_adps_dump_summary_v1_t; + +typedef struct wl_adps_dump_summary_v2 { + uint16 version; + uint16 length; + uint8 mode; /* operation mode: On/Off */ + uint8 current_step; /* current step */ + uint8 padding[2]; + uint32 flags; /* restrict flags */ + adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM]; /* statistics */ +} wl_adps_dump_summary_v2_t; + +typedef struct wl_adps_suspend_v1 { + uint16 version; + uint16 length; + uint8 suspend; /* 1: suspend 0: resume */ + uint8 padding[3]; +} wl_adps_suspend_v1_t; + +typedef struct wlc_btc_2gchain_dis { + uint16 ver; + uint16 len; + uint8 chain_dis; + uint8 flag; +} wlc_btc_2gchain_dis_t; + +#define WLC_BTC_2GCHAIN_DIS_REASSOC 0x1 +#define WLC_BTC_2GCHAIN_DIS_VER1 0x1 +#define WLC_BTC_2GCHAIN_DIS_VER1_LEN 6 + +/* --- BTCX WiFi Protection (btc_wifi_prot iovar) --- */ + +/* Current iovar structure version: 1 */ +#define WL_BTC_WIFI_PROT_VER_1 1 + +typedef struct wl_btc_wifi_prot_v1 { + uint16 ver; /* version */ + uint16 len; /* total length */ + uint8 data[]; /* bcm_xtlv_t payload */ 
+} wl_btc_wifi_prot_v1_t; + +/* Xtlv tags (protection type) and data */ +#define WL_BTC_WIFI_PROT_M1_M4 1 +typedef struct wl_btc_wifi_prot_m1_m4 { + uint32 enable; /* enable/disable m1-m4 protection */ + uint32 timeout; /* maximum timeout in ms (0: default) */ +} wl_btc_wifi_prot_m1_m4_t; + +#define WL_BTC_WIFI_PROT_ENABLE 1 +#define WL_BTC_WIFI_PROT__DISABLE 0 + +/* --- End BTCX WiFi Protection --- */ + +enum wl_rpsnoa_cmd_ids { + WL_RPSNOA_CMD_ENABLE = 1, + WL_RPSNOA_CMD_STATUS, + WL_RPSNOA_CMD_PARAMS, + WL_RPSNOA_CMD_LAST +}; + +typedef struct rpsnoa_cmnhdr { + uint16 ver; /* cmd structure version */ + uint16 len; /* cmd structure len */ + uint32 subcmd; + uint32 cnt; +} rpsnoa_cmnhdr_t; + +typedef struct rpsnoa_data { + int16 band; + int16 value; +} rpsnoa_data_t; + +typedef struct rpsnoa_stats { + int16 band; + int16 state; + uint32 sleep_dur; + uint32 sleep_avail_dur; + uint32 last_pps; +} rpsnoa_stats_t; + +typedef struct rpsnoa_param { + uint16 band; + uint8 level; + uint8 stas_assoc_check; + uint32 pps; + uint32 quiet_time; +} rpsnoa_param_t; + +typedef struct rpsnoa_iovar { + rpsnoa_cmnhdr_t hdr; + rpsnoa_data_t data[1]; +} rpsnoa_iovar_t; + +typedef struct rpsnoa_iovar_status { + rpsnoa_cmnhdr_t hdr; + rpsnoa_stats_t stats[1]; +} rpsnoa_iovar_status_t; + +typedef struct rpsnoa_iovar_params { + rpsnoa_cmnhdr_t hdr; + rpsnoa_param_t param[1]; +} rpsnoa_iovar_params_t; + +/* Per-interface reportable stats types */ +enum wl_ifstats_xtlv_id { + /* global */ + WL_IFSTATS_XTLV_SLICE_INDEX = 1, + WL_IFSTATS_XTLV_IF_INDEX = 2, + WL_IFSTATS_XTLV_MAC_ADDR = 3, + WL_IFSTATS_XTLV_REPORT_CMD = 4, /* Comes in an iovar */ + WL_IFSTATS_XTLV_BUS_PCIE = 5, + + /* Report data across all SCBs using ecounters */ + /* STA_info ecounters */ + WL_IFSTATS_XTLV_WL_STA_INFO_ECOUNTERS = 0x100, + /* For AMPDU stat sub-types requested in a different format */ + /* these could be sum and report stats across slices. OR + * report sub-types in pairs so host can sum and add. 
+ * Information sent here is across slices, therefore global + */ + WL_IFSTATS_XTLV_TX_AMPDU_STATS = 0x101, + WL_IFSTATS_XTLV_RX_AMPDU_STATS = 0x102, + /* scb ecounter statistics */ + WL_IFSTATS_XTLV_SCB_ECOUNTERS = 0x103, + /* Global NAN stats */ + WL_IFSTATS_XTLV_NAN_STATS = 0x104, + + /* Per-slice information + * Per-interface reporting could also include slice specific data + */ + /* xtlv container for reporting */ + WL_IFSTATS_XTLV_WL_SLICE = 0x301, + /* Per-slice AMPDU stats */ + WL_IFSTATS_XTLV_WL_SLICE_TX_AMPDU_DUMP = 0x302, + WL_IFSTATS_XTLV_WL_SLICE_RX_AMPDU_DUMP = 0x303, + /* Per-slice BTCOEX stats */ + WL_IFSTATS_XTLV_WL_SLICE_BTCOEX = 0x304, + /* V11_WLCNTRS used in ecounters */ + WL_IFSTATS_XTLV_WL_SLICE_V11_WLCNTRS = 0x305, + /* V30_WLCNTRS Used in ecounters */ + WL_IFSTATS_XTLV_WL_SLICE_V30_WLCNTRS = 0x306, + /* phy,ucode,scan pwrstats */ + WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_PHY = 0x307, + WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_SCAN = 0x308, + WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_WAKE_V2 = 0x309, + /* Per-slice LTECOEX stats */ + WL_IFSTATS_XTLV_WL_SLICE_LTECOEX = 0x30A, + /* TVPM ecounters */ + WL_IFSTATS_XTLV_WL_SLICE_TVPM = 0x30B, + /* TDMTX ecounters */ + WL_IFSTATS_XTLV_WL_SLICE_TDMTX = 0x30C, + /* Slice specific state capture in periodic fashion */ + WL_SLICESTATS_XTLV_PERIODIC_STATE = 0x30D, + WL_SLICESTATS_XTLV_HIST_TX_STATS = 0x30E, + WL_SLICESTATS_XTLV_HIST_RX_STATS = 0x30F, + /* Per-interface */ + /* XTLV container for reporting */ + WL_IFSTATS_XTLV_IF = 0x501, + /* Generic stats applicable to all IFs */ + WL_IFSTATS_XTLV_GENERIC = 0x502, + /* Infra specific */ + WL_IFSTATS_XTLV_INFRA_SPECIFIC = 0x503, + /* MGT counters infra and softAP */ + WL_IFSTATS_XTLV_MGT_CNT = 0x504, + /* AMPDU stats on per-IF */ + WL_IFSTATS_XTLV_AMPDU_DUMP = 0x505, + WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506, + WL_IFSTATS_XTLV_WL_PWRSTATS_AWDL = 0x507, + WL_IFSTATS_XTLV_IF_LQM = 0x508, + /* Interface specific state capture in periodic fashion */ +
WL_IFSTATS_XTLV_IF_PERIODIC_STATE = 0x509, + /* Event statistics on per-IF */ + WL_IFSTATS_XTLV_IF_EVENT_STATS = 0x50A, + /* ecounters for nan */ + /* nan slot stats */ + WL_IFSTATS_XTLV_NAN_SLOT_STATS = 0x601 +}; + +/* current version of wl_stats_report_t structure for request */ +#define WL_STATS_REPORT_REQUEST_VERSION_V2 2 + +/* current version of wl_stats_report_t structure for response */ +#define WL_STATS_REPORT_RESPONSE_VERSION_V2 2 + +/** Top structure of if_counters IOVar buffer */ +typedef struct wl_stats_report { + uint16 version; /**< see version definitions above */ + uint16 length; /**< length of data including all paddings. */ + uint8 data []; /**< variable length payload: + * 1 or more bcm_xtlv_t type of tuples. + * each tuple is padded to multiple of 4 bytes. + * 'length' field of this structure includes all paddings. + */ +} wl_stats_report_t; + +/* interface specific mgt count */ +#define WL_MGT_STATS_VERSION_V1 1 +/* Associated stats type: WL_IFSTATS_MGT_CNT */ +typedef struct { + uint16 version; + uint16 length; + + /* detailed control/management frames */ + uint32 txnull; + uint32 rxnull; + uint32 txqosnull; + uint32 rxqosnull; + uint32 txassocreq; + uint32 rxassocreq; + uint32 txreassocreq; + uint32 rxreassocreq; + uint32 txdisassoc; + uint32 rxdisassoc; + uint32 txassocrsp; + uint32 rxassocrsp; + uint32 txreassocrsp; + uint32 rxreassocrsp; + uint32 txauth; + uint32 rxauth; + uint32 txdeauth; + uint32 rxdeauth; + uint32 txprobereq; + uint32 rxprobereq; + uint32 txprobersp; + uint32 rxprobersp; + uint32 txaction; + uint32 rxaction; + uint32 txpspoll; + uint32 rxpspoll; +} wl_if_mgt_stats_t; + +#define WL_INFRA_STATS_VERSION_V1 1 +/* Associated stats type: WL_IFSTATS_INFRA_SPECIFIC */ +typedef struct wl_infra_stats { + uint16 version; /**< version of the structure */ + uint16 length; + uint32 rxbeaconmbss; + uint32 tbtt; +} wl_if_infra_stats_t; + +#define LTECOEX_STATS_VER 1 + +typedef struct wlc_ltecoex_stats { + uint16 version; /**< 
WL_IFSTATS_XTLV_WL_SLICE_LTECOEX */ + uint16 len; /* Length of wl_ltecx_stats structure */ + uint8 slice_index; /* Slice unit of wl_ltecx_stats structure */ + uint8 pad[3]; /* Padding */ + /* LTE noise based eCounters Bins + cumulative the wl_cnt_wlc_t and wl_ctl_mgt_cnt_t + counter information based on LTE Coex interference level + */ + uint32 txframe_no_LTE; /* txframe counter in no LTE Coex case */ + uint32 rxframe_no_LTE; /* rxframe counter in no LTE Coex case */ + uint32 rxrtry_no_LTE; /* rxrtry counter in no LTE Coex case */ + uint32 txretrans_no_LTE; /* txretrans counter in no LTE Coex case */ + uint32 txnocts_no_LTE; /* txnocts counter in no LTE Coex case */ + uint32 txrts_no_LTE; /* txrts counter in no LTE Coex case */ + uint32 txdeauth_no_LTE; /* txdeauth counter in no LTE Coex case */ + uint32 txassocreq_no_LTE; /* txassocreq counter in no LTE Coex case */ + uint32 txassocrsp_no_LTE; /* txassocrsp counter in no LTE Coex case */ + uint32 txreassocreq_no_LTE; /* txreassocreq counter in no LTE Coex case */ + uint32 txreassocrsp_no_LTE; /* txreassocrsp counter in no LTE Coex case */ + uint32 txframe_light_LTE; /* txframe counter in light LTE Coex case */ + uint32 txretrans_light_LTE; /* txretrans counter in light LTE Coex case */ + uint32 rxframe_light_LTE; /* rxframe counter in light LTE Coex case */ + uint32 rxrtry_light_LTE; /* rxrtry counter in light LTE Coex case */ + uint32 txnocts_light_LTE; /* txnocts counter in light LTE Coex case */ + uint32 txrts_light_LTE; /* txrts counter in light LTE Coex case */ + uint32 txdeauth_light_LTE; /* txdeauth counter in light LTE Coex case */ + uint32 txassocreq_light_LTE; /* txassocreq counter in light LTE Coex case */ + uint32 txassocrsp_light_LTE; /* txassocrsp counter in light LTE Coex case */ + uint32 txreassocreq_light_LTE; /* txreassocreq counter in light LTE Coex case */ + uint32 txreassocrsp_light_LTE; /* txreassocrsp counter in light LTE Coex case */ + uint32 txframe_heavy_LTE; /* txframe counter in heavy 
LTE Coex case */ + uint32 txretrans_heavy_LTE; /* txretrans counter in heavy LTE Coex case */ + uint32 rxframe_heavy_LTE; /* rxframe counter in heavy LTE Coex case */ + uint32 rxrtry_heavy_LTE; /* rxrtry counter in heavy LTE Coex case */ + uint32 txnocts_heavy_LTE; /* txnocts counter in heavy LTE Coex case */ + uint32 txrts_heavy_LTE; /* txrts counter in heavy LTE Coex case */ + uint32 txdeauth_heavy_LTE; /* txdeauth counter in heavy LTE Coex case */ + uint32 txassocreq_heavy_LTE; /* txassocreq counter in heavy LTE Coex case */ + uint32 txassocrsp_heavy_LTE; /* txassocrsp counter in heavy LTE Coex case */ + uint32 txreassocreq_heavy_LTE; /* txreassocreq counter in heavy LTE Coex case */ + uint32 txreassocrsp_heavy_LTE; /* txreassocrsp counter in heavy LTE Coex case */ + + /* LTE specific ecounters */ + uint16 type4_txinhi_dur; /* Duration of tx inhibit(in ms) due to Type4 */ + uint16 type4_nonzero_cnt; /* Counts of none zero Type4 msg */ + uint16 type4_timeout_cnt; /* Counts of Type4 timeout */ + uint16 rx_pri_dur; /* Duration of wlan_rx_pri assertions */ + uint16 rx_pri_cnt; /* Count of wlan_rx_pri assertions */ + uint16 type6_dur; /* duration of LTE Tx power limiting assertions */ + uint16 type6_cnt; /* Count of LTE Tx power limiting assertions */ + uint16 ts_prot_frm_cnt; /* count of WLAN protection frames triggered by LTE coex */ + uint16 ts_gr_cnt; /* count of intervals granted to WLAN in timesharing */ + uint16 ts_gr_dur; /* duration granted to WLAN in timesharing */ +} wlc_ltecoex_stats_t; + +#define CSA_EVT_CSA_RXED (1 << 0) +#define CSA_EVT_CSA_TIMEOUT (1 << 1) +#define CSA_EVT_FROM_INFRA (1 << 2) +typedef struct csa_event_data { + chanspec_t chan_old; + dot11_ext_csa_ie_t ecsa; + dot11_mesh_csp_ie_t mcsp; + dot11_wide_bw_chan_switch_ie_t wbcs; + uint8 flags; + uint8 pad[3]; +} csa_event_data_t; + +/* ifdef (WL_ASSOC_BCN_RPT) */ +enum wl_bcn_report_cmd_id { + WL_BCN_RPT_CMD_VER = 0, + WL_BCN_RPT_CMD_CONFIG = 1, + WL_BCN_RPT_CMD_VENDOR_IE = 2, + 
WL_BCN_RPT_CMD_LAST +}; + +/* beacon report specific macros */ +#define WL_BCN_RPT_CCX_IE_OVERRIDE (1u << 0) + +/* beacon report specific macros */ +#define WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE (1u << 1) +#define WL_BCN_RPT_ASSOC_SCAN_SOLICITED_MODE (1u << 2) +#define WL_BCN_RPT_ASSOC_SCAN_MODE_SHIFT (1) +#define WL_BCN_RPT_ASSOC_SCAN_MODE_MASK (WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE |\ + WL_BCN_RPT_ASSOC_SCAN_SOLICITED_MODE) +#define WL_BCN_RPT_ASSOC_SCAN_MODE_MAX (WL_BCN_RPT_ASSOC_SCAN_MODE_MASK >> \ + WL_BCN_RPT_ASSOC_SCAN_MODE_SHIFT) +/* beacon report mode specific macro */ +#define WL_BCN_RPT_ASSOC_SCAN_MODE_DEFAULT WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE + +/* beacon report timeout config specific macros */ +#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_DEFAULT (120000) +#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MIN (60000) +#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MAX (0xFFFFFFFF) + +/* beacon report cache count specific macros */ +#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MIN (0) +#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX (8) +#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_DEFAULT (WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX) + +#define WL_BCN_REPORT_CMD_VERSION 1 +struct wl_bcn_report_cfg { + uint32 flags; /**< Flags that defines the operation/setting information */ + uint32 scan_cache_timeout; /**< scan cache timeout value in millisec */ + uint32 scan_cache_timer_pend; /**< Read only pending time for timer expiry in millisec */ + uint8 scan_cache_cnt; /**< scan cache count */ +}; + +/* endif (WL_ASSOC_BCN_RPT) */ + +/* Thermal, Voltage, and Power Mitigation */ +#define TVPM_REQ_VERSION_1 1 +#define TVPM_REQ_CURRENT_VERSION TVPM_REQ_VERSION_1 + +/* tvpm iovar data */ +typedef struct { + uint16 version; /* TVPM request version */ + uint16 length; /* Length of the entire structure */ + + uint16 req_type; /* Request type: wl_tvpm_req_type_t */ + uint16 req_len; /* Length of the following value */ + uint8 value[]; /* Variable length data depending on req_type */ 
+} wl_tvpm_req_t; + +/* tvpm iovar request types */ +typedef enum { + WL_TVPM_REQ_CLTM_INDEX, /* req_value: uint32, range 1...100 */ + WL_TVPM_REQ_PPM_INDEX, /* req_value: uint32, range 1...100 */ + WL_TVPM_REQ_ENABLE, /* req_value: uint32, range 0...1 */ + WL_TVPM_REQ_STATUS, /* req_value: none */ + WL_TVPM_REQ_PERIOD, /* req_value: int32, range {-1,1-10} */ + WL_TVPM_REQ_MAX +} wl_tvpm_req_type_t; + +/* structure for data returned by request type WL_TVPM_REQ_STATUS */ +typedef struct wl_tvpm_status { + uint16 enable; /* whether TVPM is enabled */ + uint16 tx_dutycycle; /* a percentage: 1-100 */ + int16 tx_power_backoff; /* 0...-6 */ + uint16 num_active_chains; /* 1...3 */ + int16 temp; /* local temperature in degrees C */ + uint8 vbat; /* local voltage in units of 0.1V */ + uint8 pad; +} wl_tvpm_status_t; + +/* TVPM ecounters */ +typedef struct wl_tvpm_ecounters_t { + uint16 version; /* version field */ + uint16 length; /* byte length in wl_tvpm_ecounters_t starting at version */ + uint16 tx_dutycycle; /* a percentage: 1-100 */ + int16 tx_power_backoff; /* 0...-6 */ + uint16 num_active_chains; /* 1...3 */ + int16 temp; /* local temperature */ + uint8 vbat; /* local voltage */ + uint8 cltm; /* CLTM index */ + uint8 ppm; /* PPM index */ + uint8 pad; /* pad to align to uint16 */ +} wl_tvpm_ecounters_t; + +#define TDMTX_ECOUNTERS_VERSION_V1 1 +#define TDMTX_ECOUNTERS_VERSION_V2 2 + +/* TDMTX ecounters */ +typedef struct wl_tdmtx_ecounters_v1 { + uint16 version; /* version field */ + uint16 length; /* byte length in wl_tdmtx_ecounters_t starting at version */ + uint32 txa_on; /* TXA on requests */ + uint32 txa_tmcnt; /* Total number of TXA timeout */ + uint32 por_on; /* TXA POR requests */ + uint32 txpuen; /* Path enable requests */ + uint32 txpudis; /* Total number of times Tx path is muted on the slice */ + uint32 txpri_on; /* Total number of times Tx priority was obtained by the slice */ + uint32 txdefer; /* Total number of times Tx was deferred by the slice */ + 
uint32 txmute; /* Total number of times active Tx muted on the slice */ + uint32 actpwrboff; /* Total number of times TX power is backed off by the slice */ + uint32 txa_dur; /* Total time txa on */ + uint32 txpri_dur; /* Total time TXPri */ + uint32 txdefer_dur; /* Total time txdefer */ +} wl_tdmtx_ecounters_v1_t; + +/* TDMTX ecounters for version 2 */ +typedef struct wl_tdmtx_ecounters_v2 { + uint16 version; /* version field */ + uint16 length; /* byte length in wl_tdmtx_ecounters_t starting at version */ + uint32 txa_on; /* TXA on requests */ + uint32 txa_tmcnt; /* Total number of TXA timeout */ + uint32 porhi_on; /* TXA PORHI requests */ + uint32 porlo_on; /* TXA PORLO requests */ + uint32 txpuen; /* Path enable requests */ + uint32 txpudis; /* Total number of times Tx path is muted on the slice */ + uint32 txpri_on; /* Total number of times Tx priority was obtained by the slice */ + uint32 txdefer; /* Total number of times Tx was deferred by the slice */ + uint32 txmute; /* Total number of times active Tx muted on the slice */ + uint32 actpwrboff; /* Total number of times TX power is backed off by the slice */ + uint32 txa_dur; /* Total time txa on */ + uint32 txpri_dur; /* Total time TXPri */ + uint32 txdefer_dur; /* Total time txdefer */ +} wl_tdmtx_ecounters_v2_t; + +/* Note: if this struct is changing update wl_scb_ecounters_vX_t version, + * as this struct is sent as payload in wl_scb_ecounters_vX_t + */ +typedef struct wlc_scb_stats_v1 { + uint32 tx_pkts; /* num of packets transmitted (ucast) */ + uint32 tx_failures; /* num of packets failed */ + uint32 rx_ucast_pkts; /* num of unicast packets received */ + uint32 rx_mcast_pkts; /* num of multicast packets received */ + uint32 tx_rate; /* Rate of last successful tx frame */ + uint32 rx_rate; /* Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /* num of packets decrypted successfully */ + uint32 rx_decrypt_failures; /* num of packets decrypted unsuccessfully */ + uint32 tx_mcast_pkts; /* 
num of mcast pkts txed */ + uint64 tx_ucast_bytes; /* data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /* data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /* data bytes recvd ucast */ + uint64 rx_mcast_bytes; /* data bytes recvd mcast */ + uint32 tx_pkts_retried; /* num of packets where a retry was necessary */ + uint32 tx_pkts_retry_exhausted; /* num of packets where a retry was exhausted */ + uint32 tx_rate_mgmt; /* Rate of last transmitted management frame */ + uint32 tx_rate_fallback; /* last used lowest fallback TX rate */ + uint32 rx_pkts_retried; /* # rx with retry bit set */ + uint32 tx_pkts_total; /* total num of tx pkts */ + uint32 tx_pkts_retries; /* total num of tx retries */ + uint32 tx_pkts_fw_total; /* total num of tx pkts generated from fw */ + uint32 tx_pkts_fw_retries; /* num of fw generated tx pkts retried */ + uint32 tx_pkts_fw_retry_exhausted; /* num of fw generated tx pkts where retry exhausted */ +} wlc_scb_stats_v1_t; + +/* ecounters for scb stats + * XTLV ID: WL_IFSTATS_XTLV_SCB_ECOUNTERS + */ + +#define WL_SCB_ECOUNTERS_VERSION_1 1 +#define WL_SCB_ECOUNTERS_VERSION_2 2 + +typedef struct wl_scb_ecounters_v1 { + uint16 version; /* version field */ + uint16 length; /* struct length starting from version */ + uint32 chanspec; /* current chanspec where scb is operating */ + struct ether_addr ea; /* peer ndi or sta ea */ + uint8 peer_type; /* peer type */ + uint8 pad; + + /* scb tx and rx stats */ + wlc_scb_stats_v1_t stats; +} wl_scb_ecounters_v1_t; + +typedef struct wl_scb_ecounters_v2 { + uint16 version; /* version field */ + uint16 length; /* struct length starting from version */ + uint32 chanspec; /* current chanspec where scb is operating */ + struct ether_addr ea; /* peer ndi or sta ea */ + uint8 peer_type; /* peer type */ + uint8 pad; + + /* scb tx and rx stats */ + uint16 tx_rate; /* Rate(in Mbps) of last successful tx frame */ + uint16 rx_rate; /* Rate(in Mbps) of last successful rx frame */ + uint16 tx_rate_fallback; /* last 
used lowest fallback TX rate(in Mbps) */ + uint16 pad1; + uint32 rx_decrypt_succeeds; /* num of packets decrypted successfully */ + uint32 rx_decrypt_failures; /* num of packets decrypted unsuccessfully */ + uint32 rx_pkts_retried; /* # rx with retry bit set */ + uint32 tx_pkts_retries; /* total num of tx retries */ + uint32 tx_failures; /* num of packets failed */ + uint32 tx_pkts_total; /* total num of tx pkts */ + int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna of data frames */ +} wl_scb_ecounters_v2_t; + +/* ecounters for nan slot stats + * XTLV ID: WL_IFSTATS_XTLV_NAN_SLOT_STATS + */ + +#define WL_NAN_SLOT_ECOUNTERS_VERSION_1 1 + +typedef struct wl_nan_slot_ecounters_v1 { + uint16 version; /* version field */ + uint16 length; /* struct length starting from version */ + uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */ + uint16 cur_slot_idx; /* cur nan slot index */ + uint16 pad; + nan_sched_stats_t sched; /* sched stats */ + wl_nan_mac_stats_t mac; /* mac stats */ +} wl_nan_slot_ecounters_v1_t; +/* + * BT log definitions + */ + +/* common iovar struct */ +typedef struct wl_btl { + uint16 subcmd_id; /* subcommand id */ + uint16 len; /* total length of data[] */ + uint8 data[2]; /* subcommand data, variable length */ +} wl_btl_t; + +/* subcommand ids */ +#define WL_BTL_SUBCMD_ENABLE 0 /* enable/disable logging */ +#define WL_BTL_SUBCMD_STATS 1 /* statistics */ + +/* WL_BTL_SUBCMD_ENABLE data */ +typedef struct wl_blt_enable { + uint8 enable; /* 1 - enable, 0 - disable */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_btl_enable_t; + +/* WL_BTL_SUBCMD_STATS data */ +typedef struct wl_blt_stats { + uint32 bt_interrupt; /* num BT interrupts */ + uint32 config_req; /* num CONFIG_REQ */ + uint32 config_res_success; /* num CONFIG_RES successful */ + uint32 config_res_fail; /* num CONFIG_RES failed */ + uint32 log_req; /* num LOG_REQ */ + uint32 log_res_success; /* num LOG_RES successful */ + uint32 log_res_fail; /* num LOG_RES failed 
*/ + uint32 indirect_read_fail; /* num indirect read fail */ + uint32 indirect_write_fail; /* num indirect write fail */ + uint32 dma_fail; /* num DMA failed */ + uint32 min_log_req_duration; /* min log request duration in usec */ + uint32 max_log_req_duration; /* max log request duration in usec */ + uint16 mem_dump_req; /* num mem dump requests */ + uint16 mem_dump_success; /* num mem dumps successful */ + uint16 mem_dump_fail; /* num mem dumps failed */ + uint16 bt_wake_success; /* num BT wakes successful */ + uint16 bt_wake_fail; /* num BT wakes failed */ + uint16 mem_dump_req_interrupt; /* num MEM_DUMP_REQ interrupt */ + uint16 mem_dump_res_interrupt; /* num MEM_DUMP_RES interrupt */ + uint16 mem_dump_res_timeout; /* num MEM_DUMP_RES timeout */ + uint16 mem_dump_proc_no_bt_ready; /* num proceed if no BT ready */ + uint16 mem_dump_proc_no_bt_response; /* num proceed if no BT response */ + uint16 mem_dump_proc_no_bt_clock; /* num proceed if no BT clock */ + uint16 pad; /* alignment */ + uint32 last_failed_region; /* start addr of last failed region */ + uint32 min_mem_dump_duration; /* min mem dump duration in usec */ + uint32 max_mem_dump_duration; /* max mem dump duration in usec */ +} wl_btl_stats_t; + +/* IOV AWD DATA */ + +/* AWD DATA structures */ +typedef struct { + uint8 version; /* Extended trap version info */ + uint8 reserved; /* currently unused */ + uint16 length; /* Length of data excluding this header */ + uint8 data[]; /* this data is TLV of tags */ +} awd_data_v1_t; + +/* AWD TAG structure */ +typedef struct { + uint8 tagid; /* one of AWD DATA TAGs numbers */ + uint8 length; /* the data size represented by this field must be aligned to 32 bits */ + uint8 data[]; /* variable size, defined by length field */ +} awd_tag_data_v1_t; + +/* IOV ETD DATA */ + +/* ETD DATA structures */ +typedef struct { + uint8 version; /* Extended trap version info */ + uint8 reserved; /* currently unused */ + uint16 length; /* Length of data excluding this header */ + 
uint8 data[]; /* this data is TLV of tags */ +} etd_data_v1_t; + +/* ETD TAG structure */ +typedef struct { + uint8 tagid; /* one of ETD DATA TAGs numbers */ + uint8 length; /* the data size represented by this field must be aligned to 32 bits */ + uint8 data[]; /* variable size, defined by length field */ +} etd_tag_data_v1_t; + +/* ETD information structures associated with ETD_DATA_Tags */ +/* ETD_JOIN_CLASSIFICATION_INFO 10 */ +typedef struct { + uint8 assoc_type; /* assoc type */ + uint8 assoc_state; /* current state of assoc state machine */ + uint8 wpa_state; /* wpa->state */ + uint8 wsec_portopen; /* shows if security port is open */ + uint8 total_attempts_num; /* total number of join attempts (bss_retries) */ + uint8 num_of_targets; /* up to 3, in current design */ + uint8 reserved [2]; /* padding to get 32 bits alignment */ + uint32 wsec; /* bsscfg->wsec */ + uint32 wpa_auth; /* bsscfg->WPA_auth */ + uint32 time_to_join; /* time duration to process WLC_SET_SSID request (ms) */ +} join_classification_info_v1_t; + +/* ETD_JOIN_TARGET_CLASSIFICATION_INFO 11 */ +typedef struct { + int8 rssi; /* RSSI on current channel */ + uint8 cca; /* CCA on current channel */ + uint8 channel; /* current channel */ + uint8 num_of_attempts; /* (bss_retries) up to 5 */ + uint8 oui[3]; /* the first three octets of the AP's address */ + uint8 reserved; /* padding to get 32 bits alignment */ + uint32 time_duration; /* time duration of current attempt (ms) */ +} join_target_classification_info_v1_t; + +/* ETD_ASSOC_STATE 12 */ +typedef struct { + uint8 assoc_state; /* assoc type */ + uint8 reserved [3]; /* padding to get 32 bits alignment */ +} join_assoc_state_v1_t; + +/* ETD_CHANNEL 13 tag */ +typedef struct { + uint8 channel; /* last attempt channel */ + uint8 reserved [3]; /* padding to get 32 bits alignment */ +} join_channel_v1_t; + +/* ETD_TOTAL_NUM_OF_JOIN_ATTEMPTS 14 */ +typedef struct { + uint8 total_attempts_num; /* total number of join attempts (bss_retries) */ + 
uint8 reserved [3]; /* padding to get 32 bits alignment */ +} join_total_attempts_num_v1_t; + +/* IOV_ROAM_CACHE structures */ + +enum wl_rmc_report_cmd_id { + WL_RMC_RPT_CMD_VER = 0, + WL_RMC_RPT_CMD_DATA = 1, + WL_RMC_RPT_CMD_LAST +}; + +enum wl_rmc_report_xtlv_id { + WL_RMC_RPT_XTLV_VER = 0x0, + WL_RMC_RPT_XTLV_BSS_INFO = 0x1, + WL_RMC_RPT_XTLV_CANDIDATE_INFO = 0x2 +}; + +/* WL_RMC_RPT_XTLV_BSS_INFO */ +typedef struct { + int16 rssi; /* current BSS RSSI */ + uint8 reason; /* reason code for last full scan */ + uint8 status; /* last status code for not roaming */ + uint32 fullscan_count; /* number of full scans performed on current BSS */ + uint32 time_full_scan; /* delta time (in ms) between cur time and full scan timestamp */ +} rmc_bss_info_v1_t; + +/* WL_RMC_RPT_XTLV_CANDIDATE_INFO */ +typedef struct { + int16 rssi; /* last seen rssi */ + uint16 ctl_channel; /* channel */ + uint32 time_last_seen; /* delta time (in ms) between cur time and last seen timestamp */ + uint16 bss_load; /* BSS load */ + uint8 bssid [6]; /* padding to get 32 bits alignment */ +} rmc_candidate_info_v1_t; + +#define WL_FILTER_IE_VERSION 1 +enum wl_filter_ie_options { + WL_FILTER_IE_CLEAR = 0, /* allow element id in packet.For suboption */ + WL_FILTER_IE_SET = 1, /* filter element id in packet.For suboption */ + WL_FILTER_IE_LIST = 2, /* list element ID's.Set as option */ + WL_FILTER_IE_CLEAR_ALL = 3, /* clear all the element.Set as option */ + WL_FILTER_IE_CHECK_SUB_OPTION = 4 /* check for suboptions.Set only as option */ +}; + +typedef struct wl_filter_ie_tlv { + uint16 id; + uint16 len; /* sub option length + pattern length */ + uint8 data[]; /* sub option + pattern matching(OUI,type,sub-type) */ +} wl_filter_ie_tlv_t; + +typedef struct wl_filter_ie_iov { + uint16 version; /* Structure version */ + uint16 len; /* Total length of the structure */ + uint16 fixed_length; /* Total length of fixed fields */ + uint8 option; /* Filter action - check for suboption */ + uint8 pad[1]; /* Align 
to 4 bytes */ + uint32 pktflag; /* frame type */ + uint8 tlvs[]; /* variable data (zero in for list ,clearall) */ +} wl_filter_ie_iov_v1_t; + +/* Event aggregation config */ +#define EVENT_AGGR_CFG_VERSION 1 +#define EVENT_AGGR_DISABLED 0x0 +#define EVENT_AGGR_ENABLED 0x1 + +#define EVENT_AGGR_BUFSIZE_MAX 1512 +#define EVENT_AGGR_BUFSIZE_MIN 512 + +#define EVENT_AGGR_FLUSH_TIMEOUT_DEFAULT 100 +#define EVENT_AGGR_FLUSH_TIMEOUT_MAX 2000 +#define EVENT_AGGR_NUM_EVENTS_FLUSH 5 +typedef struct event_aggr_config { + uint16 version; + uint16 len; + uint16 flags; /* bit 0 to enable/disable the feature */ + uint16 bufsize; /* Aggregate buffer size */ + uint16 flush_timeout; /* Timeout for event flush */ + uint16 num_events_flush; /* Number of events aggregated before flush */ +} event_aggr_config_t; + +#ifndef WL_TDMTX_TYPEDEF_HAS_ALIAS +typedef tdmtx_cnt_v1_t tdmtx_cnt_t; +typedef tdmtx_cnt_shm_v1_t tdmtx_cnt_shm_t; +typedef wl_tdmtx_ecounters_v1_t wl_tdmtx_ecounters_t; +#define WL_CNT_TDMTX_STRUCT_SZ (sizeof(tdmtx_cnt_t)) +#define WL_CNT_TDMTX_SHM_SZ (sizeof(tdmtx_cnt_shm_t)) +#endif // endif + +/** chanctxt related statistics */ +#define CHANCTXT_STATS_VERSION_1 1 +#define CHANCTXT_STATS_CURRENT_VERSION CHANCTXT_STATS_VERSION_1 +typedef struct wlc_chanctxt_stats { + uint32 excursionq_end_miss; + uint32 activeq_end_miss; + uint32 no_chanctxt_count; + uint32 txqueue_end_incomplete; + uint32 txqueue_start_incomplete; +} wlc_chanctxt_stats_core_t; + +typedef struct chanctxt_stats { + uint16 version; + uint16 length; + wlc_chanctxt_stats_core_t corestats[MAX_NUM_D11CORES]; +} wlc_chanctxt_stats_t; + +typedef struct wl_txdc_ioc { + uint8 ver; + uint8 id; /* ID of the sub-command */ + uint16 len; /* total length of all data[] */ + uint8 data[]; /* var len payload */ +} wl_txdc_ioc_t; + +/* + * iovar subcommand ids + */ +enum { + IOV_TXDC_ENB = 1, + IOV_TXDC_MODE = 2, + IOV_TXDC_DUMP = 3, + IOV_TXDC_LAST +}; + +/* WL_NAN_XTLV_SLOT_STATS */ +/* WL_NAN_EVENT_SLOT_START, 
WL_NAN_EVENT_SLOT_END */ +typedef struct nan_slot_event_data { + uint32 cur_slot_idx; /* current idx in channel schedule */ + uint32 fw_time; /* target current time in microseconds */ + uint32 band; /* current band (2G/5G) for which the event is received */ +} nan_slot_event_data_t; + +/* SAE (Simultaneous Authentication of Equals) error codes. + * These error codes are local. + */ + +#define WL_SAE_E_BASE -3072 + +/* SAE status codes are reserved from -3072 to -4095 (1K) */ + +enum WL_SAE_E_STATUS_CODES { + WL_SAE_E_AUTH_FAILURE = -3072, + /* Discard silently */ + WL_SAE_E_AUTH_DISCARD = -3073, + /* Authentication in progress */ + WL_SAE_E_AUTH_CONTINUE = -3074, + /* Invalid scalar/elt */ + WL_SAE_E_AUTH_COMMIT_INVALID = -3075, + /* Invalid confirm token */ + WL_SAE_E_AUTH_CONFIRM_INVALID = -3076, + /* Peer scalar validation failure */ + WL_SAE_E_CRYPTO_SCALAR_VALIDATION = -3077, + /* Peer element prime validation failure */ + WL_SAE_E_CRYPTO_ELE_PRIME_VALIDATION = -3078, + /* Peer element is not on the curve */ + WL_SAE_E_CRYPTO_ELE_NOT_ON_CURVE = -3079, + /* Generic EC error (eliptic curve related) */ + WL_SAE_E_CRYPTO_EC_ERROR = -3080, + /* Both local and peer mac addrs are same */ + WL_SAE_E_CRYPTO_EQUAL_MACADDRS = -3081, + /* Loop exceeded in deriving the scalar */ + WL_SAE_E_CRYPTO_SCALAR_ITER_EXCEEDED = -3082, + /* ECC group is unsupported */ + WL_SAE_E_CRYPTO_UNSUPPORTED_GROUP = -3083, + /* Exceeded the hunting-and-pecking counter */ + WL_SAE_E_CRYPTO_PWE_COUNTER_EXCEEDED = -3084, + /* SAE crypto component is not initialized */ + WL_SAE_E_CRYPTO_NOT_INITED = -3085, + /* bn_get has failed */ + WL_SAE_E_CRYPTO_BN_GET_ERROR = -3086, + /* bn_set has failed */ + WL_SAE_E_CRYPTO_BN_SET_ERROR = -3087, + /* PMK is not computed yet */ + WL_SAE_E_CRYPTO_PMK_UNAVAILABLE = -3088, + /* Peer confirm did not match */ + WL_SAE_E_CRYPTO_CONFIRM_MISMATCH = -3089, + /* Element K is at infinity no the curve */ + WL_SAE_E_CRYPTO_KEY_AT_INFINITY = -3090, + /* SAE Crypto private 
data magic number mismatch */ + WL_SAE_E_CRYPTO_PRIV_MAGIC_MISMATCH = -3091 +}; + +/* PMK manager block. Event codes from -5120 to -6143 */ + +/* PSK hashing event codes */ +typedef enum wlc_pmk_psk_hash_status { + WL_PMK_E_PSK_HASH_FAILED = -5120, + WL_PMK_E_PSK_HASH_DONE = -5121, + WL_PMK_E_PSK_HASH_RUNNING = -5122, + WL_PMK_E_PSK_INVALID = -5123, + WL_PMK_E_PSK_NOMEM = -5124 +} wlc_pmk_psk_hash_status_t; + +/* Block Channel */ +#define WL_BLOCK_CHANNEL_VER_1 1u + +typedef struct wl_block_ch_v1 { + uint16 version; + uint16 len; + uint32 band; /* Band select */ + uint8 channel_num; /* The number of block channels in the selected band */ + uint8 padding[3]; + uint8 channel[]; /* Channel to block, Variable Length */ +} wl_block_ch_v1_t; + +typedef struct dma_wl_addr_region { + uint32 addr_low; + uint32 addr_high; +} dma_wl_addr_region_t; + +#define WL_ROAMSTATS_IOV_VERSION 1 + +#define MAX_PREV_ROAM_EVENTS 16u + +#define ROAMSTATS_UNKNOWN_CNT 0xFFFFu + +/* roaming statistics counter structures */ +typedef struct wlc_assoc_roamstats_event_msg_v1 { + uint32 event_type; /* Message (see below) */ + uint32 status; /* Status code (see below) */ + uint32 reason; /* Reason code (if applicable) */ + uint32 timestamp; /* Timestamp of event */ +} wlc_assoc_roamstats_event_msg_v1_t; + +enum wl_roamstats_cmd_id { + WL_ROAMSTATS_XTLV_CMD_VER = 0, + WL_ROAMSTATS_XTLV_CMD_RESET = 1, + WL_ROAMSTATS_XTLV_CMD_STATUS = 2, + WL_ROAMSTATS_XTLV_CMD_LAST /* Keep this at the end */ +}; + +enum wl_roamstats_xtlv_id { + WL_ROAMSTATS_XTLV_VER = 0x0, + WL_ROAMSTATS_XTLV_COUNTER_INFO = 0x1, + WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS = 0x2, + WL_ROAMSTATS_XTLV_REASON_INFO = 0x3 +}; + +/* WL_ROAMSTATS_XTLV_COUNTER_INFO */ +typedef struct { + uint32 initial_assoc_time; + uint32 prev_roam_time; + uint32 host_access_time; + uint16 roam_success_cnt; + uint16 roam_fail_cnt; + uint16 roam_attempt_cnt; + uint16 max_roam_target_cnt; + uint16 min_roam_target_cnt; + uint16 max_cached_ch_cnt; + uint16 
min_cached_ch_cnt; + uint16 partial_roam_scan_cnt; + uint16 full_roam_scan_cnt; +} roamstats_counter_info_v1_t; + +/* WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS */ +typedef struct { + uint16 max; + uint16 pos; + wlc_assoc_roamstats_event_msg_v1_t roam_event[]; +} roamstats_prev_roam_events_v1_t; + +/* WL_ROAMSTATS_XTLV_REASON_INFO */ +typedef struct { + uint16 max; + uint16 reason_cnt[]; +} roamstats_reason_info_v1_t; + +#ifdef HEALTH_CHECK_WLIOCTL +/* Health check status format: + * reporting status size = uint32 + * 8 LSB bits are reserved for: WARN (0), ERROR (1), and other levels + * MSB 24 bits are reserved for client to fill in its specific status + */ +#define HEALTH_CHECK_STATUS_OK 0 +/* Bit positions. */ +#define HEALTH_CHECK_STATUS_WARN 0x1 +#define HEALTH_CHECK_STATUS_ERROR 0x2 +#define HEALTH_CHECK_STATUS_TRAP 0x4 +#define HEALTH_CHECK_STATUS_NOEVENT 0x8 + +/* Indication that required information is populated in log buffers */ +#define HEALTH_CHECK_STATUS_INFO_LOG_BUF 0x80 +#define HEALTH_CHECK_STATUS_MASK (0xFF) + +#define HEALTH_CHECK_STATUS_MSB_SHIFT 8 +#endif /* HEALTH_CHECK_WLIOCTL */ + +/** receive signal reporting module interface */ + +#define WL_RXSIG_IOV_MAJOR_VER (1u) +#define WL_RXSIG_IOV_MINOR_VER (1u) +#define WL_RXSIG_IOV_MAJOR_VER_SHIFT (8u) +#define WL_RXSIG_IOV_VERSION \ + ((WL_RXSIG_IOV_MAJOR_VER << WL_RXSIG_IOV_MAJOR_VER_SHIFT) | WL_RXSIG_IOV_MINOR_VER) +#define WL_RXSIG_IOV_GET_MAJOR(x) (x >> WL_RXSIG_IOV_MAJOR_VER_SHIFT) +#define WL_RXSIG_IOV_GET_MINOR(x) (x & 0xFF) + +enum wl_rxsig_cmd_rssi_mode { + WL_RXSIG_MODE_DB = 0x0, + WL_RXSIG_MODE_QDB = 0x1, + WL_RXSIG_MODE_LAST +}; + +/* structure defs for 'wl rxsig [cmd]' iovars */ +enum wl_rxsig_iov_v1 { + WL_RXSIG_CMD_RSSI = 0x1, /**< combined rssi moving avg */ + WL_RXSIG_CMD_SNR = 0x2, /**< combined snr moving avg */ + WL_RXSIG_CMD_RSSIANT = 0x3, /**< rssi moving avg per-ant */ + WL_RXSIG_CMD_SNRANT = 0x4, /**< snr moving avg per-snr */ + WL_RXSIG_CMD_SMPLWIN = 0x5, /**< config for sampling 
window size */ + WL_RXSIG_CMD_SMPLGRP = 0x7, /**< config for grouping of pkt type */ + WL_RXSIG_CMD_STA_MA = 0x8, + WL_RXSIG_CMD_MAMODE = 0x9, + WL_RXSIG_CMD_MADIV = 0xa, + WL_RXSIG_CMD_DUMP = 0xb, + WL_RXSIG_CMD_DUMPWIN = 0xc, + WL_RXSIG_CMD_TOTAL +}; + +struct wl_rxsig_cfg_v1 { + uint16 version; + chanspec_t chan; /**< chanspec info for querying stats */ + uint8 pmac[ETHER_ADDR_LEN]; /**< peer(link) mac address */ +}; + +struct wl_rxsig_iov_rssi_v1 { + int8 rssi; + uint8 rssi_qdb; + uint8 pad[2]; +}; + +struct wl_rxsig_iov_snr_v1 { + int16 snr; + uint16 pad; +}; + +struct wl_rxsig_iov_rssi_ant_v1 { + int8 deci[WL_RSSI_ANT_MAX]; + uint8 frac[WL_RSSI_ANT_MAX]; + uint8 rssi_mode; /**< MODE_DB or MODE_QDB */ + uint8 num_of_ant; /**< total number of ants */ + uint8 pad[2]; /**< padding for 32bit align */ +}; + +#ifdef BCM_SDC + +#define SDC_TRIGGER_CONFIG_VER_1 1 +typedef struct { + uint16 version; + uint16 type; + uint8 activate; + uint8 pad; +} sdc_trigger_cfg_t; + +typedef enum sdc_trigger_types { + SDC_TYPE_STA_ONBOARD_DEBUG = 1, +#ifdef SDC_TEST + /* + * This is for test purpose only. Don't assign specific value. + * Keep at the end + */ + SDC_TYPE_TEST1, + SDC_TYPE_TEST2, + SDC_TYPE_TEST3, +#endif /* SDC_TEST */ + SDC_TYPE_MAX_TRIGGER +} sdc_trigger_types_t; + +/* *** SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */ + +/* tlv IDs uniquely identifies tx and rx stats component */ +enum wl_slice_hist_stats_xtlv_id { + WL_STATE_HIST_TX_TOSS_REASONS = 0x1, + WL_STATE_HIST_RX_TOSS_REASONS = 0x2 +}; + +#ifndef WLC_HIST_TOSS_LEN +#define WLC_HIST_TOSS_LEN (8u) +#endif // endif +#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_1 (1u) +#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_1 (1u) + +/* [see HIST_TOSS_xxxx macros] + * bits [7..0] : 8 bits : toss sts. 
+ * [11..8] : cfgidx + * [15..12]: ac + * [31..16]: seq + */ +#define HIST_TOSS_STS_POS (0u) +#define HIST_TOSS_STS_MASK (0x000000ffu) +#define HIST_TOSS_CFGIDX_POS (8u) +#define HIST_TOSS_CFGIDX_MASK (0x00000f00u) +#define HIST_TOSS_AC_POS (12u) +#define HIST_TOSS_AC_MASK (0x0000f000u) +#define HIST_TOSS_SEQ_POS (16u) +#define HIST_TOSS_SEQ_MASK (0xffff0000u) + +#define WLC_SDC_COMPACT_TOSS_REASON(sts, ifidx, ac, seq) \ + ((sts) | ((ifidx) << HIST_TOSS_CFGIDX_POS) | ((ac) << HIST_TOSS_AC_POS) | \ + (seq << HIST_TOSS_SEQ_POS)) + +typedef struct { + uint16 version; + uint8 hist_toss_type; /* from wl_slice_hist_XX_stats_xtlv_id */ + uint8 hist_toss_num; /* number of elements in hist_toss_xxx */ + uint32 hist_toss_cur_idx; /* latest data is in this index */ + uint32 hist_toss_reasons[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as + * per HIST_TOSS_xxx format + */ + uint32 hist_toss_counts[WLC_HIST_TOSS_LEN]; /* toss counts corr to reasons */ +} wl_hist_compact_toss_stats_v1_t; + +/* ***END of SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */ + +#endif /* BCM_SDC */ + +typedef struct wl_avs_info_v1 { + uint16 version; /* Structure version */ + uint16 equ_version; /* Equation Version */ + uint32 RO; /* RO in OTP */ + uint32 equ_csr; /* Equated CSR */ + uint32 read_csr; /* Read Back CSR */ + uint32 aging; /* aging setting in nvram */ +} wl_avs_info_v1_t; + +#define WL_AVS_INFO_VER_1 1 + +/* bitmap for clm_flags iovar */ +#define WL_CLM_TXBF 0x01 /**< Flag for Tx beam forming */ +#define WL_CLM_RED_EU 0x02 /* Flag for EU RED */ +#define WL_CLM_EDCRS_EU 0x04 /**< Use EU post-2015 energy detect */ +#define WL_CLM_DFS_TPC 0x08 /**< Flag for DFS TPC */ +#define WL_CLM_RADAR_TYPE_EU 0x10 /**< Flag for EU */ +#define WL_CLM_DFS_FCC WL_CLM_DFS_TPC /**< Flag for DFS FCC */ +#define WL_CLM_DFS_EU (WL_CLM_DFS_TPC | WL_CLM_RADAR_TYPE_EU) /**< Flag for DFS EU */ + +/* SC (scan core) command IDs */ +enum wl_sc_cmd { + WL_SC_CMD_DBG = 0, + WL_SC_CMD_CNX = 1, + WL_SC_CMD_LAST 
+}; + +#endif /* _wlioctl_h_ */ diff --git a/bcmdhd.100.10.315.x/include/wlioctl_defs.h b/bcmdhd.100.10.315.x/include/wlioctl_defs.h new file mode 100644 index 0000000..7d7e550 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/wlioctl_defs.h @@ -0,0 +1,2320 @@ +/* + * Custom OID/ioctl definitions for + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: wlioctl_defs.h 766704 2018-06-09 05:44:18Z $ + */ + +#ifndef wlioctl_defs_h +#define wlioctl_defs_h + +/* All builds use the new 11ac ratespec/chanspec */ +#undef D11AC_IOTYPES +#define D11AC_IOTYPES + +#ifndef USE_NEW_RSPEC_DEFS +/* WL_RSPEC defines for rate information */ +#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */ +#define WL_RSPEC_HE_MCS_MASK 0x0000000F /* HE MCS value */ +#define WL_RSPEC_HE_NSS_MASK 0x000000F0 /* HE Nss value */ +#define WL_RSPEC_HE_NSS_SHIFT 4 /* HE Nss value shift */ +#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */ +#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */ +#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */ +#define WL_RSPEC_TXEXP_MASK 0x00000300 +#define WL_RSPEC_TXEXP_SHIFT 8 +#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */ +#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */ +#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */ +#define WL_RSPEC_TXBF 0x00200000 /* bit indicates TXBF mode */ +#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */ +#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */ +#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */ +#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */ +#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override rate & mode */ + +/* WL_RSPEC_ENCODING field defs */ +#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_HE 0x03000000 /* HE MCS and Nss is stored in RSPEC_RATE_MASK */ + +/* WL_RSPEC_BW field defs */ +#define WL_RSPEC_BW_UNSPECIFIED 0 +#define WL_RSPEC_BW_20MHZ 0x00010000 +#define WL_RSPEC_BW_40MHZ 0x00020000 +#define WL_RSPEC_BW_80MHZ 0x00030000 +#define 
WL_RSPEC_BW_160MHZ 0x00040000 +#define WL_RSPEC_BW_10MHZ 0x00050000 +#define WL_RSPEC_BW_5MHZ 0x00060000 +#define WL_RSPEC_BW_2P5MHZ 0x00070000 + +#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */ + +#endif /* !USE_NEW_RSPEC_DEFS */ + +/* Legacy defines for the nrate iovar */ +#define OLD_NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */ +#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */ +#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */ +#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */ +#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */ +#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */ +#define OLD_NRATE_SGI 0x00800000 /* sgi mode */ +#define OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */ + +#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */ +#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */ +#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */ +#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */ + +#define WLC_11N_N_PROP_MCS 6 +#define WLC_11N_FIRST_PROP_MCS 87 +#define WLC_11N_LAST_PROP_MCS 102 + +#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */ +#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */ + +#define IBSS_MED 15 /* Mediom in-bss congestion percentage */ +#define IBSS_HI 25 /* Hi in-bss congestion percentage */ +#define OBSS_MED 12 +#define OBSS_HI 25 +#define INTERFER_MED 5 +#define INTERFER_HI 10 + +#define CCA_FLAG_2G_ONLY 0x01 /* Return a channel from 2.4 Ghz band */ +#define CCA_FLAG_5G_ONLY 0x02 /* Return a channel from 2.4 Ghz band */ +#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */ +#define CCA_FLAGS_PREFER_1_6_11 0x10 +#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exlude channel based on interfer level */ + +#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */ +#define 
CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */ +#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */ +#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */ +#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */ + +#define WL_STA_AID(a) ((a) &~ 0xc000) + +/* Flags for sta_info_t indicating properties of STA */ +#define WL_STA_BRCM 0x00000001 /* Running a Broadcom driver */ +#define WL_STA_WME 0x00000002 /* WMM association */ +#define WL_STA_NONERP 0x00000004 /* No ERP */ +#define WL_STA_AUTHE 0x00000008 /* Authenticated */ +#define WL_STA_ASSOC 0x00000010 /* Associated */ +#define WL_STA_AUTHO 0x00000020 /* Authorized */ +#define WL_STA_WDS 0x00000040 /* Wireless Distribution System */ +#define WL_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */ +#define WL_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */ +#define WL_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */ +#define WL_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */ +#define WL_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */ +#define WL_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */ +#define WL_STA_N_CAP 0x00002000 /* STA 802.11n capable */ +#define WL_STA_SCBSTATS 0x00004000 /* Per STA debug stats */ +#define WL_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */ +#define WL_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */ +#define WL_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */ +#define WL_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */ +#define WL_STA_RIFS_CAP 0x00080000 /* rifs enabled */ +#define WL_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */ +#define WL_STA_WPS 0x00200000 /* WPS state */ +#define WL_STA_DWDS_CAP 0x01000000 /* DWDS CAP */ +#define WL_STA_DWDS 0x02000000 /* DWDS active */ +#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */ 
+ +/* STA HT cap fields */ +#define WL_STA_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */ +#define WL_STA_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */ +#define WL_STA_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */ +#define WL_STA_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */ +#define WL_STA_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */ +#define WL_STA_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */ +#define WL_STA_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */ +#define WL_STA_CAP_GF 0x0010 /* Greenfield preamble support */ +#define WL_STA_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */ +#define WL_STA_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */ +#define WL_STA_CAP_TX_STBC 0x0080 /* Tx STBC support */ +#define WL_STA_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */ +#define WL_STA_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */ +#define WL_STA_CAP_DELAYED_BA 0x0400 /* delayed BA support */ +#define WL_STA_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */ +#define WL_STA_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */ +#define WL_STA_CAP_PSMP 0x2000 /* Power Save Multi Poll support */ +#define WL_STA_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */ +#define WL_STA_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */ + +#define WL_STA_CAP_RX_STBC_NO 0x0 /* no rx STBC support */ +#define WL_STA_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */ +#define WL_STA_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */ +#define WL_STA_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */ + +/* scb vht flags */ +#define WL_STA_VHT_LDPCCAP 0x0001 +#define WL_STA_SGI80 0x0002 +#define WL_STA_SGI160 0x0004 +#define WL_STA_VHT_TX_STBCCAP 0x0008 +#define WL_STA_VHT_RX_STBCCAP 0x0010 +#define WL_STA_SU_BEAMFORMER 0x0020 +#define WL_STA_SU_BEAMFORMEE 0x0040 +#define WL_STA_MU_BEAMFORMER 0x0080 +#define 
WL_STA_MU_BEAMFORMEE 0x0100 +#define WL_STA_VHT_TXOP_PS 0x0200 +#define WL_STA_HTC_VHT_CAP 0x0400 + +/* Values for TX Filter override mode */ +#define WLC_TXFILTER_OVERRIDE_DISABLED 0 +#define WLC_TXFILTER_OVERRIDE_ENABLED 1 + +#define WL_IOCTL_ACTION_GET 0x0 +#define WL_IOCTL_ACTION_SET 0x1 +#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e +#define WL_IOCTL_ACTION_OVL_RSV 0x20 +#define WL_IOCTL_ACTION_OVL 0x40 +#define WL_IOCTL_ACTION_MASK 0x7e +#define WL_IOCTL_ACTION_OVL_SHIFT 1 + +/* For WLC_SET_INFRA ioctl & infra_configuration iovar SET/GET operations */ +#define WL_BSSTYPE_INDEP 0 +#define WL_BSSTYPE_INFRA 1 +#define WL_BSSTYPE_ANY 2 /* deprecated */ +#define WL_BSSTYPE_MESH 3 + +/* Bit definitions of mws_active_scan_throttle iovar */ + +#define WL_SCAN_THROTTLE_MASK 0xF + +#define WL_SCAN_THROTTLE_ASSOCSCAN (1U << 0) +#define WL_SCAN_THROTTLE_ROAMSCAN (1U << 1) +#define WL_SCAN_THROTTLE_OTHER_FW_SCAN (1U << 2) /* for other scans like pno etc */ +#define WL_SCAN_THROTTLE_HOSTSCAN (1U << 3) + +#define WL_SCANFLAGS_CLIENT_MASK 0xF00 +#define WL_SCANFLAGS_CLIENT_SHIFT 8 + +/* Bitmask for scan_type */ +/* Use lower 16 bit for scan flags, the upper 16 bits are for internal use */ +#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */ +#define WL_SCANFLAGS_LOW_PRIO 0x02 /* Low priority scan */ +#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */ +#define WL_SCANFLAGS_OFFCHAN 0x08 /* allow scanning/reporting off-channel APs */ +#define WL_SCANFLAGS_HOTSPOT 0x10 /* automatic ANQP to hotspot APs */ +#define WL_SCANFLAGS_SWTCHAN 0x20 /* Force channel switch for differerent bandwidth */ +#define WL_SCANFLAGS_FORCE_PARALLEL 0x40 /* Force parallel scan even when actcb_fn_t is on. + * by default parallel scan will be disabled if actcb_fn_t + * is provided. 
+ */ +#define WL_SCANFLAGS_SISO 0x40 /* Use 1 RX chain for scanning */ +#define WL_SCANFLAGS_MIMO 0x80 /* Force MIMO scanning */ +#define WL_SCANFLAGS_ASSOCSCAN 0x100 /* Assoc scan */ +#define WL_SCANFLAGS_ROAMSCAN 0x200 /* Roam scan */ +#define WL_SCANFLAGS_FWSCAN 0x400 /* Other FW scan */ +#define WL_SCANFLAGS_HOSTSCAN 0x800 /* Host scan */ + +/* wl_iscan_results status values */ +#define WL_SCAN_RESULTS_SUCCESS 0 +#define WL_SCAN_RESULTS_PARTIAL 1 +#define WL_SCAN_RESULTS_PENDING 2 +#define WL_SCAN_RESULTS_ABORTED 3 +#define WL_SCAN_RESULTS_NO_MEM 4 + +#define SCANOL_ENABLED (1 << 0) +#define SCANOL_BCAST_SSID (1 << 1) +#define SCANOL_NOTIFY_BCAST_SSID (1 << 2) +#define SCANOL_RESULTS_PER_CYCLE (1 << 3) + +/* scan times in milliseconds */ +#define SCANOL_HOME_TIME 45 /* for home channel processing */ +#define SCANOL_ASSOC_TIME 20 /* dwell on a channel while associated */ +#define SCANOL_UNASSOC_TIME 40 /* dwell on a channel while unassociated */ +#define SCANOL_PASSIVE_TIME 110 /* listen on a channelfor passive scan */ +#define SCANOL_AWAY_LIMIT 100 /* max time to be away from home channel */ +#define SCANOL_IDLE_REST_TIME 40 +#define SCANOL_IDLE_REST_MULTIPLIER 0 +#define SCANOL_ACTIVE_REST_TIME 20 +#define SCANOL_ACTIVE_REST_MULTIPLIER 0 +#define SCANOL_CYCLE_IDLE_REST_TIME 300000 /* Idle Rest Time between Scan Cycle (msec) */ +#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER 0 /* Idle Rest Time Multiplier */ +#define SCANOL_CYCLE_ACTIVE_REST_TIME 200 +#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER 0 +#define SCANOL_MAX_REST_TIME 3600000 /* max rest time between scan cycle (msec) */ +#define SCANOL_CYCLE_DEFAULT 0 /* default for Max Scan Cycle, 0 = forever */ +#define SCANOL_CYCLE_MAX 864000 /* Max Scan Cycle */ + /* 10 sec/scan cycle => 100 days */ +#define SCANOL_NPROBES 2 /* for Active scan; send n probes on each channel */ +#define SCANOL_NPROBES_MAX 5 /* for Active scan; send n probes on each channel */ +#define SCANOL_SCAN_START_DLY 10 /* delay start of offload 
scan (sec) */ +#define SCANOL_SCAN_START_DLY_MAX 240 /* delay start of offload scan (sec) */ +#define SCANOL_MULTIPLIER_MAX 10 /* Max Multiplier */ +#define SCANOL_UNASSOC_TIME_MAX 100 /* max dwell on a channel while unassociated */ +#define SCANOL_PASSIVE_TIME_MAX 500 /* max listen on a channel for passive scan */ +#define SCANOL_SSID_MAX 16 /* max supported preferred SSID */ + +/* masks for channel and ssid count */ +#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff +#define WL_SCAN_PARAMS_NSSID_SHIFT 16 + +#define WL_SCAN_ACTION_START 1 +#define WL_SCAN_ACTION_CONTINUE 2 +#define WL_SCAN_ACTION_ABORT 3 +#if defined(SIMPLE_ISCAN) +#define ISCAN_RETRY_CNT 5 +#define ISCAN_STATE_IDLE 0 +#define ISCAN_STATE_SCANING 1 +#define ISCAN_STATE_PENDING 2 +#endif /* SIMPLE_ISCAN */ + +#define ANTENNA_NUM_1 1 /* total number of antennas to be used */ +#define ANTENNA_NUM_2 2 +#define ANTENNA_NUM_3 3 +#define ANTENNA_NUM_4 4 + +#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */ +#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */ +#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */ +#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */ +#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */ +#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */ + +/* interference source detection and identification mode */ +#define ITFR_MODE_DISABLE 0 /* disable feature */ +#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */ +#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */ + +/* bit definitions for flags in interference source report */ +#define ITFR_INTERFERENCED 1 /* interference detected */ +#define ITFR_HOME_CHANNEL 2 /* home channel has interference */ +#define ITFR_NOISY_ENVIRONMENT 4 /* noisy environemnt so feature stopped */ + +#define WL_NUM_RPI_BINS 8 +#define WL_RM_TYPE_BASIC 1 +#define WL_RM_TYPE_CCA 2 +#define WL_RM_TYPE_RPI 3 +#define WL_RM_TYPE_ABORT -1 /* ABORT any 
in-progress RM request */ + +#define WL_RM_FLAG_PARALLEL (1<<0) + +#define WL_RM_FLAG_LATE (1<<1) +#define WL_RM_FLAG_INCAPABLE (1<<2) +#define WL_RM_FLAG_REFUSED (1<<3) + +/* flags */ +#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */ + +#define WLC_CIS_DEFAULT 0 /* built-in default */ +#define WLC_CIS_SROM 1 /* source is sprom */ +#define WLC_CIS_OTP 2 /* source is otp */ + +/* PCL - Power Control Loop */ +/* current gain setting is replaced by user input */ +#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */ +#define WL_ATTEN_PCL_ON 1 /* turn on PCL */ +/* current gain setting is maintained */ +#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */ + +/* defines used by poweridx iovar - it controls power in a-band */ +/* current gain setting is maintained */ +#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */ +#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */ +#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */ +#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */ +/* value >= 0 causes + * - input to be set to that value + * - PCL to be off + */ + +#define BCM_MAC_STATUS_INDICATION (0x40010200L) + +/* Values for TX Filter override mode */ +#define WLC_TXFILTER_OVERRIDE_DISABLED 0 +#define WLC_TXFILTER_OVERRIDE_ENABLED 1 + +/* magic pattern used for mismatch driver and wl */ +#define WL_TXFIFO_SZ_MAGIC 0xa5a5 + +/* check this magic number */ +#define WLC_IOCTL_MAGIC 0x14e46c77 + +/* bss_info_cap_t flags */ +#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */ +#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */ +#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info received on channel (vs offchannel) */ +#define WL_BSS_FLAGS_HS20 0x08 /* hotspot 2.0 capable */ +#define WL_BSS_FLAGS_RSSI_INVALID 0x10 /* BSS contains invalid RSSI */ +#define WL_BSS_FLAGS_RSSI_INACCURATE 0x20 /* BSS contains inaccurate RSSI */ +#define WL_BSS_FLAGS_SNR_INVALID 0x40 /* BSS contains invalid SNR */ +#define 
WL_BSS_FLAGS_NF_INVALID 0x80 /* BSS contains invalid noise floor */ + +/* bit definitions for bcnflags in wl_bss_info */ +#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT 0x01 /* beacon had IE, accessnet valid */ +#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */ + +/* bssinfo flag for nbss_cap */ +#define VHT_BI_SGI_80MHZ 0x00000100 +#define VHT_BI_80MHZ 0x00000200 +#define VHT_BI_160MHZ 0x00000400 +#define VHT_BI_8080MHZ 0x00000800 + +/* reference to wl_ioctl_t struct used by usermode driver */ +#define ioctl_subtype set /* subtype param */ +#define ioctl_pid used /* pid param */ +#define ioctl_status needed /* status param */ + +/* Enumerate crypto algorithms */ +#define CRYPTO_ALGO_OFF 0 +#define CRYPTO_ALGO_WEP1 1 +#define CRYPTO_ALGO_TKIP 2 +#define CRYPTO_ALGO_WEP128 3 +#define CRYPTO_ALGO_AES_CCM 4 +#define CRYPTO_ALGO_AES_OCB_MSDU 5 +#define CRYPTO_ALGO_AES_OCB_MPDU 6 +#if !defined(BCMCCX) && !defined(BCMEXTCCX) +#define CRYPTO_ALGO_NALG 7 +#else +#define CRYPTO_ALGO_CKIP 7 +#define CRYPTO_ALGO_CKIP_MMH 8 +#define CRYPTO_ALGO_WEP_MMH 9 +#define CRYPTO_ALGO_NALG 10 +#endif /* !BCMCCX && !BCMEXTCCX */ + +#define CRYPTO_ALGO_SMS4 11 +#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */ +#define CRYPTO_ALGO_BIP 13 /* 802.11w BIP (aes cmac) */ + +#define CRYPTO_ALGO_AES_GCM 14 /* 128 bit GCM */ +#define CRYPTO_ALGO_AES_CCM256 15 /* 256 bit CCM */ +#define CRYPTO_ALGO_AES_GCM256 16 /* 256 bit GCM */ +#define CRYPTO_ALGO_BIP_CMAC256 17 /* 256 bit BIP CMAC */ +#define CRYPTO_ALGO_BIP_GMAC 18 /* 128 bit BIP GMAC */ +#define CRYPTO_ALGO_BIP_GMAC256 19 /* 256 bit BIP GMAC */ + +#define CRYPTO_ALGO_NONE CRYPTO_ALGO_OFF + +/* algo bit vector */ +#define KEY_ALGO_MASK(_algo) (1 << _algo) + +#if defined(BCMCCX) || defined(BCMEXTCCX) +#define KEY_ALGO_MASK_CCX (KEY_ALGO_MASK(CRYPTO_ALGO_CKIP) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_CKIP_MMH) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_WEP_MMH)) +#endif /* defined(BCMCCX) || 
defined(BCMEXTCCX) */ + +#define KEY_ALGO_MASK_WEP (KEY_ALGO_MASK(CRYPTO_ALGO_WEP1) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_WEP128) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_NALG)) + +#define KEY_ALGO_MASK_AES (KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM256) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256)) +#define KEY_ALGO_MASK_TKIP (KEY_ALGO_MASK(CRYPTO_ALGO_TKIP)) +#define KEY_ALGO_MASK_WAPI (KEY_ALGO_MASK(CRYPTO_ALGO_SMS4)) + +#define WSEC_GEN_MIC_ERROR 0x0001 +#define WSEC_GEN_REPLAY 0x0002 +#define WSEC_GEN_ICV_ERROR 0x0004 +#define WSEC_GEN_MFP_ACT_ERROR 0x0008 +#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010 +#define WSEC_GEN_MFP_DEAUTH_ERROR 0x0020 + +#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */ +#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */ +#if defined(BCMCCX) || defined(BCMEXTCCX) +#define WL_CKIP_KP (1 << 4) /* CMIC */ +#define WL_CKIP_MMH (1 << 5) /* CKIP */ +#else +#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */ +#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */ +#endif /* BCMCCX || BCMEXTCCX */ +#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */ + +/* wireless security bitvec */ +#define WSEC_NONE 0x0 +#define WEP_ENABLED 0x0001 +#define TKIP_ENABLED 0x0002 +#define AES_ENABLED 0x0004 +#define WSEC_SWFLAG 0x0008 +#ifdef BCMCCX +#define CKIP_KP_ENABLED 0x0010 +#define CKIP_MIC_ENABLED 0x0020 +#endif /* BCMCCX */ +#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */ +#ifdef BCMWAPI_WPI +#define SMS4_ENABLED 0x0100 +#endif /* BCMWAPI_WPI */ + +#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED) +#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED) +#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED) + +/* Macros to check if algorithm is enabled */ +#define WSEC_INFO_ALGO_ENABLED(_wi, _algo) \ + (_wi).cur_algos & (1 << CRYPTO_ALGO_##_algo) + 
+#define WSEC_INFO_ALGO_NONE(_wi) (((_wi).cur_algos) == 0) + +#ifdef BCMCCX +#define WSEC_CKIP_KP_ENABLED(wsec) ((wsec) & CKIP_KP_ENABLED) +#define WSEC_CKIP_MIC_ENABLED(wsec) ((wsec) & CKIP_MIC_ENABLED) +#define WSEC_CKIP_ENABLED(wsec) ((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED)) + +#ifdef BCMWAPI_WPI +#define WSEC_ENABLED(wsec) \ + ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | \ + CKIP_MIC_ENABLED | SMS4_ENABLED)) +#else /* BCMWAPI_WPI */ +#define WSEC_ENABLED(wsec) \ + ((wsec) & \ + (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED)) +#endif /* BCMWAPI_WPI */ +#else /* defined BCMCCX */ +#ifdef BCMWAPI_WPI +#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED)) +#else /* BCMWAPI_WPI */ +#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) +#endif /* BCMWAPI_WPI */ +#endif /* BCMCCX */ + +#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED) +#ifdef BCMWAPI_WAI +#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED) +#endif /* BCMWAPI_WAI */ + +/* Following macros are not used any more. 
Just kept here to + * avoid build issue in BISON/CARIBOU branch + */ +#define MFP_CAPABLE 0x0200 +#define MFP_REQUIRED 0x0400 +#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */ + +/* WPA authentication mode bitvec */ +#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */ +#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */ +#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */ +#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */ +#if defined(BCMCCX) || defined(BCMEXTCCX) +#define WPA_AUTH_CCKM 0x0008 /* CCKM */ +#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */ +#endif /* BCMCCX || BCMEXTCCX */ +/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */ +#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */ +#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */ +#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */ +#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */ +#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI) +#define WPA_AUTH_WAPI 0x0400 +#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */ +#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */ +#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */ +#endif /* BCMWAPI_WAI || BCMWAPI_WPI */ +#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */ +#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */ +#define WPA2_AUTH_FT 0x4000 /* Fast Transition. 
*/ +#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */ +#define WPA2_AUTH_FILS_SHA256 0x10000 /* FILS with SHA256 key derivation */ +#define WPA2_AUTH_FILS_SHA384 0x20000 /* FILS with SHA384 key derivation */ +#define WPA2_AUTH_IS_FILS(auth) ((auth) & (WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FILS_SHA384)) +#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */ +#define WPA3_AUTH_SAE_FBT 0x80000 /* SAE with FT */ +#define WPA3_AUTH_OWE 0x100000 /* OWE */ +#define WPA3_AUTH_1X_SUITE_B_SHA256 0x200000 /* Suite B SHA256 */ +#define WPA3_AUTH_1X_SUITE_B_SHA384 0x400000 /* Suite B-192 SHA384 */ +#define WPA3_AUTH_PSK_SHA384 0x800000 /* PSK with SHA384 key derivation */ + +/* WPA2_AUTH_SHA256 not used anymore. Just kept here to avoid build issue in DINGO */ +#define WPA2_AUTH_SHA256 0x8000 +#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ + +/* pmkid */ +#define MAXPMKID 16 + +/* SROM12 changes */ +#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ + +#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ +#define WLC_IOCTL_MEDLEN 1896 /* "med" length ioctl buffer required */ +#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF) +#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */ +#else +#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */ +#endif // endif +#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 8192 + +#define WLC_IOCTL_NANRESP_MAXLEN 4096u /* "max" length nan ioctl resp buffer required */ +#define WLC_IOCTL_NANRESP_MEDLEN 800u /* "med" length nan ioctl resp buffer required */ + +/* common ioctl definitions */ +#define WLC_GET_MAGIC 0 +#define WLC_GET_VERSION 1 +#define WLC_UP 2 +#define WLC_DOWN 3 +#define WLC_GET_LOOP 4 +#define WLC_SET_LOOP 5 +#define WLC_DUMP 6 +#define WLC_GET_MSGLEVEL 7 +#define WLC_SET_MSGLEVEL 8 +#define WLC_GET_PROMISC 9 +#define WLC_SET_PROMISC 10 +/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */ 
+#define WLC_GET_RATE 12 +#define WLC_GET_MAX_RATE 13 +#define WLC_GET_INSTANCE 14 +/* #define WLC_GET_FRAG 15 */ /* no longer supported */ +/* #define WLC_SET_FRAG 16 */ /* no longer supported */ +/* #define WLC_GET_RTS 17 */ /* no longer supported */ +/* #define WLC_SET_RTS 18 */ /* no longer supported */ +#define WLC_GET_INFRA 19 +#define WLC_SET_INFRA 20 +#define WLC_GET_AUTH 21 +#define WLC_SET_AUTH 22 +#define WLC_GET_BSSID 23 +#define WLC_SET_BSSID 24 +#define WLC_GET_SSID 25 +#define WLC_SET_SSID 26 +#define WLC_RESTART 27 +#define WLC_TERMINATED 28 +/* #define WLC_DUMP_SCB 28 */ /* no longer supported */ +#define WLC_GET_CHANNEL 29 +#define WLC_SET_CHANNEL 30 +#define WLC_GET_SRL 31 +#define WLC_SET_SRL 32 +#define WLC_GET_LRL 33 +#define WLC_SET_LRL 34 +#define WLC_GET_PLCPHDR 35 +#define WLC_SET_PLCPHDR 36 +#define WLC_GET_RADIO 37 +#define WLC_SET_RADIO 38 +#define WLC_GET_PHYTYPE 39 +#define WLC_DUMP_RATE 40 +#define WLC_SET_RATE_PARAMS 41 +#define WLC_GET_FIXRATE 42 +#define WLC_SET_FIXRATE 43 +/* #define WLC_GET_WEP 42 */ /* no longer supported */ +/* #define WLC_SET_WEP 43 */ /* no longer supported */ +#define WLC_GET_KEY 44 +#define WLC_SET_KEY 45 +#define WLC_GET_REGULATORY 46 +#define WLC_SET_REGULATORY 47 +#define WLC_GET_PASSIVE_SCAN 48 +#define WLC_SET_PASSIVE_SCAN 49 +#define WLC_SCAN 50 +#define WLC_SCAN_RESULTS 51 +#define WLC_DISASSOC 52 +#define WLC_REASSOC 53 +#define WLC_GET_ROAM_TRIGGER 54 +#define WLC_SET_ROAM_TRIGGER 55 +#define WLC_GET_ROAM_DELTA 56 +#define WLC_SET_ROAM_DELTA 57 +#define WLC_GET_ROAM_SCAN_PERIOD 58 +#define WLC_SET_ROAM_SCAN_PERIOD 59 +#define WLC_EVM 60 /* diag */ +#define WLC_GET_TXANT 61 +#define WLC_SET_TXANT 62 +#define WLC_GET_ANTDIV 63 +#define WLC_SET_ANTDIV 64 +/* #define WLC_GET_TXPWR 65 */ /* no longer supported */ +/* #define WLC_SET_TXPWR 66 */ /* no longer supported */ +#define WLC_GET_CLOSED 67 +#define WLC_SET_CLOSED 68 +#define WLC_GET_MACLIST 69 +#define WLC_SET_MACLIST 70 +#define WLC_GET_RATESET 
71 +#define WLC_SET_RATESET 72 +/* #define WLC_GET_LOCALE 73 */ /* no longer supported */ +#define WLC_LONGTRAIN 74 +#define WLC_GET_BCNPRD 75 +#define WLC_SET_BCNPRD 76 +#define WLC_GET_DTIMPRD 77 +#define WLC_SET_DTIMPRD 78 +#define WLC_GET_SROM 79 +#define WLC_SET_SROM 80 +#define WLC_GET_WEP_RESTRICT 81 +#define WLC_SET_WEP_RESTRICT 82 +#define WLC_GET_COUNTRY 83 +#define WLC_SET_COUNTRY 84 +#define WLC_GET_PM 85 +#define WLC_SET_PM 86 +#define WLC_GET_WAKE 87 +#define WLC_SET_WAKE 88 +/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */ +#define WLC_GET_FORCELINK 90 /* ndis only */ +#define WLC_SET_FORCELINK 91 /* ndis only */ +#define WLC_FREQ_ACCURACY 92 /* diag */ +#define WLC_CARRIER_SUPPRESS 93 /* diag */ +#define WLC_GET_PHYREG 94 +#define WLC_SET_PHYREG 95 +#define WLC_GET_RADIOREG 96 +#define WLC_SET_RADIOREG 97 +#define WLC_GET_REVINFO 98 +#define WLC_GET_UCANTDIV 99 +#define WLC_SET_UCANTDIV 100 +#define WLC_R_REG 101 +#define WLC_W_REG 102 +/* #define WLC_DIAG_LOOPBACK 103 old tray diag */ +/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */ +#define WLC_GET_MACMODE 105 +#define WLC_SET_MACMODE 106 +#define WLC_GET_MONITOR 107 +#define WLC_SET_MONITOR 108 +#define WLC_GET_GMODE 109 +#define WLC_SET_GMODE 110 +#define WLC_GET_LEGACY_ERP 111 +#define WLC_SET_LEGACY_ERP 112 +#define WLC_GET_RX_ANT 113 +#define WLC_GET_CURR_RATESET 114 /* current rateset */ +#define WLC_GET_SCANSUPPRESS 115 +#define WLC_SET_SCANSUPPRESS 116 +#define WLC_GET_AP 117 +#define WLC_SET_AP 118 +#define WLC_GET_EAP_RESTRICT 119 +#define WLC_SET_EAP_RESTRICT 120 +#define WLC_SCB_AUTHORIZE 121 +#define WLC_SCB_DEAUTHORIZE 122 +#define WLC_GET_WDSLIST 123 +#define WLC_SET_WDSLIST 124 +#define WLC_GET_ATIM 125 +#define WLC_SET_ATIM 126 +#define WLC_GET_RSSI 127 +#define WLC_GET_PHYANTDIV 128 +#define WLC_SET_PHYANTDIV 129 +#define WLC_AP_RX_ONLY 130 +#define WLC_GET_TX_PATH_PWR 131 +#define WLC_SET_TX_PATH_PWR 132 +#define WLC_GET_WSEC 133 +#define 
WLC_SET_WSEC 134 +#define WLC_GET_PHY_NOISE 135 +#define WLC_GET_BSS_INFO 136 +#define WLC_GET_PKTCNTS 137 +#define WLC_GET_LAZYWDS 138 +#define WLC_SET_LAZYWDS 139 +#define WLC_GET_BANDLIST 140 + +#define WLC_GET_BAND 141 +#define WLC_SET_BAND 142 +#define WLC_SCB_DEAUTHENTICATE 143 +#define WLC_GET_SHORTSLOT 144 +#define WLC_GET_SHORTSLOT_OVERRIDE 145 +#define WLC_SET_SHORTSLOT_OVERRIDE 146 +#define WLC_GET_SHORTSLOT_RESTRICT 147 +#define WLC_SET_SHORTSLOT_RESTRICT 148 +#define WLC_GET_GMODE_PROTECTION 149 +#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150 +#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151 +#define WLC_UPGRADE 152 +/* #define WLC_GET_MRATE 153 */ /* no longer supported */ +/* #define WLC_SET_MRATE 154 */ /* no longer supported */ +#define WLC_GET_IGNORE_BCNS 155 +#define WLC_SET_IGNORE_BCNS 156 +#define WLC_GET_SCB_TIMEOUT 157 +#define WLC_SET_SCB_TIMEOUT 158 +#define WLC_GET_ASSOCLIST 159 +#define WLC_GET_CLK 160 +#define WLC_SET_CLK 161 +#define WLC_GET_UP 162 +#define WLC_OUT 163 +#define WLC_GET_WPA_AUTH 164 +#define WLC_SET_WPA_AUTH 165 +#define WLC_GET_UCFLAGS 166 +#define WLC_SET_UCFLAGS 167 +#define WLC_GET_PWRIDX 168 +#define WLC_SET_PWRIDX 169 +#define WLC_GET_TSSI 170 +#define WLC_GET_SUP_RATESET_OVERRIDE 171 +#define WLC_SET_SUP_RATESET_OVERRIDE 172 +/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */ +/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */ +/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */ +/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */ +/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */ +#define WLC_GET_PROTECTION_CONTROL 178 +#define WLC_SET_PROTECTION_CONTROL 179 +#define WLC_GET_PHYLIST 180 +#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */ +#define WLC_DECRYPT_STATUS 182 /* ndis only */ +#define WLC_GET_KEY_SEQ 183 +#define WLC_GET_SCAN_CHANNEL_TIME 184 +#define WLC_SET_SCAN_CHANNEL_TIME 185 +#define WLC_GET_SCAN_UNASSOC_TIME 186 +#define WLC_SET_SCAN_UNASSOC_TIME 
187 +#define WLC_GET_SCAN_HOME_TIME 188 +#define WLC_SET_SCAN_HOME_TIME 189 +#define WLC_GET_SCAN_NPROBES 190 +#define WLC_SET_SCAN_NPROBES 191 +#define WLC_GET_PRB_RESP_TIMEOUT 192 +#define WLC_SET_PRB_RESP_TIMEOUT 193 +#define WLC_GET_ATTEN 194 +#define WLC_SET_ATTEN 195 +#define WLC_GET_SHMEM 196 /* diag */ +#define WLC_SET_SHMEM 197 /* diag */ +/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */ +/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */ +#define WLC_SET_WSEC_TEST 200 +#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201 +#define WLC_TKIP_COUNTERMEASURES 202 +#define WLC_GET_PIOMODE 203 +#define WLC_SET_PIOMODE 204 +#define WLC_SET_ASSOC_PREFER 205 +#define WLC_GET_ASSOC_PREFER 206 +#define WLC_SET_ROAM_PREFER 207 +#define WLC_GET_ROAM_PREFER 208 +#define WLC_SET_LED 209 +#define WLC_GET_LED 210 +#define WLC_GET_INTERFERENCE_MODE 211 +#define WLC_SET_INTERFERENCE_MODE 212 +#define WLC_GET_CHANNEL_QA 213 +#define WLC_START_CHANNEL_QA 214 +#define WLC_GET_CHANNEL_SEL 215 +#define WLC_START_CHANNEL_SEL 216 +#define WLC_GET_VALID_CHANNELS 217 +#define WLC_GET_FAKEFRAG 218 +#define WLC_SET_FAKEFRAG 219 +#define WLC_GET_PWROUT_PERCENTAGE 220 +#define WLC_SET_PWROUT_PERCENTAGE 221 +#define WLC_SET_BAD_FRAME_PREEMPT 222 +#define WLC_GET_BAD_FRAME_PREEMPT 223 +#define WLC_SET_LEAP_LIST 224 +#define WLC_GET_LEAP_LIST 225 +#define WLC_GET_CWMIN 226 +#define WLC_SET_CWMIN 227 +#define WLC_GET_CWMAX 228 +#define WLC_SET_CWMAX 229 +#define WLC_GET_WET 230 +#define WLC_SET_WET 231 +#define WLC_GET_PUB 232 +/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */ +/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */ +#define WLC_GET_KEY_PRIMARY 235 +#define WLC_SET_KEY_PRIMARY 236 + +/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */ +#define WLC_GET_ACI_ARGS 238 +#define WLC_SET_ACI_ARGS 239 +#define WLC_UNSET_CALLBACK 240 +#define WLC_SET_CALLBACK 241 +#define WLC_GET_RADAR 242 +#define 
WLC_SET_RADAR 243 +#define WLC_SET_SPECT_MANAGMENT 244 +#define WLC_GET_SPECT_MANAGMENT 245 +#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */ +#define WLC_WDS_GET_WPA_SUP 247 +#define WLC_SET_CS_SCAN_TIMER 248 +#define WLC_GET_CS_SCAN_TIMER 249 +#define WLC_MEASURE_REQUEST 250 +#define WLC_INIT 251 +#define WLC_SEND_QUIET 252 +#define WLC_KEEPALIVE 253 +#define WLC_SEND_PWR_CONSTRAINT 254 +#define WLC_UPGRADE_STATUS 255 +#define WLC_CURRENT_PWR 256 +#define WLC_GET_SCAN_PASSIVE_TIME 257 +#define WLC_SET_SCAN_PASSIVE_TIME 258 +#define WLC_LEGACY_LINK_BEHAVIOR 259 +#define WLC_GET_CHANNELS_IN_COUNTRY 260 +#define WLC_GET_COUNTRY_LIST 261 +#define WLC_GET_VAR 262 /* get value of named variable */ +#define WLC_SET_VAR 263 /* set named variable to value */ +#define WLC_NVRAM_GET 264 /* deprecated */ +#define WLC_NVRAM_SET 265 +#define WLC_NVRAM_DUMP 266 +#define WLC_REBOOT 267 +#define WLC_SET_WSEC_PMK 268 +#define WLC_GET_AUTH_MODE 269 +#define WLC_SET_AUTH_MODE 270 +#define WLC_GET_WAKEENTRY 271 +#define WLC_SET_WAKEENTRY 272 +#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */ +#define WLC_NVOTPW 274 +#define WLC_OTPW 275 +#define WLC_IOV_BLOCK_GET 276 +#define WLC_IOV_MODULES_GET 277 +#define WLC_SOFT_RESET 278 +#define WLC_GET_ALLOW_MODE 279 +#define WLC_SET_ALLOW_MODE 280 +#define WLC_GET_DESIRED_BSSID 281 +#define WLC_SET_DESIRED_BSSID 282 +#define WLC_DISASSOC_MYAP 283 +#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */ +#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */ +#define WLC_GET_WLC_BSS_INFO 286 /* for Dongle EXT_STA support */ +#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */ +#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */ +#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */ +#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */ +#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */ +#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA 
support */ +#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */ +#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */ +#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */ +#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */ +#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */ +#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */ +#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */ +#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */ +#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */ +#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */ +#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */ +#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */ +/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */ +#define WLC_GET_CMD 309 +/* #define WLC_LAST 310 */ /* Never used - can be reused */ +#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */ +#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */ +/* #define WLC_GET_WAI_RESTRICT 313 */ +/* #define WLC_SET_WAI_RESTRICT 314 */ +/* #define WLC_SET_WAI_REKEY 315 */ +#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */ +#define WLC_GET_NAT_STATE 317 +#define WLC_GET_TXBF_RATESET 318 +#define WLC_SET_TXBF_RATESET 319 +#define WLC_SCAN_CQ 320 +#define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */ +#define WLC_DUMP_RATESET 322 +#define WLC_ECHO 323 +#define WLC_LAST 324 +#define WLC_SPEC_FLAG 0x80000000 /* For some special IOCTL */ +#ifndef EPICTRL_COOKIE +#define EPICTRL_COOKIE 0xABADCEDE +#endif // endif + +/* vx wlc ioctl's offset */ +#define CMN_IOCTL_OFF 0x180 + +/* + * custom OID support + * + * 
0xFF - implementation specific OID + * 0xE4 - first byte of Broadcom PCI vendor ID + * 0x14 - second byte of Broadcom PCI vendor ID + * 0xXX - the custom OID number + */ + +/* begin 0x1f values beyond the start of the ET driver range. */ +#define WL_OID_BASE 0xFFE41420 + +/* NDIS overrides */ +#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE) +#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK) +#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK) +#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH) +#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS) +#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR) +#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM) + +/* EXT_STA Dongle suuport */ +#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC) +#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS) +#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY) +#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY) +#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME) +#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID) +#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE) +#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING) +#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING) +#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON) +#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON) +#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE) +#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC) +#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS) +#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS) + +/* NAT filter driver support */ +#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG) +#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE) + +#define WL_DECRYPT_STATUS_SUCCESS 1 +#define WL_DECRYPT_STATUS_FAILURE 2 
+#define WL_DECRYPT_STATUS_UNKNOWN 3 + +/* allows user-mode app to poll the status of USB image upgrade */ +#define WLC_UPGRADE_SUCCESS 0 +#define WLC_UPGRADE_PENDING 1 + +/* WLC_GET_AUTH, WLC_SET_AUTH values */ +#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */ +#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */ +#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */ +#define WL_AUTH_FILS_SHARED 4 /* d11 fils shared key authentication */ +#define WL_AUTH_FILS_SHARED_PFS 5 /* d11 fils shared key w/ pfs authentication */ +#define WL_AUTH_FILS_PUBLIC 6 /* d11 fils public key authentication */ + +/* a large TX Power as an init value to factor out of MIN() calculations, + * keep low enough to fit in an int8, units are .25 dBm + */ +#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */ + +/* "diag" iovar argument and error code */ +#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */ +#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */ +#define WL_DIAG_MEMORY 3 /* d11 memory test */ +#define WL_DIAG_LED 4 /* LED test */ +#define WL_DIAG_REG 5 /* d11/phy register test */ +#define WL_DIAG_SROM 6 /* srom read/crc test */ +#define WL_DIAG_DMA 7 /* DMA test */ +#define WL_DIAG_LOOPBACK_EXT 8 /* enhenced d11 loopback data test */ + +#define WL_DIAGERR_SUCCESS 0 +#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */ +#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */ +#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */ +#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */ +#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */ +#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */ +#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */ +#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */ +#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */ +#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */ + +#define 
WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */ +#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */ + +/* band types */ +#define WLC_BAND_AUTO 0 /* auto-select */ +#define WLC_BAND_5G 1 /* 5 Ghz */ +#define WLC_BAND_2G 2 /* 2.4 Ghz */ +#define WLC_BAND_ALL 3 /* all bands */ +#define WLC_BAND_6G 4 /* 6 Ghz */ +#define WLC_BAND_INVALID -1 /* Invalid band */ + +/* band range returned by band_range iovar */ +#define WL_CHAN_FREQ_RANGE_2G 0 +#define WL_CHAN_FREQ_RANGE_5GL 1 +#define WL_CHAN_FREQ_RANGE_5GM 2 +#define WL_CHAN_FREQ_RANGE_5GH 3 + +#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4 +#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5 +#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6 +#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7 +#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8 + +#define WL_CHAN_FREQ_RANGE_5G_BAND0 1 +#define WL_CHAN_FREQ_RANGE_5G_BAND1 2 +#define WL_CHAN_FREQ_RANGE_5G_BAND2 3 +#define WL_CHAN_FREQ_RANGE_5G_BAND3 4 +#define WL_CHAN_FREQ_RANGE_5G_4BAND 5 + +/* SROM12 */ +#define WL_CHAN_FREQ_RANGE_5G_BAND4 5 +#define WL_CHAN_FREQ_RANGE_2G_40 6 +#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7 +#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8 +#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9 +#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10 +#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11 +#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12 +#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13 +#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14 +#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15 +#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16 + +#define WL_CHAN_FREQ_RANGE_5G_5BAND 18 +#define WL_CHAN_FREQ_RANGE_5G_5BAND_40 19 +#define WL_CHAN_FREQ_RANGE_5G_5BAND_80 20 + +#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */ +#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */ +#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. 
deny unspecified) */ + +/* + * 54g modes (basic bits may still be overridden) + * + * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11 + * Preamble: Long + * Shortslot: Off + * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 + * Extended Rateset: 6, 9, 12, 48 + * Preamble: Long + * Shortslot: Auto + * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54 + * Extended Rateset: 6b, 9, 12b, 48 + * Preamble: Short required + * Shortslot: Auto + * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 + * Extended Rateset: 6, 9, 12, 48 + * Preamble: Long + * Shortslot: On + * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54 + * Preamble: Short required + * Shortslot: On and required + * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b + * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54 + * Preamble: Long + * Shortslot: Auto + */ +#define GMODE_LEGACY_B 0 +#define GMODE_AUTO 1 +#define GMODE_ONLY 2 +#define GMODE_B_DEFERRED 3 +#define GMODE_PERFORMANCE 4 +#define GMODE_LRS 5 +#define GMODE_MAX 6 + +/* values for PLCPHdr_override */ +#define WLC_PLCP_AUTO -1 +#define WLC_PLCP_SHORT 0 +#define WLC_PLCP_LONG 1 + +/* values for g_protection_override and n_protection_override */ +#define WLC_PROTECTION_AUTO -1 +#define WLC_PROTECTION_OFF 0 +#define WLC_PROTECTION_ON 1 +#define WLC_PROTECTION_MMHDR_ONLY 2 +#define WLC_PROTECTION_CTS_ONLY 3 + +/* values for g_protection_control and n_protection_control */ +#define WLC_PROTECTION_CTL_OFF 0 +#define WLC_PROTECTION_CTL_LOCAL 1 +#define WLC_PROTECTION_CTL_OVERLAP 2 + +/* values for n_protection */ +#define WLC_N_PROTECTION_OFF 0 +#define WLC_N_PROTECTION_OPTIONAL 1 +#define WLC_N_PROTECTION_20IN40 2 +#define WLC_N_PROTECTION_MIXEDMODE 3 + +/* values for n_preamble_type */ +#define WLC_N_PREAMBLE_MIXEDMODE 0 +#define WLC_N_PREAMBLE_GF 1 +#define WLC_N_PREAMBLE_GF_BRCM 2 + +/* values for band specific 40MHz capabilities (deprecated) */ +#define WLC_N_BW_20ALL 0 +#define WLC_N_BW_40ALL 1 +#define 
WLC_N_BW_20IN2G_40IN5G 2 + +#define WLC_BW_20MHZ_BIT (1<<0) +#define WLC_BW_40MHZ_BIT (1<<1) +#define WLC_BW_80MHZ_BIT (1<<2) +#define WLC_BW_160MHZ_BIT (1<<3) +#define WLC_BW_10MHZ_BIT (1<<4) +#define WLC_BW_5MHZ_BIT (1<<5) +#define WLC_BW_2P5MHZ_BIT (1<<6) +/* Bandwidth capabilities */ +#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \ + WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_2P5MHZ (WLC_BW_2P5MHZ_BIT) +#define WLC_BW_CAP_5MHZ (WLC_BW_5MHZ_BIT) +#define WLC_BW_CAP_10MHZ (WLC_BW_10MHZ_BIT) +#define WLC_BW_CAP_UNRESTRICTED 0xFF + +#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_2P5MHZ(bw_cap)(((bw_cap) & WLC_BW_2P5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_5MHZ(bw_cap) (((bw_cap) & WLC_BW_5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_10MHZ(bw_cap) (((bw_cap) & WLC_BW_10MHZ_BIT) ? 
TRUE : FALSE) +/* values to force tx/rx chain */ +#define WLC_N_TXRX_CHAIN0 0 +#define WLC_N_TXRX_CHAIN1 1 + +/* bitflags for SGI support (sgi_rx iovar) */ +#define WLC_N_SGI_20 0x01 +#define WLC_N_SGI_40 0x02 +#define WLC_VHT_SGI_80 0x04 +#define WLC_VHT_SGI_160 0x08 + +/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */ +#define WLC_SGI_ALL 0x02 + +#define LISTEN_INTERVAL 10 +/* interference mitigation options */ +#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */ +#define INTERFERE_NONE 0 /* off */ +#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */ +#define WLAN_MANUAL 2 /* ACI: no auto detection */ +#define WLAN_AUTO 3 /* ACI: auto detect */ +#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */ +#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */ + +/* interfernece mode bit-masks (ACPHY) */ +#define ACPHY_ACI_GLITCHBASED_DESENSE 1 /* bit 0 */ +#define ACPHY_ACI_HWACI_PKTGAINLMT 2 /* bit 1 */ +#define ACPHY_ACI_W2NB_PKTGAINLMT 4 /* bit 2 */ +#define ACPHY_ACI_PREEMPTION 8 /* bit 3 */ +#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */ +#define ACPHY_LPD_PREEMPTION 32 /* bit 5 */ +#define ACPHY_HWOBSS_MITIGATION 64 /* bit 6 */ +#define ACPHY_ACI_MAX_MODE 127 + +/* AP environment */ +#define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */ +#define AP_ENV_DENSE 1 /* "Corporate" or other AP dense environment */ +#define AP_ENV_SPARSE 2 /* "Home" or other sparse environment */ +#define AP_ENV_INDETERMINATE 3 /* AP environment hasn't been identified */ + +#define TRIGGER_NOW 0 +#define TRIGGER_CRS 0x01 +#define TRIGGER_CRSDEASSERT 0x02 +#define TRIGGER_GOODFCS 0x04 +#define TRIGGER_BADFCS 0x08 +#define TRIGGER_BADPLCP 0x10 +#define TRIGGER_CRSGLITCH 0x20 + +#define WL_SAMPLEDATA_HEADER_TYPE 1 +#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */ +#define WL_SAMPLEDATA_TYPE 2 +#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */ 
+#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */ + +/* WL_OTA START */ +#define WL_OTA_ARG_PARSE_BLK_SIZE 1200 +#define WL_OTA_TEST_MAX_NUM_RATE 30 +#define WL_OTA_TEST_MAX_NUM_SEQ 100 +#define WL_OTA_TEST_MAX_NUM_RSSI 85 +#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */ + +/* radar iovar SET defines */ +#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */ +#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */ +#define WL_RADAR_SIMULATED 2 /* force radar detector to declare + * detection once + */ +#define WL_RADAR_SIMULATED_SC 3 /* force radar detector to declare + * detection once on scan core + * if available and active + */ +#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */ +#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */ +#define WL_ANT_HT_RX_MAX 4 /* max 4 receive antennas/cores */ +#define WL_ANT_IDX_1 0 /* antenna index 1 */ +#define WL_ANT_IDX_2 1 /* antenna index 2 */ + +#ifndef WL_RSSI_ANT_MAX +#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */ +#elif WL_RSSI_ANT_MAX != 4 +#error "WL_RSSI_ANT_MAX does not match" +#endif // endif + +/* dfs_status iovar-related defines */ + +/* cac - channel availability check, + * ism - in-service monitoring + * csa - channel switching announcement + */ + +/* cac state values */ +#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */ +#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */ +#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */ +#define WL_DFS_CACSTATE_CSA 3 /* csa */ +#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */ +#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */ +#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */ +#define WL_DFS_CACSTATES 7 /* this many states exist */ + +/* Defines used with channel_bandwidth for curpower */ +#define WL_BW_20MHZ 0 +#define WL_BW_40MHZ 1 +#define WL_BW_80MHZ 2 +#define WL_BW_160MHZ 3 +#define WL_BW_8080MHZ 4 +#define WL_BW_2P5MHZ 5 +#define WL_BW_5MHZ 6 +#define 
WL_BW_10MHZ 7 + +/* tx_power_t.flags bits */ +#define WL_TX_POWER_F_ENABLED 1 +#define WL_TX_POWER_F_HW 2 +#define WL_TX_POWER_F_MIMO 4 +#define WL_TX_POWER_F_SISO 8 +#define WL_TX_POWER_F_HT 0x10 +#define WL_TX_POWER_F_VHT 0x20 +#define WL_TX_POWER_F_OPENLOOP 0x40 +#define WL_TX_POWER_F_PROP11NRATES 0x80 +#define WL_TX_POWER_F_UNIT_QDBM 0x100 +#define WL_TX_POWER_F_TXCAP 0x200 +/* Message levels */ +#define WL_ERROR_VAL 0x00000001 +#define WL_TRACE_VAL 0x00000002 +#define WL_PRHDRS_VAL 0x00000004 +#define WL_PRPKT_VAL 0x00000008 +#define WL_INFORM_VAL 0x00000010 +#define WL_TMP_VAL 0x00000020 +#define WL_OID_VAL 0x00000040 +#define WL_RATE_VAL 0x00000080 +#define WL_ASSOC_VAL 0x00000100 +#define WL_PRUSR_VAL 0x00000200 +#define WL_PS_VAL 0x00000400 +#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */ +#define WL_PORT_VAL 0x00001000 +#define WL_DUAL_VAL 0x00002000 +#define WL_WSEC_VAL 0x00004000 +#define WL_WSEC_DUMP_VAL 0x00008000 +#define WL_LOG_VAL 0x00010000 +#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */ +#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */ +#define WL_REGULATORY_VAL 0x00080000 +#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lackof bits */ +#define WL_TAF_VAL 0x00100000 +#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */ +#define WL_MPC_VAL 0x00400000 +#define WL_APSTA_VAL 0x00800000 +#define WL_DFS_VAL 0x01000000 +#define WL_BA_VAL 0x00000000 /* retired in TOT on 6/14/2010 */ +#define WL_MUMIMO_VAL 0x02000000 /* Using retired WL_BA_VAL */ +#define WL_ACI_VAL 0x04000000 +#define WL_PRMAC_VAL 0x04000000 +#define WL_MBSS_VAL 0x04000000 +#define WL_CAC_VAL 0x08000000 +#define WL_AMSDU_VAL 0x10000000 +#define 
WL_AMPDU_VAL 0x20000000 +#define WL_FFPLD_VAL 0x40000000 +#define WL_ROAM_EXP_VAL 0x80000000 + +/* wl_msg_level is full. For new bits take the next one and AND with + * wl_msg_level2 in wl_dbg.h + */ +#define WL_DPT_VAL 0x00000001 +/* re-using WL_DPT_VAL */ +/* re-using WL_MESH_VAL */ +#define WL_NATOE_VAL 0x00000001 +#define WL_MESH_VAL 0x00000001 +#define WL_SCAN_VAL 0x00000002 +#define WL_WOWL_VAL 0x00000004 +#define WL_COEX_VAL 0x00000008 +#define WL_RTDC_VAL 0x00000010 +#define WL_PROTO_VAL 0x00000020 +#define WL_SWDIV_VAL 0x00000040 +#define WL_CHANINT_VAL 0x00000080 +#define WL_WMF_VAL 0x00000100 +#define WL_P2P_VAL 0x00000200 +#define WL_ITFR_VAL 0x00000400 +#define WL_MCHAN_VAL 0x00000800 +#define WL_TDLS_VAL 0x00001000 +#define WL_MCNX_VAL 0x00002000 +#define WL_PROT_VAL 0x00004000 +#define WL_PSTA_VAL 0x00008000 +#define WL_TSO_VAL 0x00010000 +#define WL_TRF_MGMT_VAL 0x00020000 +#define WL_LPC_VAL 0x00040000 +#define WL_L2FILTER_VAL 0x00080000 +#define WL_TXBF_VAL 0x00100000 +#define WL_P2PO_VAL 0x00200000 +#define WL_TBTT_VAL 0x00400000 +#define WL_FBT_VAL 0x00800000 +#define WL_RRM_VAL 0x00800000 /* reuse */ +#define WL_MQ_VAL 0x01000000 +/* This level is currently used in Phoenix2 only */ +#define WL_SRSCAN_VAL 0x02000000 +#define WL_WNM_VAL 0x04000000 +/* re-using WL_WNM_VAL for MBO */ +#define WL_MBO_VAL 0x04000000 +/* re-using WL_SRSCAN_VAL */ +#define WL_RANDMAC_VAL 0x02000000 +#define WL_UNUSED_VAL 0x10000000 /* Was a duplicate for WL_LPC_VAL. Removed */ +#define WL_NET_DETECT_VAL 0x20000000 +#define WL_OCE_VAL 0x20000000 /* reuse */ +#define WL_PCIE_VAL 0x40000000 +#define WL_PMDUR_VAL 0x80000000 +/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier + * rather than a message-type of its own + */ +#define WL_TIMESTAMP_VAL 0x80000000 + +/* wl_msg_level2 is full. 
For new bits take the next one and AND with + * wl_msg_level3 in wl_dbg.h + */ +#define WL_ASSOC_AP_VAL 0x00000001 +#define WL_FILS_VAL 0x00000002 + +/* max # of leds supported by GPIO (gpio pin# == led index#) */ +#define WL_LED_NUMGPIO 32 /* gpio 0-31 */ + +/* led per-pin behaviors */ +#define WL_LED_OFF 0 /* always off */ +#define WL_LED_ON 1 /* always on */ +#define WL_LED_ACTIVITY 2 /* activity */ +#define WL_LED_RADIO 3 /* radio enabled */ +#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */ +#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */ +#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */ +#define WL_LED_WI1 7 +#define WL_LED_WI2 8 +#define WL_LED_WI3 9 +#define WL_LED_ASSOC 10 /* associated state indicator */ +#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */ +#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */ +#define WL_LED_WI4 13 +#define WL_LED_WI5 14 +#define WL_LED_BLINKSLOW 15 /* blink slow */ +#define WL_LED_BLINKMED 16 /* blink med */ +#define WL_LED_BLINKFAST 17 /* blink fast */ +#define WL_LED_BLINKCUSTOM 18 /* blink custom */ +#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */ +#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */ + /* keep on for 300 sec */ +#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */ +#define WL_LED_WI6 22 +#define WL_LED_WI7 23 +#define WL_LED_WI8 24 +#define WL_LED_NUMBEHAVIOR 25 + +/* led behavior numeric value format */ +#define WL_LED_BEH_MASK 0x3f /* behavior mask */ +#define WL_LED_PMU_OVERRIDE 0x40 /* need to set PMU Override bit for the GPIO */ +#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */ + +/* number of bytes needed to define a proper bit mask for MAC event reporting */ +#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define BCMIO_NBBY 8 +#define WL_EVENTING_MASK_LEN (16+4) + +#define WL_EVENTING_MASK_EXT_LEN \ + MAX(WL_EVENTING_MASK_LEN, (ROUNDUP(WLC_E_LAST, 
NBBY)/NBBY)) + +/* join preference types */ +#define WL_JOIN_PREF_RSSI 1 /* by RSSI */ +#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */ +#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */ +#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */ +#define WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */ + +/* band preference */ +#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */ + +/* any multicast cipher suite */ +#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00" + +/* 802.11h measurement types */ +#define WLC_MEASURE_TPC 1 +#define WLC_MEASURE_CHANNEL_BASIC 2 +#define WLC_MEASURE_CHANNEL_CCA 3 +#define WLC_MEASURE_CHANNEL_RPI 4 + +/* regulatory enforcement levels */ +#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */ +#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */ +#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */ +#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */ +/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE + * adoption is done regardless of capability spectrum_management + */ +#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */ + +/* bit position in per_chan_info; these depend on current country/regulatory domain */ +#define WL_CHAN_VALID_HW (1u << 0) /* valid with current HW */ +#define WL_CHAN_VALID_SW (1u << 1) /* valid with current country setting */ +#define WL_CHAN_BAND_5G (1u << 2) /* 5GHz-band channel */ +#define WL_CHAN_RADAR (1u << 3) /* radar sensitive channel */ +#define WL_CHAN_INACTIVE (1u << 4) /* temporarily inactive due to radar */ +#define WL_CHAN_PASSIVE (1u << 5) /* channel is in passive mode */ +#define WL_CHAN_RESTRICTED (1u << 6) /* restricted use channel */ +#define WL_CHAN_RADAR_EU_WEATHER (1u << 7) /* EU Radar weather channel. + * Implies an EU Radar channel. + */ +#define WL_CHAN_CLM_RESTRICTED (1u << 8) /* channel restricted in CLM (i.e. 
by default) */ +#define WL_CHAN_BAND_6G (1u << 9) /* 6GHz-band channel */ +#define WL_CHAN_OOS_SHIFT 24u /* shift for OOS field */ +#define WL_CHAN_OOS_MASK 0xFF000000u /* field specifying minutes remaining for this + * channel's out-of-service period due to radar + * detection + */ + +/* BTC mode used by "btc_mode" iovar */ +#define WL_BTC_DISABLE 0 /* disable BT coexistence */ +#define WL_BTC_FULLTDM 1 /* full TDM COEX */ +#define WL_BTC_ENABLE 1 /* full TDM COEX to maintain backward compatiblity */ +#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */ +#define WL_BTC_LITE 3 /* light weight coex for large isolation platform */ +#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */ +#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */ +#define WL_BTC_DEFAULT 8 /* set the default mode for the device */ +#define WL_INF_BTC_DISABLE 0 +#define WL_INF_BTC_ENABLE 1 +#define WL_INF_BTC_AUTO 3 + +/* BTC wire used by "btc_wire" iovar */ +#define WL_BTC_DEFWIRE 0 /* use default wire setting */ +#define WL_BTC_2WIRE 2 /* use 2-wire BTC */ +#define WL_BTC_3WIRE 3 /* use 3-wire BTC */ +#define WL_BTC_4WIRE 4 /* use 4-wire BTC */ + +/* BTC flags: BTC configuration that can be set by host */ +#define WL_BTC_FLAG_PREMPT (1 << 0) +#define WL_BTC_FLAG_BT_DEF (1 << 1) +#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2) +#define WL_BTC_FLAG_SIM_RSP (1 << 3) +#define WL_BTC_FLAG_PS_PROTECT (1 << 4) +#define WL_BTC_FLAG_SIM_TX_LP (1 << 5) +#define WL_BTC_FLAG_ECI (1 << 6) +#define WL_BTC_FLAG_LIGHT (1 << 7) +#define WL_BTC_FLAG_PARALLEL (1 << 8) + +/* maximum channels returned by the get valid channels iovar */ +#define WL_NUMCHANNELS 64 + +/* max number of chanspecs (used by the iovar to calc. 
buf space) */ +#ifdef WL11AC_80P80 +#define WL_NUMCHANSPECS 206 +#else +#define WL_NUMCHANSPECS 110 +#endif // endif + +/* WDS link local endpoint WPA role */ +#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */ +#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */ +#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */ + +/* Base offset values */ +#define WL_PKT_FILTER_BASE_PKT 0 +#define WL_PKT_FILTER_BASE_END 1 +#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */ +#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */ +#define WL_PKT_FILTER_BASE_ETH_H 4 +#define WL_PKT_FILTER_BASE_ETH_D 5 +#define WL_PKT_FILTER_BASE_ARP_H 6 +#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */ +#define WL_PKT_FILTER_BASE_IP4_H 8 +#define WL_PKT_FILTER_BASE_IP4_D 9 +#define WL_PKT_FILTER_BASE_IP6_H 10 +#define WL_PKT_FILTER_BASE_IP6_D 11 +#define WL_PKT_FILTER_BASE_TCP_H 12 +#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */ +#define WL_PKT_FILTER_BASE_UDP_H 14 +#define WL_PKT_FILTER_BASE_UDP_D 15 +#define WL_PKT_FILTER_BASE_IP6_P 16 +#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */ + +/* String mapping for bases that may be used by applications or debug */ +#define WL_PKT_FILTER_BASE_NAMES \ + { "START", WL_PKT_FILTER_BASE_PKT }, \ + { "END", WL_PKT_FILTER_BASE_END }, \ + { "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \ + { "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \ + { "D11_H", WL_PKT_FILTER_BASE_D11_H }, \ + { "D11_D", WL_PKT_FILTER_BASE_D11_D }, \ + { "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \ + { "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \ + { "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \ + { "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \ + { "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \ + { "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \ + { "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \ + { "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \ + { "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \ + { "UDP_D", WL_PKT_FILTER_BASE_UDP_D } + +/* Flags for a pattern list element */ +#define WL_PKT_FILTER_MFLAG_NEG 0x0001 + +/* + * 
Packet engine interface + */ + +#define WL_PKTENG_PER_TX_START 0x01 +#define WL_PKTENG_PER_TX_STOP 0x02 +#define WL_PKTENG_PER_RX_START 0x04 +#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05 +#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06 +#define WL_PKTENG_PER_RX_STOP 0x08 +#define WL_PKTENG_PER_RU_TX_START 0x09 +#define WL_PKTENG_PER_TRIG_TX_START 0x0a +#define WL_PKTENG_PER_MASK 0xff + +#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */ +#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */ +#define WL_PKTENG_COLLECT 0x400 /* Save last Rx'ed packet */ +#ifdef PKTENG_LONGPKTSZ +/* max pktsz limit for pkteng */ +#define WL_PKTENG_MAXPKTSZ PKTENG_LONGPKTSZ +#else +#define WL_PKTENG_MAXPKTSZ 16384 +#endif // endif + +#define NUM_80211b_RATES 4 +#define NUM_80211ag_RATES 8 +#define NUM_80211n_RATES 32 +#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES) + +/* + * WOWL capability/override settings + */ +#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */ +#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */ +#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */ +#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */ +#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */ +#define WL_WOWL_TST (1 << 5) /* Wakeup after test */ +#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */ +#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */ +#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */ +#define WL_WOWL_ULP_BAILOUT (1 << 8) /* wakeind via unknown pkt by basic ULP-offloads - + * WL_WOWL_ULP_BAILOUT - same as WL_WOWL_PME_GPIO used only for DONGLE BUILDS + */ +#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */ +#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */ +#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */ +#define WL_WOWL_ARPOFFLOAD (1 << 12) /* 
support ARP/NS/keepalive offloading */ +#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */ +#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotaton */ +#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frm received was bcast frame */ +#define WL_WOWL_SCANOL (1 << 16) /* If the bit is set, scan offload is enabled */ +#define WL_WOWL_TCPKEEP_TIME (1 << 17) /* Wakeup on tcpkeep alive timeout */ +#define WL_WOWL_MDNS_CONFLICT (1 << 18) /* Wakeup on mDNS Conflict Resolution */ +#define WL_WOWL_MDNS_SERVICE (1 << 19) /* Wakeup on mDNS Service Connect */ +#define WL_WOWL_TCPKEEP_DATA (1 << 20) /* tcp keepalive got data */ +#define WL_WOWL_FW_HALT (1 << 21) /* Firmware died in wowl mode */ +#define WL_WOWL_ENAB_HWRADIO (1 << 22) /* Enable detection of radio button changes */ +#define WL_WOWL_MIC_FAIL (1 << 23) /* Offloads detected MIC failure(s) */ +#define WL_WOWL_UNASSOC (1 << 24) /* Wakeup in Unassociated state (Net/Magic Pattern) */ +#define WL_WOWL_SECURE (1 << 25) /* Wakeup if received matched secured pattern */ +#define WL_WOWL_EXCESS_WAKE (1 << 26) /* Excess wake */ +#define WL_WOWL_LINKDOWN (1 << 31) /* Link Down indication in WoWL mode */ + +#define WL_WOWL_TCPKEEP (1 << 20) /* temp copy to satisfy automerger */ +#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */ + +#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */ +#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */ + +#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */ +#define MAGIC_PKT_NUM_MAC_ADDRS 16 + +/* Overlap BSS Scan parameters default, minimum, maximum */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */ +#define 
WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */ +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */ + +#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */ + +#define WL_COEX_INFO_MASK 0x07 +#define WL_COEX_INFO_REQ 0x01 +#define WL_COEX_40MHZ_INTOLERANT 0x02 +#define WL_COEX_WIDTH20 0x04 + +#define WLC_RSSI_INVALID 0 /* invalid RSSI value */ + +#define MAX_RSSI_LEVELS 8 + +/* **** EXTLOG **** */ +#define EXTLOG_CUR_VER 0x0100 + +#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */ + +/* log modules (bitmap) */ +#define LOG_MODULE_COMMON 0x0001 +#define LOG_MODULE_ASSOC 0x0002 +#define LOG_MODULE_EVENT 0x0004 +#define LOG_MODULE_MAX 3 /* Update when adding module */ + +/* log levels */ +#define WL_LOG_LEVEL_DISABLE 0 +#define WL_LOG_LEVEL_ERR 1 +#define WL_LOG_LEVEL_WARN 2 +#define WL_LOG_LEVEL_INFO 3 +#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */ + +/* flag */ +#define LOG_FLAG_EVENT 1 + +/* log arg_type */ +#define 
LOG_ARGTYPE_NULL 0 +#define LOG_ARGTYPE_STR 1 /* %s */ +#define LOG_ARGTYPE_INT 2 /* %d */ +#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */ +#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */ + +/* 802.11 Mgmt Packet flags */ +#define VNDR_IE_BEACON_FLAG 0x1 +#define VNDR_IE_PRBRSP_FLAG 0x2 +#define VNDR_IE_ASSOCRSP_FLAG 0x4 +#define VNDR_IE_AUTHRSP_FLAG 0x8 +#define VNDR_IE_PRBREQ_FLAG 0x10 +#define VNDR_IE_ASSOCREQ_FLAG 0x20 +#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */ +#define VNDR_IE_AUTHREQ_FLAG 0x80 +#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */ +#define VNDR_IE_DISASSOC_FLAG 0x200 + +#if defined(WLP2P) +/* P2P Action Frames flags (spec ordered) */ +#define VNDR_IE_GONREQ_FLAG 0x001000 +#define VNDR_IE_GONRSP_FLAG 0x002000 +#define VNDR_IE_GONCFM_FLAG 0x004000 +#define VNDR_IE_INVREQ_FLAG 0x008000 +#define VNDR_IE_INVRSP_FLAG 0x010000 +#define VNDR_IE_DISREQ_FLAG 0x020000 +#define VNDR_IE_DISRSP_FLAG 0x040000 +#define VNDR_IE_PRDREQ_FLAG 0x080000 +#define VNDR_IE_PRDRSP_FLAG 0x100000 + +#define VNDR_IE_P2PAF_SHIFT 12 +#endif /* WLP2P */ + +/* channel interference measurement (chanim) related defines */ + +/* chanim mode */ +#define CHANIM_DISABLE 0 /* disabled */ +#define CHANIM_DETECT 1 /* detection only */ +#define CHANIM_EXT 2 /* external state machine */ +#define CHANIM_ACT 3 /* full internal state machine, detect + act */ +#define CHANIM_MODE_MAX 4 + +/* define for apcs reason code */ +#define APCS_INIT 0 +#define APCS_IOCTL 1 +#define APCS_CHANIM 2 +#define APCS_CSTIMER 3 +#define APCS_TXDLY 5 +#define APCS_NONACSD 6 +#define APCS_DFS_REENTRY 7 +#define APCS_TXFAIL 8 +#define APCS_MAX 9 + +/* number of ACS record entries */ +#define CHANIM_ACS_RECORD 10 + +/* CHANIM */ +#define CCASTATS_TXDUR 0 +#define CCASTATS_INBSS 1 +#define CCASTATS_OBSS 2 +#define CCASTATS_NOCTG 3 +#define CCASTATS_NOPKT 4 +#define CCASTATS_DOZE 5 +#define CCASTATS_TXOP 6 +#define CCASTATS_GDTXDUR 7 +#define CCASTATS_BDTXDUR 8 + 
+#ifndef WLCHANIM_V2 +#define CCASTATS_MAX 9 +#else /* WLCHANIM_V2 */ +#define CCASTATS_MYRX 9 +#define CCASTATS_MAX 10 +#endif /* WLCHANIM_V2 */ + +#define WL_CHANIM_COUNT_ALL 0xff +#define WL_CHANIM_COUNT_ONE 0x1 + +/* ap tpc modes */ +#define AP_TPC_OFF 0 +#define AP_TPC_BSS_PWR 1 /* BSS power control */ +#define AP_TPC_AP_PWR 2 /* AP power control */ +#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */ +#define AP_TPC_MAX_LINK_MARGIN 127 + +/* ap tpc modes */ +#define AP_TPC_OFF 0 +#define AP_TPC_BSS_PWR 1 /* BSS power control */ +#define AP_TPC_AP_PWR 2 /* AP power control */ +#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */ +#define AP_TPC_MAX_LINK_MARGIN 127 + +/* state */ +#define WL_P2P_DISC_ST_SCAN 0 +#define WL_P2P_DISC_ST_LISTEN 1 +#define WL_P2P_DISC_ST_SEARCH 2 + +/* i/f type */ +#define WL_P2P_IF_CLIENT 0 +#define WL_P2P_IF_GO 1 +#define WL_P2P_IF_DYNBCN_GO 2 +#define WL_P2P_IF_DEV 3 + +/* p2p GO configuration */ +#define WL_P2P_ENABLE_CONF 1 /* configure */ +#define WL_P2P_DISABLE_CONF 0 /* un-configure */ + +/* count */ +#define WL_P2P_SCHED_RSVD 0 +#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */ + +#define WL_P2P_SCHED_FIXED_LEN 3 + +/* schedule type */ +#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */ +#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */ + +/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */ +#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */ +#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */ +/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */ +#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */ +/* schedule option - WL_P2P_SCHED_TYPE_XXX */ +#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */ + +/* schedule option - WL_P2P_SCHED_TYPE_ABS */ +#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */ +#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */ +/* schedule option - 
WL_P2P_SCHED_TYPE_REQ_ABS */ +#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/internal/duration/count with + * start being an offset of the 'current' TSF + */ + +/* feature flags */ +#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */ +#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not probe respond to non-p2p probe + * requests + */ +#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */ + +/* n-mode support capability */ +/* 2x2 includes both 1x1 & 2x2 devices + * reserved #define 2 for future when we want to separate 1x1 & 2x2 and + * control it independently + */ +#define WL_11N_2x2 1 +#define WL_11N_3x3 3 +#define WL_11N_4x4 4 + +/* define 11n feature disable flags */ +#define WLFEATURE_DISABLE_11N 0x00000001 +#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 +#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 +#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 +#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 +#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 +#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 +#define WLFEATURE_DISABLE_11N_GF 0x00000080 + +/* Proxy STA modes */ +#define PSTA_MODE_DISABLED 0 +#define PSTA_MODE_PROXY 1 +#define PSTA_MODE_REPEATER 2 + +/* op code in nat_cfg */ +#define NAT_OP_ENABLE 1 /* enable NAT on given interface */ +#define NAT_OP_DISABLE 2 /* disable NAT on given interface */ +#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */ + +/* NAT state */ +#define NAT_STATE_ENABLED 1 /* NAT is enabled */ +#define NAT_STATE_DISABLED 2 /* NAT is disabled */ + +#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */ +#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */ +#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */ +#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */ + +/* D0 Coalescing */ +#define IPV4_ARP_FILTER 
0x0001 +#define IPV4_NETBT_FILTER 0x0002 +#define IPV4_LLMNR_FILTER 0x0004 +#define IPV4_SSDP_FILTER 0x0008 +#define IPV4_WSD_FILTER 0x0010 +#define IPV6_NETBT_FILTER 0x0200 +#define IPV6_LLMNR_FILTER 0x0400 +#define IPV6_SSDP_FILTER 0x0800 +#define IPV6_WSD_FILTER 0x1000 + +/* Network Offload Engine */ +#define NWOE_OL_ENABLE 0x00000001 + +/* + * Traffic management structures/defines. + */ + +/* Traffic management bandwidth parameters */ +#define TRF_MGMT_MAX_PRIORITIES 3 + +#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */ +#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Don't shape traffic */ +#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC 0x0008 /* Manage traffic over our local subnet */ +#define TRF_MGMT_FLAG_FILTER_ON_MACADDR 0x0010 /* filter on MAC address */ +#define TRF_MGMT_FLAG_NO_RX 0x0020 /* do not apply fiters to rx packets */ + +#define TRF_FILTER_MAC_ADDR 0x0001 /* L2 filter use dst mac address for filtering */ +#define TRF_FILTER_IP_ADDR 0x0002 /* L3 filter use ip ddress for filtering */ +#define TRF_FILTER_L4 0x0004 /* L4 filter use tcp/udp for filtering */ +#define TRF_FILTER_DWM 0x0008 /* L3 filter use DSCP for filtering */ +#define TRF_FILTER_FAVORED 0x0010 /* Tag the packet FAVORED */ + +/* WNM/NPS subfeatures mask */ +#define WL_WNM_BSSTRANS 0x00000001 +#define WL_WNM_PROXYARP 0x00000002 +#define WL_WNM_MAXIDLE 0x00000004 +#define WL_WNM_TIMBC 0x00000008 +#define WL_WNM_TFS 0x00000010 +#define WL_WNM_SLEEP 0x00000020 +#define WL_WNM_DMS 0x00000040 +#define WL_WNM_FMS 0x00000080 +#define WL_WNM_NOTIF 0x00000100 +#define WL_WNM_WBTEXT 0x00000200 +#define WL_WNM_ESTM 0x00000400 +#define WL_WNM_MAX 0x00000800 +#ifdef WLWNM_BRCM +#define BRCM_WNM_FEATURE_SET\ + (WL_WNM_PROXYARP | \ + WL_WNM_SLEEP | \ + WL_WNM_FMS | \ + WL_WNM_TFS | \ + WL_WNM_TIMBC | \ + WL_WNM_BSSTRANS | \ + WL_WNM_DMS | \ + WL_WNM_NOTIF | \ + 0) +#endif /* WLWNM_BRCM */ +#ifndef ETHER_MAX_DATA +#define ETHER_MAX_DATA 1500 +#endif /* ETHER_MAX_DATA */ + +/* 
Different discovery modes for dpt */ +#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */ +#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */ +#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */ + +/* different path selection values */ +#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */ +#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */ +#define DPT_PATHSEL_APPATH 2 /* always use AP path */ + +/* different ops for deny list */ +#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */ +#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */ + +/* different ops for manual end point */ +#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ +#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ +#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ + +/* flags to indicate DPT status */ +#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */ +#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */ +#define DPT_STATUS_FAILED 0x04 /* DPT link failed */ + +#ifdef WLTDLS +/* different ops for manual end point */ +#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ +#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ +#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ +#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */ +#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */ +#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */ +#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */ +#define TDLS_MANUAL_EP_WFD_TPQ 8 /* WiFi-Display Tunneled Probe reQuest */ + +/* modes */ +#define TDLS_WFD_IE_TX 0 +#define TDLS_WFD_IE_RX 1 +#define TDLS_WFD_PROBE_IE_TX 2 +#define TDLS_WFD_PROBE_IE_RX 3 +#endif /* WLTDLS */ + +/* define for flag */ +#define TSPEC_PENDING 0 /* TSPEC pending */ +#define TSPEC_ACCEPTED 1 /* TSPEC accepted */ +#define TSPEC_REJECTED 2 /* TSPEC rejected */ +#define 
TSPEC_UNKNOWN 3 /* TSPEC unknown */ +#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */ + +#ifdef BCMCCX +/* "wlan_reason" iovar interface */ +#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK 0 /* normal WLAN network setup */ +#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK 1 /* roam from Cellular network */ +#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN 2 /* roam from LAN */ +#define WL_WLAN_ASSOC_REASON_MAX 2 /* largest value allowed */ +#endif /* BCMCCX */ + +/* Software feature flag defines used by wlfeatureflag */ +#ifdef WLAFTERBURNER +#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */ +#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */ +#endif /* WLAFTERBURNER */ +#define WL_SWFL_NOHWRADIO 0x0004 +#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */ +#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */ + +#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */ + +#define CSA_BROADCAST_ACTION_FRAME 0 /* csa broadcast action frame */ +#define CSA_UNICAST_ACTION_FRAME 1 /* csa unicast action frame */ + +/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER. + * + * (-100 < value < 0) value is used directly as a roaming trigger in dBm + * (0 <= value) value specifies a logical roaming trigger level from + * the list below + * + * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never + * the logical roam trigger value. + */ +#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */ +#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */ +#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */ +#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */ +#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. 
valid value */ + +#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */ + +/* Preferred Network Offload (PNO, formerly PFN) defines */ +#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 +#define IMMEDIATE_EVENT_BIT 8 +#define SUPPRESS_SSID_BIT 9 +#define ENABLE_NET_OFFLOAD_BIT 10 +/* report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_BIT 11 +#define BESTN_BSSID_ONLY_BIT 12 + +#define SORT_CRITERIA_MASK 0x0001 +#define AUTO_NET_SWITCH_MASK 0x0002 +#define ENABLE_BKGRD_SCAN_MASK 0x0004 +#define IMMEDIATE_SCAN_MASK 0x0008 +#define AUTO_CONNECT_MASK 0x0010 + +#define ENABLE_BD_SCAN_MASK 0x0020 +#define ENABLE_ADAPTSCAN_MASK 0x00c0 +#define IMMEDIATE_EVENT_MASK 0x0100 +#define SUPPRESS_SSID_MASK 0x0200 +#define ENABLE_NET_OFFLOAD_MASK 0x0400 +/* report found/lost events for SSID and BSSID networks seperately */ +#define REPORT_SEPERATELY_MASK 0x0800 +#define BESTN_BSSID_ONLY_MASK 0x1000 + +#define PFN_VERSION 2 +#ifdef PFN_SCANRESULT_2 +#define PFN_SCANRESULT_VERSION 2 +#else +#define PFN_SCANRESULT_VERSION 1 +#endif /* PFN_SCANRESULT_2 */ +#ifndef MAX_PFN_LIST_COUNT +#define MAX_PFN_LIST_COUNT 16 +#endif /* MAX_PFN_LIST_COUNT */ + +#define PFN_COMPLETE 1 +#define PFN_INCOMPLETE 0 + +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 + +#define PFN_PARTIAL_SCAN_BIT 0 +#define PFN_PARTIAL_SCAN_MASK 1 + +#define WL_PFN_SUPPRESSFOUND_MASK 0x08 +#define WL_PFN_SUPPRESSLOST_MASK 0x10 +#define WL_PFN_SSID_A_BAND_TRIG 0x20 +#define WL_PFN_SSID_BG_BAND_TRIG 0x40 +#define WL_PFN_SSID_IMPRECISE_MATCH 0x80 +#define WL_PFN_SSID_SAME_NETWORK 0x10000 +#define WL_PFN_SUPPRESS_AGING_MASK 0x20000 +#define WL_PFN_FLUSH_ALL_SSIDS 0x40000 
+#define WL_PFN_RSSI_MASK 0xff00 +#define WL_PFN_RSSI_SHIFT 8 + +#define WL_PFN_REPORT_ALLNET 0 +#define WL_PFN_REPORT_SSIDNET 1 +#define WL_PFN_REPORT_BSSIDNET 2 + +#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */ +#define WL_PFN_CFG_FLAGS_HISTORY_OFF 0x00000002 /* Scan history suppressed */ + +#define WL_PFN_HIDDEN_BIT 2 +#define PNO_SCAN_MAX_FW 508*1000 /* max time scan time in msec */ +#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max time scan time in SEC */ +#define PNO_SCAN_MIN_FW_SEC 10 /* min time scan time in SEC */ +#define WL_PFN_HIDDEN_MASK 0x4 +#define MAX_SSID_WHITELIST_NUM 4 +#define MAX_BSSID_PREF_LIST_NUM 32 +#define MAX_BSSID_BLACKLIST_NUM 32 + +#ifndef BESTN_MAX +#define BESTN_MAX 10 +#endif // endif + +#ifndef MSCAN_MAX +#define MSCAN_MAX 32 +#endif // endif + +/* TCP Checksum Offload error injection for testing */ +#define TOE_ERRTEST_TX_CSUM 0x00000001 +#define TOE_ERRTEST_RX_CSUM 0x00000002 +#define TOE_ERRTEST_RX_CSUM2 0x00000004 + +/* ARP Offload feature flags for arp_ol iovar */ +#define ARP_OL_AGENT 0x00000001 +#define ARP_OL_SNOOP 0x00000002 +#define ARP_OL_HOST_AUTO_REPLY 0x00000004 +#define ARP_OL_PEER_AUTO_REPLY 0x00000008 + +/* ARP Offload error injection */ +#define ARP_ERRTEST_REPLY_PEER 0x1 +#define ARP_ERRTEST_REPLY_HOST 0x2 + +#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */ +#if defined(WL_PKT_FLTR_EXT) && !defined(WL_PKT_FLTR_EXT_DISABLED) +#define ND_MULTIHOMING_MAX 32 /* Maximum local host IP addresses */ +#else +#define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */ +#endif /* WL_PKT_FLTR_EXT && !WL_PKT_FLTR_EXT_DISABLED */ +#define ND_REQUEST_MAX 5 /* Max set of offload params */ +/* AOAC wake event flag */ +#define WAKE_EVENT_NLO_DISCOVERY_BIT 1 +#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2 +#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4 +#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8 +#define WAKE_EVENT_NET_PACKET_BIT 0x10 + +#define 
MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */ + +/* Packet filter operation mode */ +/* True: 1; False: 0 */ +#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1 +/* Enable and disable pkt_filter as a whole */ +#define PKT_FILTER_MODE_DISABLE 2 +/* Cache first matched rx pkt(be queried by host later) */ +#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4 +/* If pkt_filter is enabled and no filter is set, don't forward anything */ +#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8 + +#ifdef DONGLEOVERLAYS +#define OVERLAY_IDX_MASK 0x000000ff +#define OVERLAY_IDX_SHIFT 0 +#define OVERLAY_FLAGS_MASK 0xffffff00 +#define OVERLAY_FLAGS_SHIFT 8 +/* overlay written to device memory immediately after loading the base image */ +#define OVERLAY_FLAG_POSTLOAD 0x100 +/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */ +#define OVERLAY_FLAG_DEFER_DL 0x200 +/* overlay downloaded prior to the host going to sleep */ +#define OVERLAY_FLAG_PRESLEEP 0x400 +#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024 +#endif /* DONGLEOVERLAYS */ + +/* reuse two number in the sc/rc space */ +#define SMFS_CODE_MALFORMED 0xFFFE +#define SMFS_CODE_IGNORED 0xFFFD + +/* RFAWARE def */ +#define BCM_ACTION_RFAWARE 0x77 +#define BCM_ACTION_RFAWARE_DCS 0x01 + +/* DCS reason code define */ +#define BCM_DCS_IOVAR 0x1 +#define BCM_DCS_UNKNOWN 0xFF + +#ifdef PROP_TXSTATUS +/* Bit definitions for tlv iovar */ +/* + * enable RSSI signals: + * WLFC_CTL_TYPE_RSSI + */ +#define WLFC_FLAGS_RSSI_SIGNALS 0x0001 + +/* enable (if/mac_open, if/mac_close,, mac_add, mac_del) signals: + * + * WLFC_CTL_TYPE_MAC_OPEN + * WLFC_CTL_TYPE_MAC_CLOSE + * + * WLFC_CTL_TYPE_INTERFACE_OPEN + * WLFC_CTL_TYPE_INTERFACE_CLOSE + * + * WLFC_CTL_TYPE_MACDESC_ADD + * WLFC_CTL_TYPE_MACDESC_DEL + * + */ +#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002 + +/* enable (status, fifo_credit, mac_credit) signals + * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT + * WLFC_CTL_TYPE_TXSTATUS + * WLFC_CTL_TYPE_FIFO_CREDITBACK + */ +#define 
WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004 + +#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008 +#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010 +#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020 +#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040 +#define WLFC_FLAGS_PKT_STAMP_SIGNALS 0x0080 + +#endif /* PROP_TXSTATUS */ + +#define WL_TIMBC_STATUS_AP_UNKNOWN 255 /* AP status for internal use only */ + +#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */ +#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */ + +/* Definitions for Reliable Multicast */ +#define WL_RELMCAST_MAX_CLIENT 32 +#define WL_RELMCAST_FLAG_INBLACKLIST 1 +#define WL_RELMCAST_FLAG_ACTIVEACKER 2 +#define WL_RELMCAST_FLAG_RELMCAST 4 + +/* structures for proximity detection device role */ +#define WL_PROXD_MODE_DISABLE 0 +#define WL_PROXD_MODE_NEUTRAL 1 +#define WL_PROXD_MODE_INITIATOR 2 +#define WL_PROXD_MODE_TARGET 3 +#define WL_PROXD_RANDOM_WAKEUP 0x8000 + +#ifdef NET_DETECT +#define NET_DETECT_MAX_WAKE_DATA_SIZE 2048 +#define NET_DETECT_MAX_PROFILES 16 +#define NET_DETECT_MAX_CHANNELS 50 +#endif /* NET_DETECT */ + +/* Bit masks for radio disabled status - returned by WL_GET_RADIO */ +#define WL_RADIO_SW_DISABLE (1<<0) +#define WL_RADIO_HW_DISABLE (1<<1) +#define WL_RADIO_MPC_DISABLE (1<<2) +#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */ +#define WL_RADIO_PERCORE_DISABLE (1<<4) /* Radio diable per core for DVT */ +#define WL_RADIO_TSYNC_PWRSAVE_DISABLE (1<<5) /* Disable Radio in tsync mode for power saving */ + +#define WL_SPURAVOID_OFF 0 +#define WL_SPURAVOID_ON1 1 +#define WL_SPURAVOID_ON2 2 + +#define WL_4335_SPURAVOID_ON1 1 +#define WL_4335_SPURAVOID_ON2 2 +#define WL_4335_SPURAVOID_ON3 3 +#define WL_4335_SPURAVOID_ON4 4 +#define WL_4335_SPURAVOID_ON5 5 +#define WL_4335_SPURAVOID_ON6 6 +#define WL_4335_SPURAVOID_ON7 7 +#define WL_4335_SPURAVOID_ON8 8 +#define 
WL_4335_SPURAVOID_ON9 9 + +/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */ +#define WL_TXPWR_OVERRIDE (1U<<31) +#define WL_TXPWR_2G (1U<<30) +#define WL_TXPWR_5G (1U<<29) +#define WL_TXPWR_NEG (1U<<28) + +#define WL_TXPWR_MASK (~(0x7<<29)) +#define WL_TXPWR_CORE_MAX (3) +#define WL_TXPWR_CORE0_MASK (0x000000FF) +#define WL_TXPWR_CORE0_SHIFT (0) +#define WL_TXPWR_CORE1_MASK (0x0000FF00) +#define WL_TXPWR_CORE1_SHIFT (8) +#define WL_TXPWR_CORE2_MASK (0x00FF0000) +#define WL_TXPWR_CORE2_SHIFT (16) + +/* phy types (returned by WLC_GET_PHYTPE) */ +#define WLC_PHY_TYPE_A 0 +#define WLC_PHY_TYPE_B 1 +#define WLC_PHY_TYPE_G 2 +#define WLC_PHY_TYPE_N 4 +#define WLC_PHY_TYPE_LP 5 +#define WLC_PHY_TYPE_SSN 6 +#define WLC_PHY_TYPE_HT 7 +#define WLC_PHY_TYPE_LCN 8 +#define WLC_PHY_TYPE_LCN40 10 +#define WLC_PHY_TYPE_AC 11 +#define WLC_PHY_TYPE_LCN20 12 +#define WLC_PHY_TYPE_NULL 0xf + +/* Values for PM */ +#define PM_OFF 0 +#define PM_MAX 1 +#define PM_FAST 2 +#define PM_FORCE_OFF 3 /* use this bit to force PM off even bt is active */ + +#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */ + +/* fbt_cap: FBT assoc / reassoc modes. */ +#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC 1 /* Driver 4-way handshake & reassoc (WLFBT). 
*/ + +/* monitor_promisc_level bits */ +#define WL_MONPROMISC_PROMISC 0x0001 +#define WL_MONPROMISC_CTRL 0x0002 +#define WL_MONPROMISC_FCS 0x0004 + +/* TCP Checksum Offload defines */ +#define TOE_TX_CSUM_OL 0x00000001 +#define TOE_RX_CSUM_OL 0x00000002 + +/* Wi-Fi Display Services (WFDS) */ +#define WL_P2P_SOCIAL_CHANNELS_MAX WL_NUMCHANNELS +#define MAX_WFDS_SEEK_SVC 4 /* Max # of wfds services to seek */ +#define MAX_WFDS_ADVERT_SVC 4 /* Max # of wfds services to advertise */ +#define MAX_WFDS_SVC_NAME_LEN 200 /* maximum service_name length */ +#define MAX_WFDS_ADV_SVC_INFO_LEN 65000 /* maximum adv service_info length */ +#define P2P_WFDS_HASH_LEN 6 /* Length of a WFDS service hash */ +#define MAX_WFDS_SEEK_SVC_INFO_LEN 255 /* maximum seek service_info req length */ +#define MAX_WFDS_SEEK_SVC_NAME_LEN 200 /* maximum service_name length */ + +/* ap_isolate bitmaps */ +#define AP_ISOLATE_DISABLED 0x0 +#define AP_ISOLATE_SENDUP_ALL 0x01 +#define AP_ISOLATE_SENDUP_MCAST 0x02 + +/* Type values for the wl_pwrstats_t data field */ +#define WL_PWRSTATS_TYPE_PHY 0 /**< struct wl_pwr_phy_stats */ +#define WL_PWRSTATS_TYPE_SCAN 1 /**< struct wl_pwr_scan_stats */ +#define WL_PWRSTATS_TYPE_USB_HSIC 2 /**< struct wl_pwr_usb_hsic_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /**< struct wl_pwr_pm_awake_stats_v1 */ +#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */ +#define WL_PWRSTATS_TYPE_PCIE 6 /**< struct wl_pwr_pcie_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /**< struct wl_pwr_pm_awake_stats_v2 */ +#define WL_PWRSTATS_TYPE_SDIO 8 /* struct wl_pwr_sdio_stats */ +#define WL_PWRSTATS_TYPE_MIMO_PS_METRICS 9 /* struct wl_mimo_meas_metrics_t */ +#define WL_PWRSTATS_TYPE_SLICE_INDEX 10 /* slice index for which this report is meant for */ +#define WL_PWRSTATS_TYPE_TSYNC 11 /**< struct wl_pwr_tsync_stats */ +#define WL_PWRSTATS_TYPE_OPS_STATS 12 /* struct wl_pwr_ops_stats_t */ +#define WL_PWRSTATS_TYPE_BCNTRIM_STATS 13 /* struct 
wl_pwr_bcntrim_stats_t */ +#define WL_PWRSTATS_TYPE_SLICE_INDEX_BAND_INFO 14 /* wl_pwr_slice_index_band_t */ +#define WL_PWRSTATS_TYPE_PSBW_STATS 15 /* struct wl_pwr_psbw_stats_t */ + +/* IOV AWD DATA */ +#define AWD_DATA_JOIN_INFO 0 +#define AWD_DATA_VERSION_V1 1 + +/* IOV ETD DATA */ +#define ETD_DATA_JOIN_INFO 0 +#define ETD_DATA_VERSION_V1 1 + +#endif /* wlioctl_defs_h */ diff --git a/bcmdhd.100.10.315.x/include/wlioctl_utils.h b/bcmdhd.100.10.315.x/include/wlioctl_utils.h new file mode 100644 index 0000000..d553af5 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/wlioctl_utils.h @@ -0,0 +1,61 @@ +/* + * Custom OID/ioctl related helper functions. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * <> + * + * $Id: wlioctl_utils.h 626207 2016-03-19 17:39:14Z $ + */ + +#ifndef _wlioctl_utils_h_ +#define _wlioctl_utils_h_ + +#include + +#ifndef BCMDRIVER +#define CCA_THRESH_MILLI 14 +#define CCA_THRESH_INTERFERE 6 + +extern cca_congest_channel_req_t * cca_per_chan_summary(cca_congest_channel_req_t *input, + cca_congest_channel_req_t *avg, bool percent); + +extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans, + uint flags, chanspec_t *answer); +#endif /* BCMDRIVER */ + +extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, + int buflen, uint32 corerev); + +extern const char * wl_get_reinit_rc_name(int rc); + +/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */ +#define GET_WLCCNT_FROM_CNTBUF(cntbuf) (const wl_cnt_wlc_t*) \ + bcm_get_data_from_xtlv_buf(((const wl_cnt_info_t *)cntbuf)->data, \ + ((const wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \ + NULL, BCM_XTLV_OPTION_ALIGN32) + +#define CHK_CNTBUF_DATALEN(cntbuf, ioctl_buflen) do { \ + if (((wl_cnt_info_t *)cntbuf)->datalen + \ + OFFSETOF(wl_cnt_info_t, data) > ioctl_buflen) \ + printf("%s: IOVAR buffer short!\n", __FUNCTION__); \ +} while (0) + +#endif /* _wlioctl_utils_h_ */ diff --git a/bcmdhd.100.10.315.x/include/wpa.h b/bcmdhd.100.10.315.x/include/wpa.h new file mode 100644 index 0000000..ba35495 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/wpa.h @@ -0,0 +1,290 @@ +/* + * Fundamental types and constants relating to WPA + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wpa.h 761317 2018-05-07 21:33:58Z $ + */ + +#ifndef _proto_wpa_h_ +#define _proto_wpa_h_ + +#include +#include + +/* This marks the start of a packed structure section. */ +#include + +/* Reason Codes */ + +/* 13 through 23 taken from IEEE Std 802.11i-2004 */ +#define DOT11_RC_INVALID_WPA_IE 13 /* Invalid info. element */ +#define DOT11_RC_MIC_FAILURE 14 /* Michael failure */ +#define DOT11_RC_4WH_TIMEOUT 15 /* 4-way handshake timeout */ +#define DOT11_RC_GTK_UPDATE_TIMEOUT 16 /* Group key update timeout */ +#define DOT11_RC_WPA_IE_MISMATCH 17 /* WPA IE in 4-way handshake differs from + * (re-)assoc. 
request/probe response + */ +#define DOT11_RC_INVALID_MC_CIPHER 18 /* Invalid multicast cipher */ +#define DOT11_RC_INVALID_UC_CIPHER 19 /* Invalid unicast cipher */ +#define DOT11_RC_INVALID_AKMP 20 /* Invalid authenticated key management protocol */ +#define DOT11_RC_BAD_WPA_VERSION 21 /* Unsupported WPA version */ +#define DOT11_RC_INVALID_WPA_CAP 22 /* Invalid WPA IE capabilities */ +#define DOT11_RC_8021X_AUTH_FAIL 23 /* 802.1X authentication failure */ + +#define WPA2_PMKID_LEN 16 + +/* WPA IE fixed portion */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 tag; /* TAG */ + uint8 length; /* TAG length */ + uint8 oui[3]; /* IE OUI */ + uint8 oui_type; /* OUI type */ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; /* IE version */ +} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t; +#define WPA_IE_OUITYPE_LEN 4 +#define WPA_IE_FIXED_LEN 8 +#define WPA_IE_TAG_FIXED_LEN 6 + +#define BIP_OUI_TYPE WPA2_OUI "\x06" + +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 tag; /* TAG */ + uint8 length; /* TAG length */ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; /* IE version */ +} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t; +#define WPA_RSN_IE_FIXED_LEN 4 +#define WPA_RSN_IE_TAG_FIXED_LEN 2 +typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN]; + +#define WFA_OSEN_IE_FIXED_LEN 6 + +/* WPA suite/multicast suite */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 oui[3]; + uint8 type; +} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t; +#define WPA_SUITE_LEN 4 + +/* WPA unicast suite list/key management suite list */ +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT count; + wpa_suite_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t; +#define WPA_IE_SUITE_COUNT_LEN 2 +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } 
BWL_POST_PACKED_STRUCT count; + wpa_pmkid_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t; + +/* WPA cipher suites */ +#define WPA_CIPHER_NONE 0 /* None */ +#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */ +#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */ +#define WPA_CIPHER_AES_OCB 3 /* AES (OCB) */ +#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */ +#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */ +#define WPA_CIPHER_BIP 6 /* WEP (104-bit) */ +#define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */ +#ifdef BCMCCX +#define WPA_CIPHER_CKIP 8 /* KP with no MIC */ +#define WPA_CIPHER_CKIP_MMH 9 /* KP with MIC ("CKIP/MMH", "CKIP+CMIC") */ +#define WPA_CIPHER_WEP_MMH 10 /* MIC with no KP ("WEP/MMH", "CMIC") */ + +#define IS_CCX_CIPHER(cipher) ((cipher) == WPA_CIPHER_CKIP || \ + (cipher) == WPA_CIPHER_CKIP_MMH || \ + (cipher) == WPA_CIPHER_WEP_MMH) +#endif /* BCMCCX */ + +#define WPA_CIPHER_AES_GCM 8 /* AES (GCM) */ +#define WPA_CIPHER_AES_GCM256 9 /* AES (GCM256) */ +#define WPA_CIPHER_CCMP_256 10 /* CCMP-256 */ +#define WPA_CIPHER_BIP_GMAC_128 11 /* BIP_GMAC_128 */ +#define WPA_CIPHER_BIP_GMAC_256 12 /* BIP_GMAC_256 */ +#define WPA_CIPHER_BIP_CMAC_256 13 /* BIP_CMAC_256 */ + +#ifdef BCMWAPI_WAI +#define WAPI_CIPHER_NONE WPA_CIPHER_NONE +#define WAPI_CIPHER_SMS4 11 + +#define WAPI_CSE_WPI_SMS4 1 +#endif /* BCMWAPI_WAI */ + +#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \ + (cipher) == WPA_CIPHER_WEP_40 || \ + (cipher) == WPA_CIPHER_WEP_104 || \ + (cipher) == WPA_CIPHER_TKIP || \ + (cipher) == WPA_CIPHER_AES_OCB || \ + (cipher) == WPA_CIPHER_AES_CCM || \ + (cipher) == WPA_CIPHER_AES_GCM || \ + (cipher) == WPA_CIPHER_AES_GCM256 || \ + (cipher) == WPA_CIPHER_TPK) + +#ifdef BCMWAPI_WAI +#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \ + (cipher) == WAPI_CSE_WPI_SMS4) + +/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */ +#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? 
\ + WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE) + +#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \ + WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE) +#endif /* BCMWAPI_WAI */ + +#define IS_VALID_AKM(akm) ((akm) == RSN_AKM_NONE || \ + (akm) == RSN_AKM_UNSPECIFIED || \ + (akm) == RSN_AKM_PSK || \ + (akm) == RSN_AKM_FBT_1X || \ + (akm) == RSN_AKM_FBT_PSK || \ + (akm) == RSN_AKM_MFP_1X || \ + (akm) == RSN_AKM_MFP_PSK || \ + (akm) == RSN_AKM_SHA256_1X || \ + (akm) == RSN_AKM_SHA256_PSK || \ + (akm) == RSN_AKM_TPK || \ + (akm) == RSN_AKM_SAE_PSK || \ + (akm) == RSN_AKM_SAE_FBT || \ + (akm) == RSN_AKM_FILS_SHA256 || \ + (akm) == RSN_AKM_FILS_SHA384 || \ + (akm) == RSN_AKM_OWE || \ + (akm) == RSN_AKM_SUITEB_SHA256_1X || \ + (akm) == RSN_AKM_SUITEB_SHA384_1X) + +#define IS_VALID_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \ + (cipher) == WPA_CIPHER_BIP_GMAC_128 || \ + (cipher) == WPA_CIPHER_BIP_GMAC_256 || \ + (cipher) == WPA_CIPHER_BIP_CMAC_256) +/* WPA TKIP countermeasures parameters */ +#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */ +#define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */ + +/* RSN IE defines */ +#define RSN_CAP_LEN 2 /* Length of RSN capabilities field (2 octets) */ + +/* RSN Capabilities defined in 802.11i */ +#define RSN_CAP_PREAUTH 0x0001 +#define RSN_CAP_NOPAIRWISE 0x0002 +#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C +#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2 +#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030 +#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4 +#define RSN_CAP_1_REPLAY_CNTR 0 +#define RSN_CAP_2_REPLAY_CNTRS 1 +#define RSN_CAP_4_REPLAY_CNTRS 2 +#define RSN_CAP_16_REPLAY_CNTRS 3 +#define RSN_CAP_MFPR 0x0040 +#define RSN_CAP_MFPC 0x0080 +#define RSN_CAP_SPPC 0x0400 +#define RSN_CAP_SPPR 0x0800 + +/* WPA capabilities defined in 802.11i */ +#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS +#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS +#define WPA_CAP_REPLAY_CNTR_SHIFT 
RSN_CAP_PTK_REPLAY_CNTR_SHIFT +#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK + +/* WPA capabilities defined in 802.11zD9.0 */ +#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1) /* bit 9 */ + +/* WPA Specific defines */ +#define WPA_CAP_LEN RSN_CAP_LEN /* Length of RSN capabilities in RSN IE (2 octets) */ +#define WPA_PMKID_CNT_LEN 2 /* Length of RSN PMKID count (2 octests) */ + +#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH + +#define WPA2_PMKID_COUNT_LEN 2 + +/* RSN dev type in rsn_info struct */ +typedef enum { + DEV_NONE = 0, + DEV_STA = 1, + DEV_AP = 2 +} device_type_t; + +typedef uint32 rsn_akm_mask_t; /* RSN_AKM_... see 802.11.h */ +typedef uint8 rsn_cipher_t; /* WPA_CIPHER_xxx */ +typedef uint32 rsn_ciphers_t; /* mask of rsn_cipher_t */ +typedef uint8 rsn_akm_t; +typedef uint8 auth_ie_type_mask_t; + +typedef struct rsn_ie_info { + uint8 version; + rsn_cipher_t g_cipher; + uint8 p_count; + uint8 akm_count; + uint8 pmkid_count; + rsn_akm_t sta_akm; /* single STA akm */ + uint16 caps; + rsn_ciphers_t p_ciphers; + rsn_akm_mask_t akms; + uint8 pmkids_offset; /* offset into the IE */ + rsn_cipher_t g_mgmt_cipher; + device_type_t dev_type; /* AP or STA */ + rsn_cipher_t sta_cipher; /* single STA cipher */ + uint16 key_desc; /* key descriptor version as STA */ + int parse_status; + uint16 mic_len; /* unused. keep for ROM compatibility. 
*/ + auth_ie_type_mask_t auth_ie_type; /* bit field of WPA, WPA2 and (not yet) CCX WAPI */ + uint8 pmk_len; /* EAPOL PMK */ + uint8 kck_mic_len; /* EAPOL MIC (by KCK) */ + uint8 kck_len; /* EAPOL KCK */ + uint8 kek_len; /* EAPOL KEK */ + uint8 tk_len; /* EAPOL TK */ + uint8 ptk_len; /* EAPOL PTK */ +} rsn_ie_info_t; + +#ifdef BCMWAPI_WAI +#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH + +/* Other WAI definition */ +#define WAPI_WAI_REQUEST 0x00F1 +#define WAPI_UNICAST_REKEY 0x00F2 +#define WAPI_STA_AGING 0x00F3 +#define WAPI_MUTIL_REKEY 0x00F4 +#define WAPI_STA_STATS 0x00F5 + +#define WAPI_USK_REKEY_COUNT 0x4000000 /* 0xA00000 */ +#define WAPI_MSK_REKEY_COUNT 0x4000000 /* 0xA00000 */ +#endif /* BCMWAPI_WAI */ + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _proto_wpa_h_ */ diff --git a/bcmdhd.100.10.315.x/include/wps.h b/bcmdhd.100.10.315.x/include/wps.h new file mode 100644 index 0000000..7684de0 --- /dev/null +++ b/bcmdhd.100.10.315.x/include/wps.h @@ -0,0 +1,385 @@ +/* + * WPS IE definitions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id$ + */ + +#ifndef _WPS_ +#define _WPS_ + +#ifdef __cplusplus +extern "C" { +#endif // endif + +/* Data Element Definitions */ +#define WPS_ID_AP_CHANNEL 0x1001 +#define WPS_ID_ASSOC_STATE 0x1002 +#define WPS_ID_AUTH_TYPE 0x1003 +#define WPS_ID_AUTH_TYPE_FLAGS 0x1004 +#define WPS_ID_AUTHENTICATOR 0x1005 +#define WPS_ID_CONFIG_METHODS 0x1008 +#define WPS_ID_CONFIG_ERROR 0x1009 +#define WPS_ID_CONF_URL4 0x100A +#define WPS_ID_CONF_URL6 0x100B +#define WPS_ID_CONN_TYPE 0x100C +#define WPS_ID_CONN_TYPE_FLAGS 0x100D +#define WPS_ID_CREDENTIAL 0x100E +#define WPS_ID_DEVICE_NAME 0x1011 +#define WPS_ID_DEVICE_PWD_ID 0x1012 +#define WPS_ID_E_HASH1 0x1014 +#define WPS_ID_E_HASH2 0x1015 +#define WPS_ID_E_SNONCE1 0x1016 +#define WPS_ID_E_SNONCE2 0x1017 +#define WPS_ID_ENCR_SETTINGS 0x1018 +#define WPS_ID_ENCR_TYPE 0x100F +#define WPS_ID_ENCR_TYPE_FLAGS 0x1010 +#define WPS_ID_ENROLLEE_NONCE 0x101A +#define WPS_ID_FEATURE_ID 0x101B +#define WPS_ID_IDENTITY 0x101C +#define WPS_ID_IDENTITY_PROOF 0x101D +#define WPS_ID_KEY_WRAP_AUTH 0x101E +#define WPS_ID_KEY_IDENTIFIER 0x101F +#define WPS_ID_MAC_ADDR 0x1020 +#define WPS_ID_MANUFACTURER 0x1021 +#define WPS_ID_MSG_TYPE 0x1022 +#define WPS_ID_MODEL_NAME 0x1023 +#define WPS_ID_MODEL_NUMBER 0x1024 +#define WPS_ID_NW_INDEX 0x1026 +#define WPS_ID_NW_KEY 0x1027 +#define WPS_ID_NW_KEY_INDEX 0x1028 +#define WPS_ID_NEW_DEVICE_NAME 0x1029 +#define WPS_ID_NEW_PWD 0x102A +#define WPS_ID_OOB_DEV_PWD 0x102C +#define WPS_ID_OS_VERSION 0x102D +#define WPS_ID_POWER_LEVEL 0x102F +#define WPS_ID_PSK_CURRENT 0x1030 +#define WPS_ID_PSK_MAX 0x1031 +#define WPS_ID_PUBLIC_KEY 0x1032 +#define WPS_ID_RADIO_ENABLED 0x1033 +#define WPS_ID_REBOOT 0x1034 +#define WPS_ID_REGISTRAR_CURRENT 0x1035 +#define 
WPS_ID_REGISTRAR_ESTBLSHD 0x1036 +#define WPS_ID_REGISTRAR_LIST 0x1037 +#define WPS_ID_REGISTRAR_MAX 0x1038 +#define WPS_ID_REGISTRAR_NONCE 0x1039 +#define WPS_ID_REQ_TYPE 0x103A +#define WPS_ID_RESP_TYPE 0x103B +#define WPS_ID_RF_BAND 0x103C +#define WPS_ID_R_HASH1 0x103D +#define WPS_ID_R_HASH2 0x103E +#define WPS_ID_R_SNONCE1 0x103F +#define WPS_ID_R_SNONCE2 0x1040 +#define WPS_ID_SEL_REGISTRAR 0x1041 +#define WPS_ID_SERIAL_NUM 0x1042 +#define WPS_ID_SC_STATE 0x1044 +#define WPS_ID_SSID 0x1045 +#define WPS_ID_TOT_NETWORKS 0x1046 +#define WPS_ID_UUID_E 0x1047 +#define WPS_ID_UUID_R 0x1048 +#define WPS_ID_VENDOR_EXT 0x1049 +#define WPS_ID_VERSION 0x104A +#define WPS_ID_X509_CERT_REQ 0x104B +#define WPS_ID_X509_CERT 0x104C +#define WPS_ID_EAP_IDENTITY 0x104D +#define WPS_ID_MSG_COUNTER 0x104E +#define WPS_ID_PUBKEY_HASH 0x104F +#define WPS_ID_REKEY_KEY 0x1050 +#define WPS_ID_KEY_LIFETIME 0x1051 +#define WPS_ID_PERM_CFG_METHODS 0x1052 +#define WPS_ID_SEL_REG_CFG_METHODS 0x1053 +#define WPS_ID_PRIM_DEV_TYPE 0x1054 +#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055 +#define WPS_ID_PORTABLE_DEVICE 0x1056 +#define WPS_ID_AP_SETUP_LOCKED 0x1057 +#define WPS_ID_APP_LIST 0x1058 +#define WPS_ID_EAP_TYPE 0x1059 +#define WPS_ID_INIT_VECTOR 0x1060 +#define WPS_ID_KEY_PROVIDED_AUTO 0x1061 +#define WPS_ID_8021X_ENABLED 0x1062 +#define WPS_ID_WEP_TRANSMIT_KEY 0x1064 +#define WPS_ID_REQ_DEV_TYPE 0x106A + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WFA_VENDOR_EXT_ID "\x00\x37\x2A" +#define WPS_WFA_SUBID_VERSION2 0x00 +#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02 +#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04 +#define WPS_WFA_SUBID_REG_CFG_METHODS 0x05 + +/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */ +#define MS_VENDOR_EXT_ID "\x00\x01\x37" +#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */ +#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */ + +/* 
Vertical Pairing Identifier TLV Definitions */ +#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */ +#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */ +#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */ +#define WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */ +#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested. + * Not supported in Windows 7 + */ +#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */ + +/* sizes of the fixed size elements */ +#define WPS_ID_AP_CHANNEL_S 2 +#define WPS_ID_ASSOC_STATE_S 2 +#define WPS_ID_AUTH_TYPE_S 2 +#define WPS_ID_AUTH_TYPE_FLAGS_S 2 +#define WPS_ID_AUTHENTICATOR_S 8 +#define WPS_ID_CONFIG_METHODS_S 2 +#define WPS_ID_CONFIG_ERROR_S 2 +#define WPS_ID_CONN_TYPE_S 1 +#define WPS_ID_CONN_TYPE_FLAGS_S 1 +#define WPS_ID_DEVICE_PWD_ID_S 2 +#define WPS_ID_ENCR_TYPE_S 2 +#define WPS_ID_ENCR_TYPE_FLAGS_S 2 +#define WPS_ID_FEATURE_ID_S 4 +#define WPS_ID_MAC_ADDR_S 6 +#define WPS_ID_MSG_TYPE_S 1 +#define WPS_ID_SC_STATE_S 1 +#define WPS_ID_RF_BAND_S 1 +#define WPS_ID_OS_VERSION_S 4 +#define WPS_ID_VERSION_S 1 +#define WPS_ID_SEL_REGISTRAR_S 1 +#define WPS_ID_SEL_REG_CFG_METHODS_S 2 +#define WPS_ID_REQ_TYPE_S 1 +#define WPS_ID_RESP_TYPE_S 1 +#define WPS_ID_AP_SETUP_LOCKED_S 1 + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WPS_WFA_SUBID_VERSION2_S 1 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1 +#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1 +#define WPS_WFA_SUBID_REG_CFG_METHODS_S 2 + +/* Association states */ +#define WPS_ASSOC_NOT_ASSOCIATED 0 +#define WPS_ASSOC_CONN_SUCCESS 1 +#define WPS_ASSOC_CONFIG_FAIL 2 +#define WPS_ASSOC_ASSOC_FAIL 3 +#define WPS_ASSOC_IP_FAIL 4 + +/* Authentication types */ +#define WPS_AUTHTYPE_OPEN 0x0001 +#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in 
WSC 2.0 */ +#define WPS_AUTHTYPE_WPA2 0x0010 +#define WPS_AUTHTYPE_WPA2PSK 0x0020 + +/* Config methods */ +#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */ +#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_CONFMET_LABEL 0x0004 +#define WPS_CONFMET_DISPLAY 0x0008 +#define WPS_CONFMET_EXT_NFC_TOK 0x0010 +#define WPS_CONFMET_INT_NFC_TOK 0x0020 +#define WPS_CONFMET_NFC_INTF 0x0040 +#define WPS_CONFMET_PBC 0x0080 +#define WPS_CONFMET_KEYPAD 0x0100 +/* WSC 2.0 */ +#define WPS_CONFMET_VIRT_PBC 0x0280 +#define WPS_CONFMET_PHY_PBC 0x0480 +#define WPS_CONFMET_VIRT_DISPLAY 0x2008 +#define WPS_CONFMET_PHY_DISPLAY 0x4008 + +/* WPS error messages */ +#define WPS_ERROR_NO_ERROR 0 +#define WPS_ERROR_OOB_INT_READ_ERR 1 +#define WPS_ERROR_DECRYPT_CRC_FAIL 2 +#define WPS_ERROR_CHAN24_NOT_SUPP 3 +#define WPS_ERROR_CHAN50_NOT_SUPP 4 +#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAIL_CONN_REGISTRAR 11 +#define WPS_ERROR_MULTI_PBC_DETECTED 12 +#define WPS_ERROR_ROGUE_SUSPECTED 13 +#define WPS_ERROR_DEVICE_BUSY 14 +#define WPS_ERROR_SETUP_LOCKED 15 +#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18 +#define WPS_ERROR_60GHZ_NOT_SUPPORT 19 +#define WPS_ERROR_PKH_MISMATCH 20 /* Public Key Hash Mismatch */ + +/* Connection types */ +#define WPS_CONNTYPE_ESS 0x01 +#define WPS_CONNTYPE_IBSS 0x02 + +/* Device password ID */ +#define WPS_DEVICEPWDID_DEFAULT 0x0000 +#define WPS_DEVICEPWDID_USER_SPEC 0x0001 +#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002 +#define WPS_DEVICEPWDID_REKEY 0x0003 
+#define WPS_DEVICEPWDID_PUSH_BTN 0x0004 +#define WPS_DEVICEPWDID_REG_SPEC 0x0005 +#define WPS_DEVICEPWDID_IBSS 0x0006 +#define WPS_DEVICEPWDID_NFC_CHO 0x0007 /* NFC-Connection-Handover */ +#define WPS_DEVICEPWDID_WFDS 0x0008 /* Wi-Fi Direct Services Specification */ + +/* Encryption type */ +#define WPS_ENCRTYPE_NONE 0x0001 +#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only + * be advertised on the AP when Mixed Mode + * is enabled (Encryption Type is 0x000c). + */ +#define WPS_ENCRTYPE_AES 0x0008 + +/* WPS Message Types */ +#define WPS_ID_BEACON 0x01 +#define WPS_ID_PROBE_REQ 0x02 +#define WPS_ID_PROBE_RESP 0x03 +#define WPS_ID_MESSAGE_M1 0x04 +#define WPS_ID_MESSAGE_M2 0x05 +#define WPS_ID_MESSAGE_M2D 0x06 +#define WPS_ID_MESSAGE_M3 0x07 +#define WPS_ID_MESSAGE_M4 0x08 +#define WPS_ID_MESSAGE_M5 0x09 +#define WPS_ID_MESSAGE_M6 0x0A +#define WPS_ID_MESSAGE_M7 0x0B +#define WPS_ID_MESSAGE_M8 0x0C +#define WPS_ID_MESSAGE_ACK 0x0D +#define WPS_ID_MESSAGE_NACK 0x0E +#define WPS_ID_MESSAGE_DONE 0x0F + +/* WSP private ID for local use */ +#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1) +#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2) +#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3) +#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4) +#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5) +#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6) + +/* Device Type categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_CAT_COMPUTER 1 +#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2 +#define WPS_DEVICE_TYPE_CAT_PRINTER 3 +#define WPS_DEVICE_TYPE_CAT_CAMERA 4 +#define WPS_DEVICE_TYPE_CAT_STORAGE 5 +#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6 +#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7 +#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8 +#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9 +#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10 +#define 
WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */ + +/* Device Type sub categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1 +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1 +#define 
WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */ + +/* Device request/response type */ +#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00 +#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01 +#define WPS_MSGTYPE_REGISTRAR 0x02 +#define WPS_MSGTYPE_AP_WLAN_MGR 0x03 + +/* RF Band */ +#define WPS_RFBAND_24GHZ 0x01 +#define WPS_RFBAND_50GHZ 0x02 + +/* Simple Config state */ +#define WPS_SCSTATE_UNCONFIGURED 0x01 +#define WPS_SCSTATE_CONFIGURED 0x02 +#define WPS_SCSTATE_OFF 11 + +/* WPS Vendor extension key */ +#define WPS_OUI_HEADER_LEN 2 +#define WPS_OUI_HEADER_SIZE 4 +#define WPS_OUI_FIXED_HEADER_OFF 16 +#define WPS_WFA_SUBID_V2_OFF 3 +#define 
WPS_WFA_V2_OFF 5 + +#ifdef __cplusplus +} +#endif // endif + +#endif /* _WPS_ */ diff --git a/bcmdhd.100.10.315.x/linux_osl.c b/bcmdhd.100.10.315.x/linux_osl.c new file mode 100644 index 0000000..6105a2f --- /dev/null +++ b/bcmdhd.100.10.315.x/linux_osl.c @@ -0,0 +1,1903 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: linux_osl.c 767848 2018-06-15 09:33:44Z $ + */ + +#define LINUX_PORT + +#include +#include +#include +#include + +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) +#include +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ + +#include + +#include +#include +#include +#include +#include + +#ifdef BCM_SECURE_DMA +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* BCM_SECURE_DMA */ + +#include + +#if defined(STB) +#include +extern spinlock_t l2x0_reg_lock; +#endif // endif + +#ifdef BCM_OBJECT_TRACE +#include +#endif /* BCM_OBJECT_TRACE */ +#include "linux_osl_priv.h" + +#define PCI_CFG_RETRY 10 + +#define DUMPBUFSZ 1024 + +#ifdef BCM_SECURE_DMA +static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, + bool iscache, bool isdecr); +static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size); +static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, + sec_mem_elem_t **list); +static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, + void *sec_list_base); +static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, + int direction, struct sec_cma_info *ptr_cma_info, uint offset); +static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem); +static void osl_sec_dma_init_consistent(osl_t *osh); +static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, + ulong *pap); +static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); +#endif /* BCM_SECURE_DMA */ + +/* PCMCIA attribute space access macros */ + +uint32 g_assert_type = 0; /* By Default Kernel Panic */ + +module_param(g_assert_type, int, 0); +#ifdef BCM_SECURE_DMA +#define SECDMA_MODULE_PARAMS 0 +#define SECDMA_EXT_FILE 1 +unsigned long secdma_addr = 0; +unsigned long secdma_addr2 
= 0; +u32 secdma_size = 0; +u32 secdma_size2 = 0; +module_param(secdma_addr, ulong, 0); +module_param(secdma_size, int, 0); +module_param(secdma_addr2, ulong, 0); +module_param(secdma_size2, int, 0); +static int secdma_found = 0; +#endif /* BCM_SECURE_DMA */ + +#ifdef USE_DMA_LOCK +#define DMA_LOCK(osh) spin_lock_bh(&(osh)->dma_lock) +#define DMA_UNLOCK(osh) spin_unlock_bh(&(osh)->dma_lock) +#define DMA_LOCK_INIT(osh) spin_lock_init(&(osh)->dma_lock) +#else +#define DMA_LOCK(osh) do { /* noop */ } while(0) +#define DMA_UNLOCK(osh) do { /* noop */ } while(0) +#define DMA_LOCK_INIT(osh) do { /* noop */ } while(0) +#endif /* USE_DMA_LOCK */ + +static int16 linuxbcmerrormap[] = +{ 0, /* 0 */ + -EINVAL, /* BCME_ERROR */ + -EINVAL, /* BCME_BADARG */ + -EINVAL, /* BCME_BADOPTION */ + -EINVAL, /* BCME_NOTUP */ + -EINVAL, /* BCME_NOTDOWN */ + -EINVAL, /* BCME_NOTAP */ + -EINVAL, /* BCME_NOTSTA */ + -EINVAL, /* BCME_BADKEYIDX */ + -EINVAL, /* BCME_RADIOOFF */ + -EINVAL, /* BCME_NOTBANDLOCKED */ + -EINVAL, /* BCME_NOCLK */ + -EINVAL, /* BCME_BADRATESET */ + -EINVAL, /* BCME_BADBAND */ + -E2BIG, /* BCME_BUFTOOSHORT */ + -E2BIG, /* BCME_BUFTOOLONG */ + -EBUSY, /* BCME_BUSY */ + -EINVAL, /* BCME_NOTASSOCIATED */ + -EINVAL, /* BCME_BADSSIDLEN */ + -EINVAL, /* BCME_OUTOFRANGECHAN */ + -EINVAL, /* BCME_BADCHAN */ + -EFAULT, /* BCME_BADADDR */ + -ENOMEM, /* BCME_NORESOURCE */ + -EOPNOTSUPP, /* BCME_UNSUPPORTED */ + -EMSGSIZE, /* BCME_BADLENGTH */ + -EINVAL, /* BCME_NOTREADY */ + -EPERM, /* BCME_EPERM */ + -ENOMEM, /* BCME_NOMEM */ + -EINVAL, /* BCME_ASSOCIATED */ + -ERANGE, /* BCME_RANGE */ + -EINVAL, /* BCME_NOTFOUND */ + -EINVAL, /* BCME_WME_NOT_ENABLED */ + -EINVAL, /* BCME_TSPEC_NOTFOUND */ + -EINVAL, /* BCME_ACM_NOTSUPPORTED */ + -EINVAL, /* BCME_NOT_WME_ASSOCIATION */ + -EIO, /* BCME_SDIO_ERROR */ + -ENODEV, /* BCME_DONGLE_DOWN */ + -EINVAL, /* BCME_VERSION */ + -EIO, /* BCME_TXFAIL */ + -EIO, /* BCME_RXFAIL */ + -ENODEV, /* BCME_NODEVICE */ + -EINVAL, /* BCME_NMODE_DISABLED 
*/ + -ENODATA, /* BCME_NONRESIDENT */ + -EINVAL, /* BCME_SCANREJECT */ + -EINVAL, /* BCME_USAGE_ERROR */ + -EIO, /* BCME_IOCTL_ERROR */ + -EIO, /* BCME_SERIAL_PORT_ERR */ + -EOPNOTSUPP, /* BCME_DISABLED, BCME_NOTENABLED */ + -EIO, /* BCME_DECERR */ + -EIO, /* BCME_ENCERR */ + -EIO, /* BCME_MICERR */ + -ERANGE, /* BCME_REPLAY */ + -EINVAL, /* BCME_IE_NOTFOUND */ + -EINVAL, /* BCME_DATA_NOTFOUND */ + -EINVAL, /* BCME_NOT_GC */ + -EINVAL, /* BCME_PRS_REQ_FAILED */ + -EINVAL, /* BCME_NO_P2P_SE */ + -EINVAL, /* BCME_NOA_PND */ + -EINVAL, /* BCME_FRAG_Q_FAILED */ + -EINVAL, /* BCME_GET_AF_FAILED */ + -EINVAL, /* BCME_MSCH_NOTREADY */ + -EINVAL, /* BCME_IOV_LAST_CMD */ + -EINVAL, /* BCME_MINIPMU_CAL_FAIL */ + -EINVAL, /* BCME_RCAL_FAIL */ + -EINVAL, /* BCME_LPF_RCCAL_FAIL */ + -EINVAL, /* BCME_DACBUF_RCCAL_FAIL */ + -EINVAL, /* BCME_VCOCAL_FAIL */ + -EINVAL, /* BCME_BANDLOCKED */ + -EINVAL, /* BCME_DNGL_DEVRESET */ + +/* When an new error code is added to bcmutils.h, add os + * specific error translation here as well + */ +/* check if BCME_LAST changed since the last time this function was updated */ +#if BCME_LAST != -68 +#error "You need to add a OS error translation in the linuxbcmerrormap \ + for new error code defined in bcmutils.h" +#endif // endif +}; +uint lmtest = FALSE; + +#ifdef DHD_MAP_LOGGING +#define DHD_MAP_LOG_SIZE 2048 + +typedef struct dhd_map_record { + dma_addr_t addr; + uint64 time; +} dhd_map_log_t; + +dhd_map_log_t *dhd_map_log = NULL, *dhd_unmap_log = NULL; +uint32 map_idx = 0, unmap_idx = 0; + +void +osl_dma_map_dump(void) +{ + printk("%s: map_idx=%d unmap_idx=%d current time=%llu\n", + __FUNCTION__, map_idx, unmap_idx, OSL_SYSUPTIME_US()); + if (dhd_map_log && dhd_unmap_log) { + printk("%s: dhd_map_log(pa)=%llx size=%d, dma_unmap_log(pa)=%llx size=%d\n", + __FUNCTION__, (uint64)__virt_to_phys((ulong)dhd_map_log), + (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE), + (uint64)__virt_to_phys((ulong)dhd_unmap_log), + (uint32)(sizeof(dhd_map_log_t) 
* DHD_MAP_LOG_SIZE)); + } +} +#endif /* DHD_MAP_LOGGING */ + +/* translate bcmerrors into linux errors */ +int +osl_error(int bcmerror) +{ + if (bcmerror > 0) + bcmerror = 0; + else if (bcmerror < BCME_LAST) + bcmerror = BCME_ERROR; + + /* Array bounds covered by ASSERT in osl_attach */ + return linuxbcmerrormap[-bcmerror]; +} + +osl_t * +osl_attach(void *pdev, uint bustype, bool pkttag) +{ + void **osl_cmn = NULL; + osl_t *osh; + gfp_t flags; +#ifdef BCM_SECURE_DMA + u32 secdma_memsize; +#endif // endif + + flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; + if (!(osh = kmalloc(sizeof(osl_t), flags))) + return osh; + + ASSERT(osh); + + bzero(osh, sizeof(osl_t)); + + if (osl_cmn == NULL || *osl_cmn == NULL) { + if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) { + kfree(osh); + return NULL; + } + bzero(osh->cmn, sizeof(osl_cmn_t)); + if (osl_cmn) + *osl_cmn = osh->cmn; + atomic_set(&osh->cmn->malloced, 0); + osh->cmn->dbgmem_list = NULL; + spin_lock_init(&(osh->cmn->dbgmem_lock)); + + spin_lock_init(&(osh->cmn->pktalloc_lock)); + + } else { + osh->cmn = *osl_cmn; + } + atomic_add(1, &osh->cmn->refcount); + + bcm_object_trace_init(); + + /* Check that error map has the right number of entries in it */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); + + osh->failed = 0; + osh->pdev = pdev; + osh->pub.pkttag = pkttag; + osh->bustype = bustype; + osh->magic = OS_HANDLE_MAGIC; +#ifdef BCM_SECURE_DMA + + if ((secdma_addr != 0) && (secdma_size != 0)) { + printk("linux_osl.c: Buffer info passed via module params, using it.\n"); + if (secdma_found == 0) { + osh->contig_base_alloc = (phys_addr_t)secdma_addr; + secdma_memsize = secdma_size; + } else if (secdma_found == 1) { + osh->contig_base_alloc = (phys_addr_t)secdma_addr2; + secdma_memsize = secdma_size2; + } else { + printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found); + kfree(osh); + return NULL; + } + osh->contig_base = (phys_addr_t)osh->contig_base_alloc; + printf("linux_osl.c: 
secdma_cma_size = 0x%x\n", secdma_memsize); + printf("linux_osl.c: secdma_cma_addr = 0x%x \n", + (unsigned int)osh->contig_base_alloc); + osh->stb_ext_params = SECDMA_MODULE_PARAMS; + } + else if (stbpriv_init(osh) == 0) { + printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n"); + if (secdma_found == 0) { + osh->contig_base_alloc = + (phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0); + secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0); + } else if (secdma_found == 1) { + osh->contig_base_alloc = + (phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0); + secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0); + } else { + printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found); + kfree(osh); + return NULL; + } + osh->contig_base = (phys_addr_t)osh->contig_base_alloc; + printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize); + printf("linux_osl.c: secdma_cma_addr = 0x%x \n", + (unsigned int)osh->contig_base_alloc); + osh->stb_ext_params = SECDMA_EXT_FILE; + } + else { + printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n"); + kfree(osh); + return NULL; + } + secdma_found++; + osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh, + phys_to_page((u32)osh->contig_base_alloc), + CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE); + + if (osh->contig_base_alloc_coherent_va == NULL) { + if (osh->cmn) + kfree(osh->cmn); + kfree(osh); + return NULL; + } + osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va; + osh->contig_base_alloc_coherent = osh->contig_base_alloc; + osl_sec_dma_init_consistent(osh); + + osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK; + + osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh, + phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE); + if (osh->contig_base_alloc_va == NULL) { + osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK); + if (osh->cmn) + kfree(osh->cmn); + 
kfree(osh); + return NULL; + } + osh->contig_base_va = osh->contig_base_alloc_va; + +#ifdef NOT_YET + /* + * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512); + * osh->sec_list_base_512 = osh->sec_list_512; + * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048); + * osh->sec_list_base_2048 = osh->sec_list_2048; + */ +#endif // endif + if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh, + CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) { + osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK); + osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK); + if (osh->cmn) + kfree(osh->cmn); + kfree(osh); + return NULL; + } + osh->sec_list_base_4096 = osh->sec_list_4096; + +#endif /* BCM_SECURE_DMA */ + + switch (bustype) { + case PCI_BUS: + case SI_BUS: + case PCMCIA_BUS: + osh->pub.mmbus = TRUE; + break; + case JTAG_BUS: + case SDIO_BUS: + case USB_BUS: + case SPI_BUS: + case RPC_BUS: + osh->pub.mmbus = FALSE; + break; + default: + ASSERT(FALSE); + break; + } + + DMA_LOCK_INIT(osh); + +#ifdef DHD_MAP_LOGGING + dhd_map_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags); + if (dhd_map_log) { + memset(dhd_map_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE); + } + dhd_unmap_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags); + if (dhd_unmap_log) { + memset(dhd_unmap_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE); + } +#endif /* DHD_MAP_LOGGING */ + + return osh; +} + +void osl_set_bus_handle(osl_t *osh, void *bus_handle) +{ + osh->bus_handle = bus_handle; +} + +void* osl_get_bus_handle(osl_t *osh) +{ + return osh->bus_handle; +} + +#if defined(BCM_BACKPLANE_TIMEOUT) +void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx) +{ + if (osh) { + osh->bpt_cb = (bpt_cb_fn)bpt_cb; + osh->sih = bpt_ctx; + } +} +#endif /* BCM_BACKPLANE_TIMEOUT */ + +void +osl_detach(osl_t *osh) +{ + if (osh == NULL) + return; + +#ifdef BCM_SECURE_DMA + if 
(osh->stb_ext_params == SECDMA_EXT_FILE) + stbpriv_exit(osh); +#ifdef NOT_YET + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512); + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048); +#endif /* NOT_YET */ + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096); + osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK); + osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK); + secdma_found--; +#endif /* BCM_SECURE_DMA */ + + bcm_object_trace_deinit(); + +#ifdef DHD_MAP_LOGGING + if (dhd_map_log) { + kfree(dhd_map_log); + } + if (dhd_unmap_log) { + kfree(dhd_unmap_log); + } +#endif /* DHD_MAP_LOGGING */ + + ASSERT(osh->magic == OS_HANDLE_MAGIC); + atomic_sub(1, &osh->cmn->refcount); + if (atomic_read(&osh->cmn->refcount) == 0) { + kfree(osh->cmn); + } + kfree(osh); +} + +/* APIs to set/get specific quirks in OSL layer */ +void BCMFASTPATH +osl_flag_set(osl_t *osh, uint32 mask) +{ + osh->flags |= mask; +} + +void +osl_flag_clr(osl_t *osh, uint32 mask) +{ + osh->flags &= ~mask; +} + +#if defined(STB) +inline bool BCMFASTPATH +#else +bool +#endif // endif +osl_is_flag_set(osl_t *osh, uint32 mask) +{ + return (osh->flags & mask); +} + +#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \ + defined(STB_SOC_WIFI) + +inline int BCMFASTPATH +osl_arch_is_coherent(void) +{ + return 0; +} + +inline int BCMFASTPATH +osl_acp_war_enab(void) +{ + return 0; +} + +inline void BCMFASTPATH +osl_cache_flush(void *va, uint size) +{ + + if (size > 0) +#ifdef STB_SOC_WIFI + dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX); +#else /* STB_SOC_WIFI */ + dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, + DMA_TO_DEVICE); +#endif /* STB_SOC_WIFI */ +} + +inline void BCMFASTPATH +osl_cache_inv(void *va, uint size) +{ + +#ifdef STB_SOC_WIFI + 
dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX); +#else /* STB_SOC_WIFI */ + dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE); +#endif /* STB_SOC_WIFI */ +} + +inline void BCMFASTPATH +osl_prefetch(const void *ptr) +{ +#if !defined(STB_SOC_WIFI) + __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc"); +#endif // endif +} + +#endif // endif + +uint32 +osl_pci_read_config(osl_t *osh, uint offset, uint size) +{ + uint val = 0; + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + /* only 4byte access supported */ + ASSERT(size == 4); + + do { + pci_read_config_dword(osh->pdev, offset, &val); + if (val != 0xffffffff) + break; + } while (retry--); + + return (val); +} + +void +osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val) +{ + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + /* only 4byte access supported */ + ASSERT(size == 4); + + do { + pci_write_config_dword(osh->pdev, offset, val); + if (offset != PCI_BAR0_WIN) + break; + if (osl_pci_read_config(osh, offset, size) == val) + break; + } while (retry--); + +} + +/* return bus # for the pci device pointed by osh->pdev */ +uint +osl_pci_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + +#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) + return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus); +#else + return ((struct pci_dev *)osh->pdev)->bus->number; +#endif // endif +} + +/* return slot # for the pci device pointed by osh->pdev */ +uint +osl_pci_slot(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + +#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1; +#else + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); +#endif // endif +} + +/* return domain # for the pci device pointed by 
osh->pdev */ +uint +osl_pcie_domain(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus); +} + +/* return bus # for the pci device pointed by osh->pdev */ +uint +osl_pcie_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return ((struct pci_dev *)osh->pdev)->bus->number; +} + +/* return the pci device pointed by osh->pdev */ +struct pci_dev * +osl_pci_device(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return osh->pdev; +} + +static void +osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write) +{ +} + +void +osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE); +} + +void +osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE); +} + +void * +osl_malloc(osl_t *osh, uint size) +{ + void *addr; + gfp_t flags; + + /* only ASSERT if osh is defined */ + if (osh) + ASSERT(osh->magic == OS_HANDLE_MAGIC); +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) + { + unsigned long irq_flags; + int i = 0; + if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) + { + spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags); + + for (i = 0; i < STATIC_BUF_MAX_NUM; i++) + { + if (bcm_static_buf->buf_use[i] == 0) + break; + } + + if (i == STATIC_BUF_MAX_NUM) + { + spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags); + printk("all static buff in use!\n"); + goto original; + } + + bcm_static_buf->buf_use[i] = 1; + spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags); + + bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size); + if (osh) + atomic_add(size, &osh->cmn->malloced); + + return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i)); + } + } +original: +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; + if ((addr = kmalloc(size, flags)) == NULL) { + if (osh) + osh->failed++; + return (NULL); + } + if (osh && osh->cmn) + atomic_add(size, &osh->cmn->malloced); + + return (addr); +} + +void * +osl_mallocz(osl_t *osh, uint size) +{ + void *ptr; + + ptr = osl_malloc(osh, size); + + if (ptr != NULL) { + bzero(ptr, size); + } + + return ptr; +} + +void +osl_mfree(osl_t *osh, void *addr, uint size) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + unsigned long flags; + + if (bcm_static_buf) + { + if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr + <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN))) + { + int buf_idx = 0; + + buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE; + + spin_lock_irqsave(&bcm_static_buf->static_lock, flags); + bcm_static_buf->buf_use[buf_idx] = 0; + spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags); + + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + atomic_sub(size, &osh->cmn->malloced); + } + return; + } + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + + ASSERT(size <= osl_malloced(osh)); + + atomic_sub(size, &osh->cmn->malloced); + } + kfree(addr); +} + +void * +osl_vmalloc(osl_t *osh, uint size) +{ + void *addr; + + /* only ASSERT if osh is defined */ + if (osh) + ASSERT(osh->magic == OS_HANDLE_MAGIC); + if ((addr = vmalloc(size)) == NULL) { + if (osh) + osh->failed++; + return (NULL); + } + if (osh && osh->cmn) + atomic_add(size, &osh->cmn->malloced); + + return (addr); +} + +void * +osl_vmallocz(osl_t *osh, uint size) +{ + void *ptr; + + ptr = osl_vmalloc(osh, size); + + if (ptr != NULL) { + bzero(ptr, size); + } + + return ptr; +} + +void +osl_vmfree(osl_t *osh, void *addr, uint size) +{ + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + + ASSERT(size <= osl_malloced(osh)); + + atomic_sub(size, &osh->cmn->malloced); + } + vfree(addr); +} + +uint +osl_check_memleak(osl_t 
*osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + if (atomic_read(&osh->cmn->refcount) == 1) + return (atomic_read(&osh->cmn->malloced)); + else + return 0; +} + +uint +osl_malloced(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (atomic_read(&osh->cmn->malloced)); +} + +uint +osl_malloc_failed(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (osh->failed); +} + +uint +osl_dma_consistent_align(void) +{ + return (PAGE_SIZE); +} + +void* +osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap) +{ + void *va; + uint16 align = (1 << align_bits); + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) + size += align; + *alloced = size; + +#ifndef BCM_SECURE_DMA +#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \ + defined(STB_SOC_WIFI) + va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO); + if (va) + *pap = (ulong)__virt_to_phys((ulong)va); +#else + { + dma_addr_t pap_lin; + struct pci_dev *hwdev = osh->pdev; + gfp_t flags; +#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL + flags = GFP_ATOMIC; +#else + flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; +#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */ + va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags); +#ifdef BCMDMA64OSL + PHYSADDRLOSET(*pap, pap_lin & 0xffffffff); + PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff); +#else + *pap = (dmaaddr_t)pap_lin; +#endif /* BCMDMA64OSL */ + } +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#else + va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap); +#endif /* BCM_SECURE_DMA */ + return va; +} + +void +osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) +{ +#ifdef BCMDMA64OSL + dma_addr_t paddr; +#endif /* BCMDMA64OSL */ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + +#ifndef BCM_SECURE_DMA +#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \ + defined(STB_SOC_WIFI) + kfree(va); +#else +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); + pci_free_consistent(osh->pdev, size, va, paddr); +#else + pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa); +#endif /* BCMDMA64OSL */ +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#else + osl_sec_dma_free_consistent(osh, va, size, pa); +#endif /* BCM_SECURE_DMA */ +} + +void * +osl_virt_to_phys(void *va) +{ + return (void *)(uintptr)virt_to_phys(va); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#include +void BCMFASTPATH +osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah) +{ + return; +} +#endif /* LINUX_VERSION_CODE >= 2.6.36 */ + +dmaaddr_t BCMFASTPATH +osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah) +{ + int dir; + dmaaddr_t ret_addr; + dma_addr_t map_addr; + int ret; + + DMA_LOCK(osh); + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + dir = (direction == DMA_TX)? 
PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; + +#ifdef STB_SOC_WIFI +#if (__LINUX_ARM_ARCH__ == 8) + /* need to flush or invalidate the cache here */ + if (dir == DMA_TX) { /* to device */ + osl_cache_flush(va, size); + } else if (dir == DMA_RX) { /* from device */ + osl_cache_inv(va, size); + } else { /* both */ + osl_cache_flush(va, size); + osl_cache_inv(va, size); + } + DMA_UNLOCK(osh); + return virt_to_phys(va); +#else /* (__LINUX_ARM_ARCH__ == 8) */ + map_addr = dma_map_single(osh->pdev, va, size, dir); + DMA_UNLOCK(osh); + return map_addr; +#endif /* (__LINUX_ARM_ARCH__ == 8) */ +#else /* ! STB_SOC_WIFI */ + map_addr = pci_map_single(osh->pdev, va, size, dir); +#endif /* ! STB_SOC_WIFI */ + +#ifdef DHD_MAP_LOGGING + if (dhd_map_log) { + dhd_map_log[map_idx].addr = map_addr; + dhd_map_log[map_idx].time = OSL_SYSUPTIME_US(); + map_idx++; + map_idx = map_idx % DHD_MAP_LOG_SIZE; + } +#endif /* DHD_MAP_LOGGING */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + ret = pci_dma_mapping_error(osh->pdev, map_addr); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5)) + ret = pci_dma_mapping_error(map_addr); +#else + ret = 0; +#endif // endif + if (ret) { + printk("%s: Failed to map memory\n", __FUNCTION__); + PHYSADDRLOSET(ret_addr, 0); + PHYSADDRHISET(ret_addr, 0); + } else { + PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff); + PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff); + } + + DMA_UNLOCK(osh); + + return ret_addr; +} + +void BCMFASTPATH +osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction) +{ + int dir; +#ifdef BCMDMA64OSL + dma_addr_t paddr; +#endif /* BCMDMA64OSL */ + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + DMA_LOCK(osh); + + dir = (direction == DMA_TX)? 
PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; + +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); +#ifdef DHD_MAP_LOGGING + if (dhd_unmap_log) { + dhd_unmap_log[unmap_idx].addr = paddr; + dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US(); + unmap_idx++; + unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE; + } +#endif /* DHD_MAP_LOGGING */ + + pci_unmap_single(osh->pdev, paddr, size, dir); +#else /* BCMDMA64OSL */ + +#ifdef STB_SOC_WIFI +#if (__LINUX_ARM_ARCH__ == 8) + if (dir == DMA_TX) { /* to device */ + dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX); + } else if (dir == DMA_RX) { /* from device */ + dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX); + } else { /* both */ + dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX); + dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX); + } +#else /* (__LINUX_ARM_ARCH__ == 8) */ + dma_unmap_single(osh->pdev, (uintptr)pa, size, dir); +#endif /* (__LINUX_ARM_ARCH__ == 8) */ +#else /* STB_SOC_WIFI */ +#ifdef DHD_MAP_LOGGING + if (dhd_unmap_log) { + dhd_unmap_log[unmap_idx].addr = pa; + dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US(); + unmap_idx++; + unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE; + } +#endif /* DHD_MAP_LOGGING */ + + pci_unmap_single(osh->pdev, (uint32)pa, size, dir); +#endif /* STB_SOC_WIFI */ + +#endif /* BCMDMA64OSL */ + DMA_UNLOCK(osh); +} + +/* OSL function for CPU relax */ +inline void BCMFASTPATH +osl_cpu_relax(void) +{ + cpu_relax(); +} + +extern void osl_preempt_disable(osl_t *osh) +{ + preempt_disable(); +} + +extern void osl_preempt_enable(osl_t *osh) +{ + preempt_enable(); +} + +#if defined(BCMASSERT_LOG) +void +osl_assert(const char *exp, const char *file, int line) +{ + char tempbuf[256]; + const char *basename; + + basename = strrchr(file, '/'); + /* skip the '/' */ + if (basename) + basename++; + + if (!basename) + basename = file; + +#ifdef BCMASSERT_LOG + snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n", + exp, basename, line); +#endif /* BCMASSERT_LOG */ + + switch (g_assert_type) { + 
case 0: + panic("%s", tempbuf); + break; + case 1: + /* fall through */ + case 3: + printk("%s", tempbuf); + break; + case 2: + printk("%s", tempbuf); + BUG(); + break; + default: + break; + } +} +#endif // endif + +void +osl_delay(uint usec) +{ + uint d; + + while (usec > 0) { + d = MIN(usec, 1000); + udelay(d); + usec -= d; + } +} + +void +osl_sleep(uint ms) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + if (ms < 20) + usleep_range(ms*1000, ms*1000 + 1000); + else +#endif // endif + msleep(ms); +} + +uint64 +osl_sysuptime_us(void) +{ + struct timeval tv; + uint64 usec; + + do_gettimeofday(&tv); + /* tv_usec content is fraction of a second */ + usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec; + return usec; +} + +/* + * OSLREGOPS specifies the use of osl_XXX routines to be used for register access + */ + +/* + * BINOSL selects the slightly slower function-call-based binary compatible osl. + */ + +uint32 +osl_rand(void) +{ + uint32 rand; + + get_random_bytes(&rand, sizeof(rand)); + + return rand; +} + +/* Linux Kernel: File Operations: start */ +void * +osl_os_open_image(char *filename) +{ + struct file *fp; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? 
+ */ + if (IS_ERR(fp)) + fp = NULL; + + return fp; +} + +int +osl_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + + if (!image) + return 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) + rdlen = kernel_read(fp, buf, len, &fp->f_pos); +#else + rdlen = kernel_read(fp, fp->f_pos, buf, len); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */ + + if (rdlen > 0) + fp->f_pos += rdlen; + + return rdlen; +} + +void +osl_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} + +int +osl_os_image_size(void *image) +{ + int len = 0, curroffset; + + if (image) { + /* store the current offset */ + curroffset = generic_file_llseek(image, 0, 1); + /* goto end of file to get length */ + len = generic_file_llseek(image, 0, 2); + /* restore back the offset */ + generic_file_llseek(image, curroffset, 0); + } + return len; +} + +/* Linux Kernel: File Operations: end */ + +#if (defined(STB) && defined(__arm__)) +inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size) +{ + unsigned long flags = 0; + int pci_access = 0; + int acp_war_enab = ACP_WAR_ENAB(); + + if (osh && BUSTYPE(osh->bustype) == PCI_BUS) + pci_access = 1; + + if (pci_access && acp_war_enab) + spin_lock_irqsave(&l2x0_reg_lock, flags); + + switch (size) { + case sizeof(uint8): + *(volatile uint8*)v = readb((volatile uint8*)(addr)); + break; + case sizeof(uint16): + *(volatile uint16*)v = readw((volatile uint16*)(addr)); + break; + case sizeof(uint32): + *(volatile uint32*)v = readl((volatile uint32*)(addr)); + break; + case sizeof(uint64): + *(volatile uint64*)v = *((volatile uint64*)(addr)); + break; + } + + if (pci_access && acp_war_enab) + spin_unlock_irqrestore(&l2x0_reg_lock, flags); +} +#endif // endif + +#if defined(BCM_BACKPLANE_TIMEOUT) +inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size) +{ + bool poll_timeout = FALSE; + static int in_si_clear = 
FALSE; + + switch (size) { + case sizeof(uint8): + *(volatile uint8*)v = readb((volatile uint8*)(addr)); + if (*(volatile uint8*)v == 0xff) + poll_timeout = TRUE; + break; + case sizeof(uint16): + *(volatile uint16*)v = readw((volatile uint16*)(addr)); + if (*(volatile uint16*)v == 0xffff) + poll_timeout = TRUE; + break; + case sizeof(uint32): + *(volatile uint32*)v = readl((volatile uint32*)(addr)); + if (*(volatile uint32*)v == 0xffffffff) + poll_timeout = TRUE; + break; + case sizeof(uint64): + *(volatile uint64*)v = *((volatile uint64*)(addr)); + if (*(volatile uint64*)v == 0xffffffffffffffff) + poll_timeout = TRUE; + break; + } + + if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) { + in_si_clear = TRUE; + osh->bpt_cb((void *)osh->sih, (void *)addr); + in_si_clear = FALSE; + } +} +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#ifdef BCM_SECURE_DMA +static void * +osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr) +{ + + struct page **map; + int order, i; + void *addr = NULL; + + size = PAGE_ALIGN(size); + order = get_order(size); + + map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC); + + if (map == NULL) + return NULL; + + for (i = 0; i < (size >> PAGE_SHIFT); i++) + map[i] = page + i; + + if (iscache) { + addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL)); + if (isdecr) { + osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page)); + } + } else { + +#if defined(__ARM_ARCH_7A__) + addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, + pgprot_noncached(__pgprot(PAGE_KERNEL))); +#endif // endif + if (isdecr) { + osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page)); + } + } + + kfree(map); + return (void *)addr; +} + +static void +osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size) +{ + vunmap(contig_base_va); +} + +static int +osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list) +{ + int i; + int ret = BCME_OK; + 
sec_mem_elem_t *sec_mem_elem; + + if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) { + + *list = sec_mem_elem; + bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max)); + for (i = 0; i < max-1; i++) { + sec_mem_elem->next = (sec_mem_elem + 1); + sec_mem_elem->size = mbsize; + sec_mem_elem->pa_cma = osh->contig_base_alloc; + sec_mem_elem->vac = osh->contig_base_alloc_va; + + sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + osh->contig_base_alloc += mbsize; + osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize); + + sec_mem_elem = sec_mem_elem + 1; + } + sec_mem_elem->next = NULL; + sec_mem_elem->size = mbsize; + sec_mem_elem->pa_cma = osh->contig_base_alloc; + sec_mem_elem->vac = osh->contig_base_alloc_va; + + sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + osh->contig_base_alloc += mbsize; + osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize); + + } else { + printf("%s sec mem elem kmalloc failed\n", __FUNCTION__); + ret = BCME_ERROR; + } + return ret; +} + +static void +osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base) +{ + if (sec_list_base) + kfree(sec_list_base); +} + +static sec_mem_elem_t * BCMFASTPATH +osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction, + struct sec_cma_info *ptr_cma_info, uint offset) +{ + sec_mem_elem_t *sec_mem_elem = NULL; + +#ifdef NOT_YET + if (size <= 512 && osh->sec_list_512) { + sec_mem_elem = osh->sec_list_512; + osh->sec_list_512 = sec_mem_elem->next; + } + else if (size <= 2048 && osh->sec_list_2048) { + sec_mem_elem = osh->sec_list_2048; + osh->sec_list_2048 = sec_mem_elem->next; + } + else +#else + ASSERT(osh->sec_list_4096); + sec_mem_elem = osh->sec_list_4096; + osh->sec_list_4096 = sec_mem_elem->next; +#endif /* NOT_YET */ + + sec_mem_elem->next = NULL; + + if (ptr_cma_info->sec_alloc_list_tail) { + ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem; + 
ptr_cma_info->sec_alloc_list_tail = sec_mem_elem; + } + else { + /* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */ + ASSERT(ptr_cma_info->sec_alloc_list == NULL); + ptr_cma_info->sec_alloc_list = sec_mem_elem; + ptr_cma_info->sec_alloc_list_tail = sec_mem_elem; + } + return sec_mem_elem; +} + +static void BCMFASTPATH +osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem) +{ + sec_mem_elem->dma_handle = 0x0; + sec_mem_elem->va = NULL; +#ifdef NOT_YET + if (sec_mem_elem->size == 512) { + sec_mem_elem->next = osh->sec_list_512; + osh->sec_list_512 = sec_mem_elem; + } else if (sec_mem_elem->size == 2048) { + sec_mem_elem->next = osh->sec_list_2048; + osh->sec_list_2048 = sec_mem_elem; + } else if (sec_mem_elem->size == 4096) { +#endif /* NOT_YET */ + sec_mem_elem->next = osh->sec_list_4096; + osh->sec_list_4096 = sec_mem_elem; +#ifdef NOT_YET + } + else + printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size); +#endif /* NOT_YET */ +} + +static sec_mem_elem_t * BCMFASTPATH +osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle) +{ + sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list; + sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list; + + if (sec_mem_elem->dma_handle == dma_handle) { + + ptr_cma_info->sec_alloc_list = sec_mem_elem->next; + + if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) { + ptr_cma_info->sec_alloc_list_tail = NULL; + ASSERT(ptr_cma_info->sec_alloc_list == NULL); + } + + return sec_mem_elem; + } + sec_mem_elem = sec_mem_elem->next; + + while (sec_mem_elem != NULL) { + + if (sec_mem_elem->dma_handle == dma_handle) { + + sec_prv_elem->next = sec_mem_elem->next; + if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) + ptr_cma_info->sec_alloc_list_tail = sec_prv_elem; + + return sec_mem_elem; + } + sec_prv_elem = sec_mem_elem; + sec_mem_elem = sec_mem_elem->next; + } + return NULL; +} + +static sec_mem_elem_t * 
+osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info) +{ + sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list; + + if (sec_mem_elem) { + + ptr_cma_info->sec_alloc_list = sec_mem_elem->next; + + if (ptr_cma_info->sec_alloc_list == NULL) + ptr_cma_info->sec_alloc_list_tail = NULL; + + return sec_mem_elem; + + } else + return NULL; +} + +static void * BCMFASTPATH +osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info) +{ + return ptr_cma_info->sec_alloc_list_tail; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info) +{ + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + uint loffset; + void *vaorig = ((uint8 *)va + size); + dma_addr_t dma_handle = 0x0; + /* packet will be the one added with osl_sec_dma_map() just before this call */ + + sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info); + + if (sec_mem_elem && sec_mem_elem->va == vaorig) { + + pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + + dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size, + (direction == DMA_TX ? 
DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + } else { + printf("%s: error orig va not found va = 0x%p \n", + __FUNCTION__, vaorig); + } + return dma_handle; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset) +{ + + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + void *pa_cma_kmap_va = NULL; + uint buflen = 0; + dma_addr_t dma_handle = 0x0; + uint loffset; +#ifdef NOT_YET + int *fragva; + struct sk_buff *skb; + int i = 0; +#endif /* NOT_YET */ + + ASSERT((direction == DMA_RX) || (direction == DMA_TX)); + sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset); + + sec_mem_elem->va = va; + sec_mem_elem->direction = direction; + pa_cma_page = sec_mem_elem->pa_cma_page; + + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + /* pa_cma_kmap_va = kmap_atomic(pa_cma_page); + * pa_cma_kmap_va += loffset; + */ + + pa_cma_kmap_va = sec_mem_elem->vac; + pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset); + buflen = size; + + if (direction == DMA_TX) { + memcpy((uint8*)pa_cma_kmap_va+offset, va, size); + +#ifdef NOT_YET + if (p == NULL) { + + memcpy(pa_cma_kmap_va, va, size); + /* prhex("Txpkt",pa_cma_kmap_va, size); */ + } else { + for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) { + if (skb_is_nonlinear(skb)) { + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + fragva = kmap_atomic(skb_frag_page(f)); + pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen); + memcpy((pa_cma_kmap_va), + (fragva + f->page_offset), skb_frag_size(f)); + kunmap_atomic(fragva); + buflen += skb_frag_size(f); + } + } else { + + pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen); + memcpy(pa_cma_kmap_va, skb->data, skb->len); + buflen += skb->len; + } + } + + } +#endif /* NOT_YET */ + if (dmah) { + dmah->nsegs = 1; + dmah->origsize = buflen; + } + } + else + { + 
if ((p != NULL) && (dmah != NULL)) { + dmah->nsegs = 1; + dmah->origsize = buflen; + } + *(uint32 *)(pa_cma_kmap_va) = 0x0; + } + + if (direction == DMA_RX) { + flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int)); + } + dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen, + (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE)); + if (dmah) { + dmah->segs[0].addr = dma_handle; + dmah->segs[0].length = buflen; + } + sec_mem_elem->dma_handle = dma_handle; + /* kunmap_atomic(pa_cma_kmap_va-loffset); */ + return dma_handle; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map) +{ + + struct page *pa_cma_page; + phys_addr_t pa_cma; + dma_addr_t dma_handle = 0x0; + uint loffset; + + pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa); + pa_cma_page = phys_to_page(pa_cma); + loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1)); + + dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size, + (direction == DMA_TX ? 
DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + return dma_handle; +} + +void BCMFASTPATH +osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction, +void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset) +{ + sec_mem_elem_t *sec_mem_elem; +#ifdef NOT_YET + struct page *pa_cma_page; +#endif // endif + void *pa_cma_kmap_va = NULL; + uint buflen = 0; + dma_addr_t pa_cma; + void *va; + int read_count = 0; + BCM_REFERENCE(buflen); + BCM_REFERENCE(read_count); + + sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle); + ASSERT(sec_mem_elem); + + va = sec_mem_elem->va; + va = (uint8 *)va - offset; + pa_cma = sec_mem_elem->pa_cma; + +#ifdef NOT_YET + pa_cma_page = sec_mem_elem->pa_cma_page; +#endif // endif + + if (direction == DMA_RX) { + + if (p == NULL) { + + /* pa_cma_kmap_va = kmap_atomic(pa_cma_page); + * pa_cma_kmap_va += loffset; + */ + + pa_cma_kmap_va = sec_mem_elem->vac; + + do { + invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int)); + + buflen = *(uint *)(pa_cma_kmap_va); + if (buflen) + break; + + OSL_DELAY(1); + read_count++; + } while (read_count < 200); + dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE); + memcpy(va, pa_cma_kmap_va, size); + /* kunmap_atomic(pa_cma_kmap_va); */ + } +#ifdef NOT_YET + else { + buflen = 0; + for (skb = (struct sk_buff *)p; (buflen < size) && + (skb != NULL); skb = skb->next) { + if (skb_is_nonlinear(skb)) { + pa_cma_kmap_va = kmap_atomic(pa_cma_page); + for (i = 0; (buflen < size) && + (i < skb_shinfo(skb)->nr_frags); i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + cpuaddr = kmap_atomic(skb_frag_page(f)); + pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen); + memcpy((cpuaddr + f->page_offset), + pa_cma_kmap_va, skb_frag_size(f)); + kunmap_atomic(cpuaddr); + buflen += skb_frag_size(f); + } + kunmap_atomic(pa_cma_kmap_va); + } else { + pa_cma_kmap_va = kmap_atomic(pa_cma_page); + pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen); + memcpy(skb->data, pa_cma_kmap_va, 
skb->len); + kunmap_atomic(pa_cma_kmap_va); + buflen += skb->len; + } + + } + + } +#endif /* NOT YET */ + } else { + dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE); + } + + osl_sec_dma_free_mem_elem(osh, sec_mem_elem); +} + +void +osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info) +{ + + sec_mem_elem_t *sec_mem_elem; + + sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info); + + while (sec_mem_elem != NULL) { + + dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size, + sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + osl_sec_dma_free_mem_elem(osh, sec_mem_elem); + + sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info); + } +} + +static void +osl_sec_dma_init_consistent(osl_t *osh) +{ + int i; + void *temp_va = osh->contig_base_alloc_coherent_va; + phys_addr_t temp_pa = osh->contig_base_alloc_coherent; + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + osh->sec_cma_coherent[i].avail = TRUE; + osh->sec_cma_coherent[i].va = temp_va; + osh->sec_cma_coherent[i].pa = temp_pa; + temp_va = ((uint8 *)temp_va)+SEC_CMA_COHERENT_BLK; + temp_pa += SEC_CMA_COHERENT_BLK; + } +} + +static void * +osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap) +{ + + void *temp_va = NULL; + ulong temp_pa = 0; + int i; + + if (size > SEC_CMA_COHERENT_BLK) { + printf("%s unsupported size\n", __FUNCTION__); + return NULL; + } + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + if (osh->sec_cma_coherent[i].avail == TRUE) { + temp_va = osh->sec_cma_coherent[i].va; + temp_pa = osh->sec_cma_coherent[i].pa; + osh->sec_cma_coherent[i].avail = FALSE; + break; + } + } + + if (i == SEC_CMA_COHERENT_MAX) + printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__, + temp_va, (ulong)temp_pa, size); + + *pap = (unsigned long)temp_pa; + return temp_va; +} + +static void +osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) +{ + int i = 0; + + for (i = 0; i < 
SEC_CMA_COHERENT_MAX; i++) { + if (osh->sec_cma_coherent[i].va == va) { + osh->sec_cma_coherent[i].avail = TRUE; + break; + } + } + if (i == SEC_CMA_COHERENT_MAX) + printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__, + va, (ulong)pa, size); +} +#endif /* BCM_SECURE_DMA */ + +/* timer apis */ +/* Note: All timer api's are thread unsafe and should be protected with locks by caller */ + +#ifdef REPORT_FATAL_TIMEOUTS +osl_timer_t * +osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg) +{ + osl_timer_t *t; + BCM_REFERENCE(fn); + if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) { + printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n", + (int)sizeof(osl_timer_t)); + return (NULL); + } + bzero(t, sizeof(osl_timer_t)); + if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) { + printf("osl_timer_init: malloc failed\n"); + MFREE(NULL, t, sizeof(osl_timer_t)); + return (NULL); + } + t->timer->data = (ulong)arg; + t->timer->function = (linux_timer_fn)fn; + t->set = TRUE; + + init_timer(t->timer); + + return (t); +} + +void +osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic) +{ + if (t == NULL) { + printf("%s: Timer handle is NULL\n", __FUNCTION__); + return; + } + ASSERT(!t->set); + + t->set = TRUE; + if (periodic) { + printf("Periodic timers are not supported by Linux timer apis\n"); + } + t->timer->expires = jiffies + ms*HZ/1000; + + add_timer(t->timer); + + return; +} + +void +osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic) +{ + if (t == NULL) { + printf("%s: Timer handle is NULL\n", __FUNCTION__); + return; + } + if (periodic) { + printf("Periodic timers are not supported by Linux timer apis\n"); + } + t->set = TRUE; + t->timer->expires = jiffies + ms*HZ/1000; + + mod_timer(t->timer, t->timer->expires); + + return; +} + +/* + * Return TRUE if timer successfully deleted, FALSE if still pending + */ +bool +osl_timer_del(osl_t *osh, osl_timer_t *t) +{ + if (t == 
NULL) { + printf("%s: Timer handle is NULL\n", __FUNCTION__); + return (FALSE); + } + if (t->set) { + t->set = FALSE; + if (t->timer) { + del_timer(t->timer); + MFREE(NULL, t->timer, sizeof(struct timer_list)); + } + MFREE(NULL, t, sizeof(osl_timer_t)); + } + return (TRUE); +} +#endif diff --git a/bcmdhd.100.10.315.x/linux_osl_priv.h b/bcmdhd.100.10.315.x/linux_osl_priv.h new file mode 100644 index 0000000..0b602cd --- /dev/null +++ b/bcmdhd.100.10.315.x/linux_osl_priv.h @@ -0,0 +1,179 @@ +/* + * Private header file for Linux OS Independent Layer + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: linux_osl_priv.h 737887 2017-12-23 12:15:26Z $ + */ + +#ifndef _LINUX_OSL_PRIV_H_ +#define _LINUX_OSL_PRIV_H_ + +#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ +#define BCM_MEM_FILENAME_LEN 24 /* Mem. 
filename length */ + +/* dependancy check */ +#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only" +#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */ + +#ifdef CONFIG_DHD_USE_STATIC_BUF +#ifdef DHD_USE_STATIC_CTRLBUF +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define PREALLOC_FREE_MAGIC 0xFEDC +#define PREALLOC_USED_MAGIC 0xFCDE +#else +#define DHD_SKB_HDRSIZE 336 +#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) +#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) +#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#define STATIC_BUF_MAX_NUM 16 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) +#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) + +typedef struct bcm_static_buf { + spinlock_t static_lock; + unsigned char *buf_ptr; + unsigned char buf_use[STATIC_BUF_MAX_NUM]; +} bcm_static_buf_t; + +extern bcm_static_buf_t *bcm_static_buf; + +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_4PAGE_NUM 0 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#elif defined(ENHANCED_STATIC_BUF) +#define STATIC_PKT_4PAGE_NUM 1 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE +#else +#define STATIC_PKT_4PAGE_NUM 0 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_1PAGE_NUM 0 +#define STATIC_PKT_2PAGE_NUM 128 +#else +#define STATIC_PKT_1PAGE_NUM 8 +#define STATIC_PKT_2PAGE_NUM 8 +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#define STATIC_PKT_1_2PAGE_NUM \ + ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM)) +#define STATIC_PKT_MAX_NUM \ + ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM)) + +typedef struct bcm_static_pkt { +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM]; + unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM]; + 
spinlock_t osl_pkt_lock; + uint32 last_allocated_index; +#else + struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM]; + struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM]; +#ifdef ENHANCED_STATIC_BUF + struct sk_buff *skb_16k; +#endif /* ENHANCED_STATIC_BUF */ + struct semaphore osl_pkt_sem; +#endif /* DHD_USE_STATIC_CTRLBUF */ + unsigned char pkt_use[STATIC_PKT_MAX_NUM]; +} bcm_static_pkt_t; + +extern bcm_static_pkt_t *bcm_static_skb; +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +typedef struct bcm_mem_link { + struct bcm_mem_link *prev; + struct bcm_mem_link *next; + uint size; + int line; + void *osh; + char file[BCM_MEM_FILENAME_LEN]; +} bcm_mem_link_t; + +struct osl_cmn_info { + atomic_t malloced; + atomic_t pktalloced; /* Number of allocated packet buffers */ + spinlock_t dbgmem_lock; + bcm_mem_link_t *dbgmem_list; + bcm_mem_link_t *dbgvmem_list; + spinlock_t pktalloc_lock; + atomic_t refcount; /* Number of references to this shared structure. */ +}; +typedef struct osl_cmn_info osl_cmn_t; + +#if defined(BCM_BACKPLANE_TIMEOUT) +typedef uint32 (*bpt_cb_fn)(void *ctx, void *addr); +#endif /* BCM_BACKPLANE_TIMEOUT */ + +struct osl_info { + osl_pubinfo_t pub; + uint32 flags; /* If specific cases to be handled in the OSL */ + uint magic; + void *pdev; + uint failed; + uint bustype; + osl_cmn_t *cmn; /* Common OSL related data shred between two OSH's */ + + void *bus_handle; +#ifdef BCM_SECURE_DMA +#ifdef NOT_YET + struct sec_mem_elem *sec_list_512; + struct sec_mem_elem *sec_list_base_512; + struct sec_mem_elem *sec_list_2048; + struct sec_mem_elem *sec_list_base_2048; +#endif /* NOT_YET */ + struct sec_mem_elem *sec_list_4096; + struct sec_mem_elem *sec_list_base_4096; + phys_addr_t contig_base; + void *contig_base_va; + phys_addr_t contig_base_alloc; + void *contig_base_alloc_va; + phys_addr_t contig_base_alloc_coherent; + void *contig_base_alloc_coherent_va; + void *contig_base_coherent_va; + void *contig_delta_va_pa; + struct { + phys_addr_t pa; + void *va; + bool avail; + } 
sec_cma_coherent[SEC_CMA_COHERENT_MAX]; + int stb_ext_params; +#endif /* BCM_SECURE_DMA */ +#if defined(BCM_BACKPLANE_TIMEOUT) + bpt_cb_fn bpt_cb; + void *sih; +#endif /* BCM_BACKPLANE_TIMEOUT */ +#ifdef USE_DMA_LOCK + spinlock_t dma_lock; +#endif /* USE_DMA_LOCK */ +}; + +#endif /* _LINUX_OSL_PRIV_H_ */ diff --git a/bcmdhd.100.10.315.x/linux_pkt.c b/bcmdhd.100.10.315.x/linux_pkt.c new file mode 100644 index 0000000..15891fd --- /dev/null +++ b/bcmdhd.100.10.315.x/linux_pkt.c @@ -0,0 +1,623 @@ +/* + * Linux Packet (skb) interface + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: linux_pkt.c 769682 2018-06-27 07:29:55Z $ + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include "linux_osl_priv.h" + +#ifdef CONFIG_DHD_USE_STATIC_BUF + +bcm_static_buf_t *bcm_static_buf = 0; +bcm_static_pkt_t *bcm_static_skb = 0; + +void* wifi_platform_prealloc(void *adapter, int section, unsigned long size); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +#ifdef BCM_OBJECT_TRACE +/* don't clear the first 4 byte that is the pkt sn */ +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + uint tagsz = sizeof(s->cb); \ + ASSERT(OSL_PKTTAG_SZ <= tagsz); \ + memset(s->cb + 4, 0, tagsz - 4); \ +} while (0) +#else +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + uint tagsz = sizeof(s->cb); \ + ASSERT(OSL_PKTTAG_SZ <= tagsz); \ + memset(s->cb, 0, tagsz); \ +} while (0) +#endif /* BCM_OBJECT_TRACE */ + +int osl_static_mem_init(osl_t *osh, void *adapter) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (!bcm_static_buf && adapter) { + if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter, + 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) { + printk("can not alloc static buf!\n"); + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } else { + printk("succeed to alloc static buf\n"); + } + + spin_lock_init(&bcm_static_buf->static_lock); + + bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; + } + +#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF) + if (!bcm_static_skb && adapter) { + int i; + void *skb_buff_ptr = 0; + bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); + skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0); + if (!skb_buff_ptr) { + printk("cannot alloc static buf!\n"); + bcm_static_buf = NULL; + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } + + bcopy(skb_buff_ptr, 
bcm_static_skb, sizeof(struct sk_buff *) * + (STATIC_PKT_MAX_NUM)); + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + bcm_static_skb->pkt_use[i] = 0; + } + +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_init(&bcm_static_skb->osl_pkt_lock); + bcm_static_skb->last_allocated_index = 0; +#else + sema_init(&bcm_static_skb->osl_pkt_sem, 1); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } +#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + return 0; +} + +int osl_static_mem_deinit(osl_t *osh, void *adapter) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) { + bcm_static_buf = 0; + } +#ifdef BCMSDIO + if (bcm_static_skb) { + bcm_static_skb = 0; + } +#endif /* BCMSDIO */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + return 0; +} + +/* + * To avoid ACP latency, a fwder buf will be sent directly to DDR using + * DDR aliasing into non-ACP address space. Such Fwder buffers must be + * explicitly managed from a coherency perspective. + */ +static inline void BCMFASTPATH +osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb) +{ +} + +static struct sk_buff * BCMFASTPATH +osl_alloc_skb(osl_t *osh, unsigned int len) +{ + struct sk_buff *skb; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) + gfp_t flags = (in_atomic() || irqs_disabled()) ? 
GFP_ATOMIC : GFP_KERNEL; +#ifdef DHD_USE_ATOMIC_PKTGET + flags = GFP_ATOMIC; +#endif /* DHD_USE_ATOMIC_PKTGET */ + skb = __dev_alloc_skb(len, flags); +#else + skb = dev_alloc_skb(len); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */ + + return skb; +} + +/* Convert a driver packet to native(OS) packet + * In the process, packettag is zeroed out before sending up + * IP code depends on skb->cb to be setup correctly with various options + * In our case, that means it should be 0 + */ +struct sk_buff * BCMFASTPATH +osl_pkt_tonative(osl_t *osh, void *pkt) +{ + struct sk_buff *nskb; + + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(pkt); + + /* Decrement the packet counter */ + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced); + + } + return (struct sk_buff *)pkt; +} + +/* Convert a native(OS) packet to driver packet. + * In the process, native packet is destroyed, there is no copying + * Also, a packettag is zeroed out + */ +void * BCMFASTPATH +osl_pkt_frmnative(osl_t *osh, void *pkt) +{ + struct sk_buff *cskb; + struct sk_buff *nskb; + unsigned long pktalloced = 0; + + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(pkt); + + /* walk the PKTCLINK() list */ + for (cskb = (struct sk_buff *)pkt; + cskb != NULL; + cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) { + + /* walk the pkt buffer list */ + for (nskb = cskb; nskb; nskb = nskb->next) { + + /* Increment the packet counter */ + pktalloced++; + + /* clean the 'prev' pointer + * Kernel 3.18 is leaving skb->prev pointer set to skb + * to indicate a non-fragmented skb + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + nskb->prev = NULL; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */ + + } + } + + /* Increment the packet counter */ + atomic_add(pktalloced, &osh->cmn->pktalloced); + + return (void *)pkt; +} + +/* Return a new packet. 
zero out pkttag */ +void * BCMFASTPATH +#ifdef BCM_OBJECT_TRACE +linux_pktget(osl_t *osh, uint len, int line, const char *caller) +#else +linux_pktget(osl_t *osh, uint len) +#endif /* BCM_OBJECT_TRACE */ +{ + struct sk_buff *skb; + uchar num = 0; + if (lmtest != FALSE) { + get_random_bytes(&num, sizeof(uchar)); + if ((num + 1) <= (256 * lmtest / 100)) + return NULL; + } + + if ((skb = osl_alloc_skb(osh, len))) { + skb->tail += len; + skb->len += len; + skb->priority = 0; + + atomic_inc(&osh->cmn->pktalloced); +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line); +#endif /* BCM_OBJECT_TRACE */ + } + + return ((void*) skb); +} + +/* Free the driver packet. Free the tag if present */ +void BCMFASTPATH +#ifdef BCM_OBJECT_TRACE +linux_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller) +#else +linux_pktfree(osl_t *osh, void *p, bool send) +#endif /* BCM_OBJECT_TRACE */ +{ + struct sk_buff *skb, *nskb; + if (osh == NULL) + return; + + skb = (struct sk_buff*) p; + + if (send) { + if (osh->pub.tx_fn) { + osh->pub.tx_fn(osh->pub.tx_ctx, p, 0); + } + } else { + if (osh->pub.rx_fn) { + osh->pub.rx_fn(osh->pub.rx_ctx, p); + } + } + + PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF) + if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) { + printk("%s: pkt %p is from static pool\n", + __FUNCTION__, p); + dump_stack(); + return; + } + + if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) { + printk("%s: pkt %p is from static pool and not in used\n", + __FUNCTION__, p); + dump_stack(); + return; + } +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */ + + /* perversion: we use skb->next to chain multi-skb packets */ + while (skb) { + nskb = skb->next; + skb->next = NULL; + +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line); +#endif /* BCM_OBJECT_TRACE */ + + { + if (skb->destructor) { + /* cannot kfree_skb() 
on hard IRQ (net/core/skbuff.c) if + * destructor exists + */ + dev_kfree_skb_any(skb); + } else { + /* can free immediately (even in_irq()) if destructor + * does not exist + */ + dev_kfree_skb(skb); + } + } + atomic_dec(&osh->cmn->pktalloced); + skb = nskb; + } +} + +#ifdef CONFIG_DHD_USE_STATIC_BUF +void* +osl_pktget_static(osl_t *osh, uint len) +{ + int i = 0; + struct sk_buff *skb; +#ifdef DHD_USE_STATIC_CTRLBUF + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ + + if (!bcm_static_skb) + return linux_pktget(osh, len); + + if (len > DHD_SKB_MAX_BUFSIZE) { + printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len); + return linux_pktget(osh, len); + } + +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + if (len <= DHD_SKB_2PAGE_BUFSIZE) { + uint32 index; + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM; + bcm_static_skb->last_allocated_index++; + if (bcm_static_skb->skb_8k[index] && + bcm_static_skb->pkt_use[index] == 0) { + break; + } + } + + if (i < STATIC_PKT_2PAGE_NUM) { + bcm_static_skb->pkt_use[index] = 1; + skb = bcm_static_skb->skb_8k[index]; + skb->data = skb->head; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT); +#else + skb->tail = skb->data + PKT_HEADROOM_DEFAULT; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->data += PKT_HEADROOM_DEFAULT; + skb->cloned = 0; + skb->priority = 0; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + skb->mac_len = PREALLOC_USED_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return skb; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return NULL; +#else + down(&bcm_static_skb->osl_pkt_sem); + + if (len <= 
DHD_SKB_1PAGE_BUFSIZE) { + for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) { + if (bcm_static_skb->skb_4k[i] && + bcm_static_skb->pkt_use[i] == 0) { + break; + } + } + + if (i != STATIC_PKT_1PAGE_NUM) { + bcm_static_skb->pkt_use[i] = 1; + + skb = bcm_static_skb->skb_4k[i]; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + } + + if (len <= DHD_SKB_2PAGE_BUFSIZE) { + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] && + bcm_static_skb->pkt_use[i] == 0) { + break; + } + } + + if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) { + bcm_static_skb->pkt_use[i] = 1; + skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + } + +#if defined(ENHANCED_STATIC_BUF) + if (bcm_static_skb->skb_16k && + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) { + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1; + + skb = bcm_static_skb->skb_16k; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } +#endif /* ENHANCED_STATIC_BUF */ + + up(&bcm_static_skb->osl_pkt_sem); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return linux_pktget(osh, len); +#endif /* DHD_USE_STATIC_CTRLBUF */ +} + +void +osl_pktfree_static(osl_t *osh, void *p, bool send) +{ + int i; +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff *skb = (struct sk_buff *)p; + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ + + if (!p) { + return; + } + + if 
(!bcm_static_skb) { + linux_pktfree(osh, p, send); + return; + } + +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i]) { + if (bcm_static_skb->pkt_use[i] == 0) { + printk("%s: static pkt idx %d(%p) is double free\n", + __FUNCTION__, i, p); + } else { + bcm_static_skb->pkt_use[i] = 0; + } + + if (skb->mac_len != PREALLOC_USED_MAGIC) { + printk("%s: static pkt idx %d(%p) is not in used\n", + __FUNCTION__, i, p); + } + + skb->mac_len = PREALLOC_FREE_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p); +#else + down(&bcm_static_skb->osl_pkt_sem); + for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_4k[i]) { + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } + } + + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) { + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } + } +#ifdef ENHANCED_STATIC_BUF + if (p == bcm_static_skb->skb_16k) { + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } +#endif // endif + up(&bcm_static_skb->osl_pkt_sem); +#endif /* DHD_USE_STATIC_CTRLBUF */ + linux_pktfree(osh, p, send); +} +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +/* Clone a packet. + * The pkttag contents are NOT cloned. + */ +void * +#ifdef BCM_OBJECT_TRACE +osl_pktdup(osl_t *osh, void *skb, int line, const char *caller) +#else +osl_pktdup(osl_t *osh, void *skb) +#endif /* BCM_OBJECT_TRACE */ +{ + void * p; + + ASSERT(!PKTISCHAINED(skb)); + + /* clear the CTFBUF flag if set and map the rest of the buffer + * before cloning. 
+ */ + PKTCTFMAP(osh, skb); + + if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL) + return NULL; + + /* skb_clone copies skb->cb.. we don't want that */ + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(p); + + /* Increment the packet counter */ + atomic_inc(&osh->cmn->pktalloced); +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line); +#endif /* BCM_OBJECT_TRACE */ + + return (p); +} + +/* + * BINOSL selects the slightly slower function-call-based binary compatible osl. + */ + +uint +osl_pktalloced(osl_t *osh) +{ + if (atomic_read(&osh->cmn->refcount) == 1) + return (atomic_read(&osh->cmn->pktalloced)); + else + return 0; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER) +#include +#include +void +osl_pkt_orphan_partial(struct sk_buff *skb, int tsq) +{ + uint32 fraction; + static void *p_tcp_wfree = NULL; + + if (tsq <= 0) + return; + + if (!skb->destructor || skb->destructor == sock_wfree) + return; + + if (unlikely(!p_tcp_wfree)) { + char sym[KSYM_SYMBOL_LEN]; + sprint_symbol(sym, (unsigned long)skb->destructor); + sym[9] = 0; + if (!strcmp(sym, "tcp_wfree")) + p_tcp_wfree = skb->destructor; + else + return; + } + + if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk)) + return; + + /* abstract a certain portion of skb truesize from the socket + * sk_wmem_alloc to allow more skb can be allocated for this + * socket for better cusion meeting WiFi device requirement + */ + fraction = skb->truesize * (tsq - 1) / tsq; + skb->truesize -= fraction; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) + atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs); +#else + atomic_sub(fraction, &skb->sk->sk_wmem_alloc); +#endif // endif + skb_orphan(skb); +} +#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */ diff --git a/bcmdhd.100.10.315.x/pcie_core.c b/bcmdhd.100.10.315.x/pcie_core.c new file mode 100644 index 0000000..08c21a3 --- /dev/null +++ b/bcmdhd.100.10.315.x/pcie_core.c @@ -0,0 +1,158 @@ +/** 
@file pcie_core.c + * + * Contains PCIe related functions that are shared between different driver models (e.g. firmware + * builds, DHD builds, BMAC builds), in order to avoid code duplication. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: pcie_core.c 769591 2018-06-27 00:08:22Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pcie_core.h" + +/* local prototypes */ + +/* local variables */ + +/* function definitions */ + +#ifdef BCMDRIVER + +/* wd_mask/wd_val is only for chipc_corerev >= 65 */ +void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val) +{ + uint32 val, i, lsc; + uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR, + PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L, + PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA, + PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL, + PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG, + PCIECFGREG_REG_BAR3_CONFIG}; + sbpcieregs_t *pcieregs = NULL; + uint32 origidx = si_coreidx(sih); + +#ifdef BCMFPGA_HW + if (CCREV(sih->ccrev) < 67) { + /* To avoid hang on FPGA, donot reset watchdog */ + si_setcoreidx(sih, origidx); + return; + } +#endif // endif + + /* Switch to PCIE2 core */ + pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcieregs); + ASSERT(pcieregs != NULL); + + /* Disable/restore ASPM Control to protect the watchdog reset */ + W_REG(osh, &pcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + lsc = R_REG(osh, &pcieregs->configdata); + val = lsc & (~PCIE_ASPM_ENAB); + W_REG(osh, &pcieregs->configdata, val); + + if (CCREV(sih->ccrev) >= 65) { + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), wd_mask, wd_val); + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), WD_COUNTER_MASK, 4); +#ifdef BCMQT_HW + OSL_DELAY(2000 * 4000); +#else + OSL_DELAY(2000); /* 2 ms */ +#endif // endif + val = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, intstatus), 0, 0); + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, intstatus), + wd_mask, val & wd_mask); + } else { + si_corereg_writeonly(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4); + /* Read a config space to make sure the above write 
gets flushed on PCIe bus */ + val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32)); + OSL_DELAY(100000); + } + + W_REG(osh, &pcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + W_REG(osh, &pcieregs->configdata, lsc); + + if (sih->buscorerev <= 13) { + /* Write configuration registers back to the shadow registers + * cause shadow registers are cleared out after watchdog reset. + */ + for (i = 0; i < ARRAYSIZE(cfg_offset); i++) { + W_REG(osh, &pcieregs->configaddr, cfg_offset[i]); + val = R_REG(osh, &pcieregs->configdata); + W_REG(osh, &pcieregs->configdata, val); + } + } + si_setcoreidx(sih, origidx); +} + +/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled + * by the L12 state from MAC to save power by putting the + * SerDes analog in IDDQ mode + */ +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) +{ + sbpcieregs_t *pcie = NULL; + uint crwlpciegen2_117_disable = 0; + uint32 origidx = si_coreidx(sih); + + crwlpciegen2_117_disable = PCIE_PipeIddqDisable0 | PCIE_PipeIddqDisable1; + /* Switch to PCIE2 core */ + pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcie); + ASSERT(pcie != NULL); + + OR_REG(osh, &sbpcieregs->control, + crwlpciegen2_117_disable); + + si_setcoreidx(sih, origidx); +} + +#define PCIE_PMCR_REFUP_MASK 0x3f0001e0 +#define PCIE_PMCR_REFEXT_MASK 0x400000 +#define PCIE_PMCR_REFUP_100US 0x38000080 +#define PCIE_PMCR_REFEXT_100US 0x400000 + +/* Set PCIE TRefUp time to 100us */ +void pcie_set_trefup_time_100us(si_t *sih) +{ + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP); + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFUP_MASK, PCIE_PMCR_REFUP_100US); + + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP_EXT); + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFEXT_MASK, PCIE_PMCR_REFEXT_100US); +} + +#endif /* BCMDRIVER */ diff 
--git a/bcmdhd.100.10.315.x/sbutils.c b/bcmdhd.100.10.315.x/sbutils.c new file mode 100644 index 0000000..1a3253a --- /dev/null +++ b/bcmdhd.100.10.315.x/sbutils.c @@ -0,0 +1,1093 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbutils.c 700323 2017-05-18 16:12:11Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" + +/* local prototypes */ +static uint _sb_coreidx(si_info_t *sii, uint32 sba); +static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba, + uint ncores, uint devid); +static uint32 _sb_coresba(si_info_t *sii); +static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx); +#define SET_SBREG(sii, r, mask, val) \ + W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val))) +#define REGS2SB(va) (sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF) + +/* sonicsrev */ +#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT) +#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT) + +#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr)) +#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v)) +#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v))) +#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v))) + +static uint32 +sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr) +{ + uint8 tmp; + uint32 val, intr_val = 0; + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). 
+ * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + val = R_REG(sii->osh, sbr); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } + + return (val); +} + +static void +sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v) +{ + uint8 tmp; + volatile uint32 dummy; + uint32 intr_val = 0; + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). + * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) { + dummy = R_REG(sii->osh, sbr); + BCM_REFERENCE(dummy); + W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff)); + dummy = R_REG(sii->osh, sbr); + BCM_REFERENCE(dummy); + W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff)); + } else + W_REG(sii->osh, sbr, v); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } +} + +uint +sb_coreid(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT); +} + +uint +sb_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + volatile void *corereg; + sbconfig_t *sb; + uint origidx, intflag, intr_val = 0; + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + corereg = si_setcore(sih, CC_CORE_ID, 
0); + ASSERT(corereg != NULL); + sb = REGS2SB(corereg); + intflag = R_SBREG(sii, &sb->sbflagst); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + + return intflag; +} + +uint +sb_flag(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK; +} + +void +sb_setint(si_t *sih, int siflag) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 vec; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + if (siflag == -1) + vec = 0; + else + vec = 1 << siflag; + W_SBREG(sii, &sb->sbintvec, vec); +} + +/* return core index of the core with address 'sba' */ +static uint +_sb_coreidx(si_info_t *sii, uint32 sba) +{ + uint i; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + for (i = 0; i < sii->numcores; i ++) + if (sba == cores_info->coresba[i]) + return i; + return BADIDX; +} + +/* return core address of the current core */ +static uint32 +_sb_coresba(si_info_t *sii) +{ + uint32 sbaddr; + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: { + sbconfig_t *sb = REGS2SB(sii->curmap); + sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0)); + break; + } + + case PCI_BUS: + sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + break; + + case PCMCIA_BUS: { + uint8 tmp = 0; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + sbaddr = (uint32)tmp << 12; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + sbaddr |= (uint32)tmp << 16; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + sbaddr |= (uint32)tmp << 24; + break; + } + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + sbaddr = (uint32)(uintptr)sii->curmap; + break; +#endif // endif + + default: + sbaddr = BADCOREADDR; + break; + } + + return sbaddr; +} + +uint +sb_corevendor(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT); 
+} + +uint +sb_corerev(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + uint sbidh; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + sbidh = R_SBREG(sii, &sb->sbidhigh); + + return (SBCOREREV(sbidh)); +} + +/* set core-specific control flags */ +void +sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + + /* mask and set */ + w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) | + (val << SBTML_SICF_SHIFT); + W_SBREG(sii, &sb->sbtmstatelow, w); +} + +/* set/clear core-specific control flags */ +uint32 +sb_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + + /* mask and set */ + if (mask || val) { + w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) | + (val << SBTML_SICF_SHIFT); + W_SBREG(sii, &sb->sbtmstatelow, w); + } + + /* return the new value + * for write operation, the following readback ensures the completion of write operation.
+ */ + return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT); +} + +/* set/clear core-specific status flags */ +uint32 +sb_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + /* mask and set */ + if (mask || val) { + w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) | + (val << SBTMH_SISF_SHIFT); + W_SBREG(sii, &sb->sbtmstatehigh, w); + } + + /* return the new value */ + return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT); +} + +bool +sb_iscoreup(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbtmstatelow) & + (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) == + (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers.
+ */ +uint +sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + volatile uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) + + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + if (regoff >= SBCONFIGOFF) { + w = (R_SBREG(sii, r) & ~mask) | val; + W_SBREG(sii, r, w); + } else { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + } + + /* readback */ + if (regoff >= SBCONFIGOFF) + w = R_SBREG(sii, r); + else { + w = R_REG(sii->osh, r); + } + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + sb_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. 
+ */ +volatile uint32 * +sb_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + volatile uint32 *r = NULL; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (volatile uint32 *)((volatile char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) + return 0; + + return (r); +} + +/* Scan the enumeration space to find all cores starting from the given + * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba' + * is the default core address at chip POR time and 'regs' is the virtual + * address that the default core is mapped at. 'ncores' is the number of + * cores expected on bus 'sbba'. 
It returns the total number of cores + * starting from bus 'sbba', inclusive. + */ +#define SB_MAXBUSES 2 +static uint +_sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, + uint32 sbba, uint numcores, uint devid) +{ + uint next; + uint ncc = 0; + uint i; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (bus >= SB_MAXBUSES) { + SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus)); + return 0; + } + SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores)); + + /* Scan all cores on the bus starting from core 0. + * Core addresses must be contiguous on each bus. + */ + for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) { + cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE); + + /* keep and reuse the initial register mapping */ + if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) { + SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next)); + cores_info->regs[next] = regs; + } + + /* change core to 'next' and read its coreid */ + sii->curmap = _sb_setcoreidx(sii, next); + sii->curidx = next; + + cores_info->coreid[next] = sb_coreid(&sii->pub); + + /* core specific processing... */ + /* chipc provides # cores */ + if (cores_info->coreid[next] == CC_CORE_ID) { + chipcregs_t *cc = (chipcregs_t *)sii->curmap; + uint32 ccrev = sb_corerev(&sii->pub); + + /* determine numcores - this is the total # cores in the chip */ + if (((ccrev == 4) || (ccrev >= 6))) { + ASSERT(cc); + numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> + CID_CC_SHIFT; + } else { + /* Older chips */ + uint chip = CHIPID(sii->pub.chip); + + if (chip == BCM4704_CHIP_ID) + numcores = 9; + else if (chip == BCM5365_CHIP_ID) + numcores = 7; + else { + SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", + chip)); + ASSERT(0); + numcores = 1; + } + } + SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores, + sii->pub.issim ? 
"QT" : "")); + } + /* scan bridged SB(s) and add results to the end of the list */ + else if (cores_info->coreid[next] == OCP_CORE_ID) { + sbconfig_t *sb = REGS2SB(sii->curmap); + uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1); + uint nsbcc; + + sii->numcores = next + 1; + + if ((nsbba & 0xfff00000) != si_enum_base(devid)) + continue; + nsbba &= 0xfffff000; + if (_sb_coreidx(sii, nsbba) != BADIDX) + continue; + + nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16; + nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid); + if (sbba == si_enum_base(devid)) + numcores -= nsbcc; + ncc += nsbcc; + } + } + + SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba)); + + sii->numcores = i + ncc; + return sii->numcores; +} + +/* scan the sb enumerated space to identify all cores */ +void +sb_scan(si_t *sih, volatile void *regs, uint devid) +{ + uint32 origsba; + sbconfig_t *sb; + si_info_t *sii = SI_INFO(sih); + BCM_REFERENCE(devid); + + sb = REGS2SB(sii->curmap); + + sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT; + + /* Save the current core info and validate it later till we know + * for sure what is good and what is bad. + */ + origsba = _sb_coresba(sii); + + /* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */ + sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid); +} + +/* + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +volatile void * +sb_setcoreidx(si_t *sih, uint coreidx) +{ + si_info_t *sii = SI_INFO(sih); + + if (coreidx >= sii->numcores) + return (NULL); + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. 
+ */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + sii->curmap = _sb_setcoreidx(sii, coreidx); + sii->curidx = coreidx; + + return (sii->curmap); +} + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. + */ +static volatile void * +_sb_setcoreidx(si_info_t *sii, uint coreidx) +{ + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 sbaddr = cores_info->coresba[coreidx]; + volatile void *regs; + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + regs = cores_info->regs[coreidx]; + break; + + case PCI_BUS: + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr); + regs = sii->curmap; + break; + + case PCMCIA_BUS: { + uint8 tmp = (sbaddr >> 12) & 0x0f; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + tmp = (sbaddr >> 16) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + tmp = (sbaddr >> 24) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + regs = sii->curmap; + break; + } +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = (void *)(uintptr)sbaddr; + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + regs = cores_info->regs[coreidx]; + break; +#endif /* BCMSDIO */ + + default: + ASSERT(0); + regs = NULL; + break; + } + + return regs; +} + +/* Return the address of sbadmatch0/1/2/3 register */ +static volatile uint32 * +sb_admatch(si_info_t *sii, uint asidx) +{ + sbconfig_t *sb; + volatile uint32 *addrm; + + sb = REGS2SB(sii->curmap); + + switch (asidx) { + case 0: + addrm = &sb->sbadmatch0; + break; + + case 1: + addrm = &sb->sbadmatch1; + break; + + case 2: + addrm = &sb->sbadmatch2; + break; + + case 3: + 
addrm = &sb->sbadmatch3; + break; + + default: + SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx)); + return 0; + } + + return (addrm); +} + +/* Return the number of address spaces in current core */ +int +sb_numaddrspaces(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + /* + 1 because of enumeration space */ + return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1; +} + +/* Return the address of the nth address space in the current core */ +uint32 +sb_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + +/* Return the size of the nth address space in the current core */ +uint32 +sb_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + +/* do buffered registers update */ +void +sb_commit(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + + origidx = sii->curidx; + ASSERT(GOODIDX(origidx)); + + INTR_OFF(sii, intr_val); + + /* switch over to chipcommon core if there is one, else use pci */ + if (sii->pub.ccrev != NOREV) { + chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(ccregs != NULL); + + /* do the buffer registers update */ + W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT); + W_REG(sii->osh, &ccregs->broadcastdata, 0x0); + } else + ASSERT(0); + + /* restore core index */ + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} + +void +sb_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii; + volatile uint32 dummy; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + /* if core is already in reset, just return */ + if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET) + return; + + /* if clocks are not enabled, put into reset and return */ + 
if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0) + goto disable; + + /* set target reject and spin until busy is clear (preserve core-specific bits) */ + OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000); + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY) + SI_ERROR(("%s: target state still busy\n", __FUNCTION__)); + + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) { + OR_SBREG(sii, &sb->sbimstate, SBIM_RJ); + dummy = R_SBREG(sii, &sb->sbimstate); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000); + } + + /* set reset and reject while enabling the clocks */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_REJ | SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(10); + + /* don't forget to clear the initiator reject bit */ + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) + AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ); + +disable: + /* leave reset and reject asserted */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET)); + OSL_DELAY(1); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +void +sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii; + sbconfig_t *sb; + volatile uint32 dummy; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + /* + * Must do the disable sequence first to work for arbitrary current core state. + */ + sb_core_disable(sih, (bits | resetbits)); + + /* + * Now do the initialization sequence. 
+ */ + + /* set reset while enabling the clock and forcing them on throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) { + W_SBREG(sii, &sb->sbtmstatehigh, 0); + } + if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) { + AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO)); + } + + /* clear reset and allow it to propagate throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + /* leave clock enabled */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + BCM_REFERENCE(dummy); + OSL_DELAY(1); +} + +/* + * Set the initiator timeout for the "master core". + * The master core is defined to be the core in control + * of the chip and so it issues accesses to non-memory + * locations (Because of dma *any* core can access memory). + * + * The routine uses the bus to decide who is the master: + * SI_BUS => mips + * JTAG_BUS => chipc + * PCI_BUS => pci or pcie + * PCMCIA_BUS => pcmcia + * SDIO_BUS => pcmcia + * + * This routine exists so callers can disable initiator + * timeouts so accesses to very slow devices like otp + * won't cause an abort. The routine allows arbitrary + * settings of the service and request timeouts, though. + * + * Returns the timeout state before changing it or -1 + on error.
+ */ + +#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK) + +uint32 +sb_set_initiator_to(si_t *sih, uint32 to, uint idx) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + uint32 tmp, ret = 0xffffffff; + sbconfig_t *sb; + + if ((to & ~TO_MASK) != 0) + return ret; + + /* Figure out the master core */ + if (idx == BADIDX) { + switch (BUSTYPE(sii->pub.bustype)) { + case PCI_BUS: + idx = sii->pub.buscoreidx; + break; + case JTAG_BUS: + idx = SI_CC_IDX; + break; + case PCMCIA_BUS: +#ifdef BCMSDIO + case SDIO_BUS: +#endif // endif + idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0); + break; + case SI_BUS: + idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0); + break; + default: + ASSERT(0); + } + if (idx == BADIDX) + return ret; + } + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + sb = REGS2SB(sb_setcoreidx(sih, idx)); + + tmp = R_SBREG(sii, &sb->sbimconfiglow); + ret = tmp & TO_MASK; + W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to); + + sb_commit(sih); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + return ret; +} + +uint32 +sb_base(uint32 admatch) +{ + uint32 base; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + base = 0; + + if (type == 0) { + base = admatch & SBAM_BASE0_MASK; + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE1_MASK; + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE2_MASK; + } + + return (base); +} + +uint32 +sb_size(uint32 admatch) +{ + uint32 size; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + size = 0; + + if (type == 0) { + size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1); + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1); + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg 
not supported */ + size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1); + } + + return (size); +} + +#if defined(BCMDBG_PHYDUMP) +/* print interesting sbconfig registers */ +void +sb_dumpregs(si_t *sih, struct bcmstrbuf *b) +{ + sbconfig_t *sb; + uint origidx, i, intr_val = 0; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + origidx = sii->curidx; + + INTR_OFF(sii, intr_val); + + for (i = 0; i < sii->numcores; i++) { + sb = REGS2SB(sb_setcoreidx(sih, i)); + + bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]); + + if (sii->pub.socirev > SONICS_2_2) + bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n", + sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0), + sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0)); + + bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x " + "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n", + R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh), + R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate), + R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh)); + } + + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} +#endif // endif diff --git a/bcmdhd.100.10.315.x/siutils.c b/bcmdhd.100.10.315.x/siutils.c new file mode 100644 index 0000000..02c7c7b --- /dev/null +++ b/bcmdhd.100.10.315.x/siutils.c @@ -0,0 +1,3764 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: siutils.c 769534 2018-06-26 21:19:11Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef BCMSDIO +#include +#endif // endif +#ifdef BCMPCIEDEV +#include +#endif /* BCMPCIEDEV */ +#include +#include +#include +#include +#ifdef BCMSDIO +#include +#include +#include +#include +#include +#include +#endif /* BCMSDIO */ +#include +#ifdef BCMSPI +#include +#endif /* BCMSPI */ +#include + +#ifdef BCM_SDRBL +#include +#endif /* BCM_SDRBL */ +#ifdef HNDGCI +#include +#endif /* HNDGCI */ +#ifdef WLGCIMBHLR +#include +#endif /* WLGCIMBHLR */ +#ifdef BCMULP +#include +#endif /* BCMULP */ +#include + +#include + +#include "siutils_priv.h" +#ifdef SECI_UART +/* Defines the set of GPIOs to be used for SECI UART if not specified in NVRAM */ +/* For further details on each ppin functionality please refer to PINMUX table in + * Top level architecture of BCMXXXX Chip + */ +#define DEFAULT_SECI_UART_PINMUX 0x08090a0b +#define DEFAULT_SECI_UART_PINMUX_43430 0x0102 +static bool force_seci_clk = 0; +#endif /* SECI_UART */ + +#define XTAL_FREQ_26000KHZ 26000 + +/** + * A set of PMU registers is clocked in the ILP domain, which has an implication on register write + * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb + * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set. 
+ */ +#define PMUREGS_ILP_SENSITIVE(regoff) \ + ((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \ + (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \ + (regoff) == OFFSETOF(pmuregs_t, res_req_timer)) + +#define CHIPCREGS_ILP_SENSITIVE(regoff) \ + ((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \ + (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \ + (regoff) == OFFSETOF(chipcregs_t, res_req_timer)) + +#define GCI_FEM_CTRL_WAR 0x11111111 + +#ifndef AXI_TO_VAL +#define AXI_TO_VAL 19 +#endif /* AXI_TO_VAL */ + +#ifndef AXI_TO_VAL_4347 +/* + * Increase BP timeout for fast clock and short PCIe timeouts + * New timeout: 2 ** 25 cycles + */ +#define AXI_TO_VAL_4347 25 +#endif /* AXI_TO_VAL_4347 */ + +/* local prototypes */ +static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs, + uint bustype, void *sdh, char **vars, uint *varsz); +static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh); +static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, volatile void *regs); + +static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff); + +/* global variable to indicate reservation/release of gpio's */ +static uint32 si_gpioreservation = 0; + +/* global flag to prevent shared resources from being initialized multiple times in si_attach() */ +static bool si_onetimeinit = FALSE; + +#ifdef SR_DEBUG +static const uint32 si_power_island_test_array[] = { + 0x0000, 0x0001, 0x0010, 0x0011, + 0x0100, 0x0101, 0x0110, 0x0111, + 0x1000, 0x1001, 0x1010, 0x1011, + 0x1100, 0x1101, 0x1110, 0x1111 +}; +#endif /* SR_DEBUG */ + +int do_4360_pcie2_war = 0; + +#ifdef BCMULP +/* Variable to store boot_type: warm_boot/cold_boot/etc. */ +static int boot_type = 0; +#endif // endif + +/* global kernel resource */ +static si_info_t ksii; +static si_cores_info_t ksii_cores_info; + +/** + * Allocate an si handle. This function may be called multiple times. 
+ * + * devid - pci device id (used to determine chip#) + * osh - opaque OS handle + * regs - virtual address of initial core registers + * bustype - pci/pcmcia/sb/sdio/etc + * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this + * function set 'vars' to NULL, making dereferencing of this parameter undesired. + * varsz - pointer to int to return the size of the vars + */ +si_t * +si_attach(uint devid, osl_t *osh, volatile void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + si_info_t *sii; + si_cores_info_t *cores_info; + /* alloc si_info_t */ + /* freed after ucode download for firmware builds */ + if ((sii = MALLOCZ_NOPERSIST(osh, sizeof(si_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + return (NULL); + } + + /* alloc si_cores_info_t */ + if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, + sizeof(si_cores_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + MFREE(osh, sii, sizeof(si_info_t)); + return (NULL); + } + sii->cores_info = cores_info; + + if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) { + MFREE(osh, sii, sizeof(si_info_t)); + MFREE(osh, cores_info, sizeof(si_cores_info_t)); + return (NULL); + } + sii->vars = vars ? *vars : NULL; + sii->varsz = varsz ? *varsz : 0; + + return (si_t *)sii; +} + +static uint32 wd_msticks; /**< watchdog timer ticks normalized to ms */ + +/** Returns the backplane address of the chipcommon core for a particular chip */ +uint32 +si_enum_base(uint devid) +{ + // NIC/DHD build + switch (devid) { + case BCM7271_CHIP_ID: + case BCM7271_D11AC_ID: + case BCM7271_D11AC2G_ID: + case BCM7271_D11AC5G_ID: + return 0xF1800000; + } + + return SI_ENUM_BASE_DEFAULT; +} + +/** generic kernel variant of si_attach(). Is not called for Linux WLAN NIC builds. 
*/ +si_t * +si_kattach(osl_t *osh) +{ + static bool ksii_attached = FALSE; + si_cores_info_t *cores_info; + + if (!ksii_attached) { + void *regs = NULL; + const uint device_id = BCM4710_DEVICE_ID; // pick an arbitrary default device_id + + regs = REG_MAP(si_enum_base(device_id), SI_CORE_SIZE); // map physical to virtual + cores_info = (si_cores_info_t *)&ksii_cores_info; + ksii.cores_info = cores_info; + + ASSERT(osh); + if (si_doattach(&ksii, device_id, osh, regs, + SI_BUS, NULL, + osh != SI_OSH ? &(ksii.vars) : NULL, + osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) { + SI_ERROR(("si_kattach: si_doattach failed\n")); + REG_UNMAP(regs); + return NULL; + } + REG_UNMAP(regs); + + /* save ticks normalized to ms for si_watchdog_ms() */ + if (PMUCTL_ENAB(&ksii.pub)) { + /* based on 32KHz ILP clock */ + wd_msticks = 32; + } else { + wd_msticks = ALP_CLOCK / 1000; + } + + ksii_attached = TRUE; + SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n", + CCREV(ksii.pub.ccrev), wd_msticks)); + } + + return &ksii.pub; +} + +static bool +si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh) +{ + BCM_REFERENCE(sdh); + BCM_REFERENCE(devid); + /* need to set memseg flag for CF card first before any sb registers access */ + if (BUSTYPE(bustype) == PCMCIA_BUS) + sii->memseg = TRUE; + +#if defined(BCMSDIO) && !defined(BCMSDIOLITE) + if (BUSTYPE(bustype) == SDIO_BUS) { + int err; + uint8 clkset; + + /* Try forcing SDIO core to do ALPAvail request only */ + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + if (!err) { + uint8 clkval; + + /* If register supported, wait for ALPAvail and then force ALP */ + clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL); + if ((clkval & ~SBSDIO_AVBITS) == clkset) { + SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + if 
(!SBSDIO_ALPAV(clkval)) { + SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n", + clkval)); + return FALSE; + } + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + clkset, &err); + OSL_DELAY(65); + } + } + + /* Also, disable the extra SDIO pull-ups */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + } + +#ifdef BCMSPI + /* Avoid backplane accesses before wake-wlan (i.e. htavail) for spi. + * F1 read accesses may return correct data but with data-not-available dstatus bit set. + */ + if (BUSTYPE(bustype) == SPI_BUS) { + + int err; + uint32 regdata; + /* wake up wlan function :WAKE_UP goes as HT_AVAIL request in hardware */ + regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL); + SI_MSG(("F0 REG0 rd = 0x%x\n", regdata)); + regdata |= WAKE_UP; + + bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err); + + OSL_DELAY(100000); + } +#endif /* BCMSPI */ +#endif /* BCMSDIO && BCMDONGLEHOST && !BCMSDIOLITE */ + + return TRUE; +} + +uint32 +si_get_pmu_reg_addr(si_t *sih, uint32 offset) +{ + si_info_t *sii = SI_INFO(sih); + uint32 pmuaddr = INVALID_ADDR; + uint origidx = 0; + + SI_MSG(("%s: pmu access, offset: %x\n", __FUNCTION__, offset)); + if (!(sii->pub.cccaps & CC_CAP_PMU)) { + goto done; + } + if (AOB_ENAB(&sii->pub)) { + uint pmucoreidx; + pmuregs_t *pmu; + SI_MSG(("%s: AOBENAB: %x\n", __FUNCTION__, offset)); + origidx = sii->curidx; + pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0); + pmu = si_setcoreidx(&sii->pub, pmucoreidx); + pmuaddr = (uint32)(uintptr)((volatile uint8*)pmu + offset); + si_setcoreidx(sih, origidx); + } else + pmuaddr = SI_ENUM_BASE(sih) + offset; + +done: + printf("%s: addrRET: %x\n", __FUNCTION__, pmuaddr); + return pmuaddr; +} + +static bool +si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, volatile void *regs) +{ + si_cores_info_t *cores_info = (si_cores_info_t 
*)sii->cores_info; + bool pci, pcie, pcie_gen2 = FALSE; + uint i; + uint pciidx, pcieidx, pcirev, pcierev; + +#if defined(BCM_BACKPLANE_TIMEOUT) || defined(AXI_TIMEOUTS) + /* first, enable backplane timeouts */ + si_slave_wrapper_add(&sii->pub); +#endif // endif + sii->curidx = 0; + + cc = si_setcoreidx(&sii->pub, SI_CC_IDX); + ASSERT((uintptr)cc); + + /* get chipcommon rev */ + sii->pub.ccrev = (int)si_corerev(&sii->pub); + + /* get chipcommon chipstatus */ + if (CCREV(sii->pub.ccrev) >= 11) + sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus); + + /* get chipcommon capabilites */ + sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities); + /* get chipcommon extended capabilities */ + + if (CCREV(sii->pub.ccrev) >= 35) + sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext); + + /* get pmu rev and caps */ + if (sii->pub.cccaps & CC_CAP_PMU) { + if (AOB_ENAB(&sii->pub)) { + uint pmucoreidx; + pmuregs_t *pmu; + struct si_pub *sih = &sii->pub; + + pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0); + if (!GOODIDX(pmucoreidx)) { + SI_ERROR(("si_buscore_setup: si_findcoreidx failed\n")); + return FALSE; + } + + pmu = si_setcoreidx(&sii->pub, pmucoreidx); + sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities); + si_setcoreidx(&sii->pub, SI_CC_IDX); + + sii->pub.gcirev = si_corereg(sih, + GCI_CORE_IDX(sih), + GCI_OFFSETOF(sih, gci_corecaps0), 0, 0) & GCI_CAP0_REV_MASK; + } else + sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities); + + sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; + } + + SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n", + CCREV(sii->pub.ccrev), sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, + sii->pub.pmucaps)); + + /* figure out bus/orignal core idx */ + sii->pub.buscoretype = NODEV_CORE_ID; + sii->pub.buscorerev = (uint)NOREV; + sii->pub.buscoreidx = BADIDX; + + pci = pcie = FALSE; + pcirev = pcierev = (uint)NOREV; + pciidx = pcieidx = BADIDX; + + for (i = 0; i < sii->numcores; i++) { + uint cid, 
crev; + + si_setcoreidx(&sii->pub, i); + cid = si_coreid(&sii->pub); + crev = si_corerev(&sii->pub); + + /* Display cores found */ + SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x size:%x regs 0x%p\n", + i, cid, crev, sii->coresba[i], sii->coresba_size[i], + OSL_OBFUSCATE_BUF(sii->regs[i]))); + + if (BUSTYPE(bustype) == SI_BUS) { + /* now look at the chipstatus register to figure the pacakge */ + /* for SDIO but downloaded on PCIE dev */ +#ifdef BCMPCIEDEV_ENABLED + if (cid == PCIE2_CORE_ID) { + pcieidx = i; + pcierev = crev; + pcie = TRUE; + pcie_gen2 = TRUE; + } +#endif // endif + + } else if (BUSTYPE(bustype) == PCI_BUS) { + if (cid == PCI_CORE_ID) { + pciidx = i; + pcirev = crev; + pci = TRUE; + } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) { + pcieidx = i; + pcierev = crev; + pcie = TRUE; + if (cid == PCIE2_CORE_ID) + pcie_gen2 = TRUE; + } + } else if ((BUSTYPE(bustype) == PCMCIA_BUS) && + (cid == PCMCIA_CORE_ID)) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx = i; + } +#ifdef BCMSDIO + else if (((BUSTYPE(bustype) == SDIO_BUS) || + (BUSTYPE(bustype) == SPI_BUS)) && + ((cid == PCMCIA_CORE_ID) || + (cid == SDIOD_CORE_ID))) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx = i; + } +#endif /* BCMSDIO */ + + /* find the core idx before entering this func. 
*/ + if ((savewin && (savewin == cores_info->coresba[i])) || + (regs == cores_info->regs[i])) + *origidx = i; + } + +#if defined(PCIE_FULL_DONGLE) + if (pcie) { + if (pcie_gen2) + sii->pub.buscoretype = PCIE2_CORE_ID; + else + sii->pub.buscoretype = PCIE_CORE_ID; + sii->pub.buscorerev = pcierev; + sii->pub.buscoreidx = pcieidx; + } + BCM_REFERENCE(pci); + BCM_REFERENCE(pcirev); + BCM_REFERENCE(pciidx); +#else + if (pci) { + sii->pub.buscoretype = PCI_CORE_ID; + sii->pub.buscorerev = pcirev; + sii->pub.buscoreidx = pciidx; + } else if (pcie) { + if (pcie_gen2) + sii->pub.buscoretype = PCIE2_CORE_ID; + else + sii->pub.buscoretype = PCIE_CORE_ID; + sii->pub.buscorerev = pcierev; + sii->pub.buscoreidx = pcieidx; + } +#endif /* defined(PCIE_FULL_DONGLE) */ + + SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, + sii->pub.buscorerev)); + +#if defined(BCMSDIO) + /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was + * already running. + */ + if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) { + if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) || + si_setcore(&sii->pub, ARMCM3_CORE_ID, 0)) + si_core_disable(&sii->pub, 0); + } +#endif /* BCMSDIO && BCMDONGLEHOST */ + + /* return to the original core */ + si_setcoreidx(&sii->pub, *origidx); + + return TRUE; +} + +uint16 +si_chipid(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + return (sii->chipnew) ? 
sii->chipnew : sih->chip; +} + +/* CHIP_ID's being mapped here should not be used anywhere else in the code */ +static void +si_chipid_fixup(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + ASSERT(sii->chipnew == 0); + switch (sih->chip) { + case BCM43567_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM43570_CHIP_ID; /* chip class */ + break; + case BCM43562_CHIP_ID: + case BCM4358_CHIP_ID: + case BCM43566_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM43569_CHIP_ID; /* chip class */ + break; + case BCM4356_CHIP_ID: + case BCM4371_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM4354_CHIP_ID; /* chip class */ + break; + case BCM4357_CHIP_ID: + case BCM4361_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM4347_CHIP_ID; /* chip class */ + break; + default: + break; + } +} + +#ifdef BCMULP +static void +si_check_boot_type(si_t *sih, osl_t *osh) +{ + if (sih->pmurev >= 30) { + boot_type = PMU_REG_NEW(sih, swscratch, 0, 0); + } else { + boot_type = CHIPC_REG(sih, flashdata, 0, 0); + } + + SI_ERROR(("%s: boot_type: 0x%08x\n", __func__, boot_type)); +} +#endif /* BCMULP */ + +#ifdef BCM_BACKPLANE_TIMEOUT +uint32 +si_clear_backplane_to_fast(void *sih, void *addr) +{ + si_t *_sih = DISCARD_QUAL(sih, si_t); + + if (CHIPTYPE(_sih->socitype) == SOCI_AI) { + return ai_clear_backplane_to_fast(_sih, addr); + } + + return 0; +} + +const si_axi_error_info_t * +si_get_axi_errlog_info(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + return (const si_axi_error_info_t *)sih->err_info; + } + + return NULL; +} + +void +si_reset_axi_errlog_info(si_t *sih) +{ + if (sih->err_info) { + sih->err_info->count = 0; + } +} +#endif /* BCM_BACKPLANE_TIMEOUT */ + +/** + * Allocate an si handle. This function may be called multiple times. This function is called by + * both si_attach() and si_kattach(). + * + * vars - pointer to a to-be created pointer area for "environment" variables. 
Some callers of this + * function set 'vars' to NULL. + */ +static si_info_t * +si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + struct si_pub *sih = &sii->pub; + uint32 w, savewin; + chipcregs_t *cc; + char *pvars = NULL; + uint origidx; +#ifdef NVSRCX + char *sromvars; +#endif // endif + + ASSERT(GOODREGS(regs)); + + savewin = 0; + + sih->buscoreidx = BADIDX; + sii->device_removed = FALSE; + + sii->curmap = regs; + sii->sdh = sdh; + sii->osh = osh; + sii->second_bar0win = ~0x0; + sih->enum_base = si_enum_base(devid); + +#if defined(BCM_BACKPLANE_TIMEOUT) + sih->err_info = MALLOCZ(osh, sizeof(si_axi_error_info_t)); + if (sih->err_info == NULL) { + SI_ERROR(("%s: %zu bytes MALLOC FAILED", + __FUNCTION__, sizeof(si_axi_error_info_t))); + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if defined(BCM_BACKPLANE_TIMEOUT) + osl_set_bpt_cb(osh, (void *)si_clear_backplane_to_fast, (void *)sih); +#endif // endif + + /* check to see if we are a si core mimic'ing a pci core */ + if ((bustype == PCI_BUS) && + (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) { + SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI " + "devid:0x%x\n", __FUNCTION__, devid)); + bustype = SI_BUS; + } + + /* find Chipcommon address */ + if (bustype == PCI_BUS) { + savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + if (!GOODCOREADDR(savewin, SI_ENUM_BASE(sih))) + savewin = SI_ENUM_BASE(sih); + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE(sih)); + if (!regs) + return NULL; + cc = (chipcregs_t *)regs; +#ifdef BCMSDIO + } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) { + cc = (chipcregs_t *)sii->curmap; +#endif // endif + } else { + cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE(sih), SI_CORE_SIZE); + } + + sih->bustype = bustype; +#ifdef BCMBUSTYPE + if (bustype != BUSTYPE(bustype)) { + SI_ERROR(("si_doattach: bus type %d does not match 
configured bus type %d\n", + bustype, BUSTYPE(bustype))); + return NULL; + } +#endif // endif + + /* bus/core/clk setup for register access */ + if (!si_buscore_prep(sii, bustype, devid, sdh)) { + SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype)); + return NULL; + } + + /* ChipID recognition. + * We assume we can read chipid at offset 0 from the regs arg. + * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon), + * some way of recognizing them needs to be added here. + */ + if (!cc) { + SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__)); + return NULL; + } + w = R_REG(osh, &cc->chipid); + sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; + /* Might as well fill in chip id rev & pkg */ + sih->chip = w & CID_ID_MASK; + sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; + sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; + +#if defined(BCMSDIO) && (defined(HW_OOB) || defined(FORCE_WOWLAN)) + dhd_conf_set_hw_oob_intr(sdh, sih); +#endif + + si_chipid_fixup(sih); + + if (CHIPID(sih->chip) == BCM43465_CHIP_ID) { + sih->chip = BCM4366_CHIP_ID; + } else if (CHIPID(sih->chip) == BCM43525_CHIP_ID) { + sih->chip = BCM4365_CHIP_ID; + } + + sih->issim = IS_SIM(sih->chippkg); + + /* scan for cores */ + if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) { + SI_MSG(("Found chip type SB (0x%08x)\n", w)); + sb_scan(&sii->pub, regs, devid); + } else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) || + (CHIPTYPE(sii->pub.socitype) == SOCI_NAI) || + (CHIPTYPE(sii->pub.socitype) == SOCI_DVTBUS)) { + + if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) + SI_MSG(("Found chip type AI (0x%08x)\n", w)); + else if (CHIPTYPE(sii->pub.socitype) == SOCI_NAI) + SI_MSG(("Found chip type NAI (0x%08x)\n", w)); + else + SI_MSG(("Found chip type DVT (0x%08x)\n", w)); + /* pass chipc address instead of original core base */ + + if (sii->osh) { + sii->axi_wrapper = (axi_wrapper_t *)MALLOCZ(sii->osh, +
(sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS)); + + if (sii->axi_wrapper == NULL) { + SI_ERROR(("%s: %zu bytes MALLOC Failed", __FUNCTION__, + (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS))); + } + } else { + sii->axi_wrapper = NULL; + } + + ai_scan(&sii->pub, (void *)(uintptr)cc, devid); + } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) { + SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip)); + /* pass chipc address instead of original core base */ + ub_scan(&sii->pub, (void *)(uintptr)cc, devid); + } else { + SI_ERROR(("Found chip of unknown type (0x%08x)\n", w)); + return NULL; + } + /* no cores found, bail out */ + if (sii->numcores == 0) { + SI_ERROR(("si_doattach: could not find any cores\n")); + return NULL; + } + /* bus/core/clk setup */ + origidx = SI_CC_IDX; + if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) { + SI_ERROR(("si_doattach: si_buscore_setup failed\n")); + goto exit; + } +#ifdef BCMULP + if (BCMULP_ENAB()) { + si_check_boot_type(sih, osh); + if (ulp_module_init(osh, sih) != BCME_OK) { + ULP_ERR(("%s: err in ulp_module_init\n", __FUNCTION__)); + goto exit; + } + } +#endif /* BCMULP */ + +#if !defined(_CFEZ_) || defined(CFG_WL) + /* assume current core is CC */ + if ((CCREV(sii->pub.ccrev) == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID || + CHIPID(sih->chip) == BCM43235_CHIP_ID || + CHIPID(sih->chip) == BCM43234_CHIP_ID || + CHIPID(sih->chip) == BCM43238_CHIP_ID) && + (CHIPREV(sii->pub.chiprev) <= 2))) { + + if ((cc->chipstatus & CST43236_BP_CLK) != 0) { + uint clkdiv; + clkdiv = R_REG(osh, &cc->clkdiv); + /* otp_clk_div is even number, 120/14 < 9mhz */ + clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT); + W_REG(osh, &cc->clkdiv, clkdiv); + SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv)); + } + OSL_DELAY(10); + } + + /* Set the clkdiv2 divisor bits (2:0) to 0x4 if srom is present */ + if (bustype == SI_BUS) { + uint32 clkdiv2, sromprsnt, capabilities, srom_supported; + 
capabilities = R_REG(osh, &cc->capabilities); + srom_supported = capabilities & SROM_SUPPORTED; + if (srom_supported) + { + sromprsnt = R_REG(osh, &cc->sromcontrol); + sromprsnt = sromprsnt & SROM_PRSNT_MASK; + if (sromprsnt) { + /* SROM clock come from backplane clock/div2. Must <= 1Mhz */ + clkdiv2 = (R_REG(osh, &cc->clkdiv2) & ~CLKD2_SROM); + clkdiv2 |= CLKD2_SROMDIV_192; + W_REG(osh, &cc->clkdiv2, clkdiv2); + } + } + } + + if (bustype == PCI_BUS) { + + } +#endif // endif +#ifdef BCM_SDRBL + /* 4360 rom bootloader in PCIE case, if the SDR is enabled, But preotection is + * not turned on, then we want to hold arm in reset. + * Bottomline: In sdrenable case, we allow arm to boot only when protection is + * turned on. + */ + if (CHIP_HOSTIF_PCIE(&(sii->pub))) { + uint32 sflags = si_arm_sflags(&(sii->pub)); + + /* If SDR is enabled but protection is not turned on + * then we want to force arm to WFI. + */ + if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) { + disable_arm_irq(); + while (1) { + hnd_cpu_wait(sih); + } + } + } +#endif /* BCM_SDRBL */ + + pvars = NULL; + BCM_REFERENCE(pvars); + + { + sii->lhl_ps_mode = LHL_PS_MODE_0; + } + + if (!si_onetimeinit) { + + if (CCREV(sii->pub.ccrev) >= 20) { + uint32 gpiopullup = 0, gpiopulldown = 0; + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(cc != NULL); + + W_REG(osh, &cc->gpiopullup, gpiopullup); + W_REG(osh, &cc->gpiopulldown, gpiopulldown); + si_setcoreidx(sih, origidx); + } + + } + + /* clear any previous epidiag-induced target abort */ + ASSERT(!si_taclear(sih, FALSE)); + +#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED) + si_pmustatstimer_init(sih); +#endif /* BCMPMU_STATS */ + +#ifdef BOOTLOADER_CONSOLE_OUTPUT + /* Enable console prints */ + si_muxenab(sii, 3); +#endif // endif + + return (sii); + +exit: + + return NULL; +} + +/** may be called with core in reset */ +void +si_detach(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = 
(si_cores_info_t *)sii->cores_info; + uint idx; + + if (BUSTYPE(sih->bustype) == SI_BUS) + for (idx = 0; idx < SI_MAXCORES; idx++) + if (cores_info->regs[idx]) { + REG_UNMAP(cores_info->regs[idx]); + cores_info->regs[idx] = NULL; + } + +#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) + if (cores_info != &ksii_cores_info) +#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ + MFREE(sii->osh, cores_info, sizeof(si_cores_info_t)); + +#if defined(BCM_BACKPLANE_TIMEOUT) + if (sih->err_info) { + MFREE(sii->osh, sih->err_info, sizeof(si_axi_error_info_t)); + sii->pub.err_info = NULL; + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + + if (sii->axi_wrapper) { + MFREE(sii->osh, sii->axi_wrapper, + (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS)); + sii->axi_wrapper = NULL; + } + +#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) + if (sii != &ksii) +#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ + MFREE(sii->osh, sii, sizeof(si_info_t)); +} + +void * +si_osh(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->osh; +} + +void +si_setosh(si_t *sih, osl_t *osh) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + if (sii->osh != NULL) { + SI_ERROR(("osh is already set....\n")); + ASSERT(!sii->osh); + } + sii->osh = osh; +} + +/** register driver interrupt disabling and restoring callback functions */ +void +si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + sii->intr_arg = intr_arg; + sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn; + sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn; + sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn; + /* save current core id. when this function called, the current core + * must be the core which provides driver functions(il, et, wl, etc.) 
+ */ + sii->dev_coreid = cores_info->coreid[sii->curidx]; +} + +void +si_deregister_intr_callback(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + sii->intrsoff_fn = NULL; + sii->intrsrestore_fn = NULL; + sii->intrsenabled_fn = NULL; +} + +uint +si_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_intflag(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return R_REG(sii->osh, ((uint32 *)(uintptr) + (sii->oob_router + OOB_STATUSA))); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_flag(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_flag(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_flag(sih); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag_alt(si_t *sih) +{ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_flag_alt(sih); + else { + ASSERT(0); + return 0; + } +} + +void +si_setint(si_t *sih, int siflag) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_setint(sih, siflag); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_setint(sih, siflag); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_setint(sih, siflag); + else + ASSERT(0); +} + +uint +si_coreid(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + return cores_info->coreid[sii->curidx]; +} + +uint +si_coreidx(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->curidx; +} + +volatile void * +si_d11_switch_addrbase(si_t *sih, uint coreunit) +{ + return 
si_setcore(sih, D11_CORE_ID, coreunit); +} + +/** return the core-type instantiation # of the current core */ +uint +si_coreunit(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx; + uint coreid; + uint coreunit; + uint i; + + coreunit = 0; + + idx = sii->curidx; + + ASSERT(GOODREGS(sii->curmap)); + coreid = si_coreid(sih); + + /* count the cores of our type */ + for (i = 0; i < idx; i++) + if (cores_info->coreid[i] == coreid) + coreunit++; + + return (coreunit); +} + +uint +si_corevendor(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corevendor(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corevendor(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corevendor(sih); + else { + ASSERT(0); + return 0; + } +} + +bool +si_backplane64(si_t *sih) +{ + return ((sih->cccaps & CC_CAP_BKPLN64) != 0); +} + +uint +si_corerev(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corerev(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corerev(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corerev(sih); + else { + ASSERT(0); + return 0; + } +} + +uint +si_corerev_minor(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + return ai_corerev_minor(sih); + } else { + return 0; + } +} + +/* return index of coreid or BADIDX if not found */ +uint +si_findcoreidx(si_t *sih, uint coreid, uint coreunit) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint found; + uint i; + + found = 0; + + for (i = 0; i < sii->numcores; i++) + if (cores_info->coreid[i] == coreid) { + if (found == coreunit) + return (i); + found++; + } + + return (BADIDX); +} + +/** return total 
coreunit of coreid or zero if not found */ +uint +si_numcoreunits(si_t *sih, uint coreid) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint found = 0; + uint i; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == coreid) { + found++; + } + } + + return found; +} + +/** return total D11 coreunits */ +uint +BCMRAMFN(si_numd11coreunits)(si_t *sih) +{ + uint found = 0; + + found = si_numcoreunits(sih, D11_CORE_ID); + +#if defined(WLRSDB) && defined(WLRSDB_DISABLED) + /* If RSDB functionality is compiled out, + * then ignore any D11 cores beyond the first + * Used in norsdb dongle build variants for rsdb chip. + */ + found = 1; +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ + + return found; +} + +/** return list of found cores */ +uint +si_corelist(si_t *sih, uint coreid[]) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint))); + return (sii->numcores); +} + +/** return current wrapper mapping */ +void * +si_wrapperregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + + return (sii->curwrap); +} + +/** return current register mapping */ +volatile void * +si_coreregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + + return (sii->curmap); +} + +/** + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. 
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +volatile void * +si_setcore(si_t *sih, uint coreid, uint coreunit) +{ + uint idx; + + idx = si_findcoreidx(sih, coreid, coreunit); + if (!GOODIDX(idx)) + return (NULL); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, idx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_setcoreidx(sih, idx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, idx); + else { + ASSERT(0); + return NULL; + } +} + +volatile void * +si_setcoreidx(si_t *sih, uint coreidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, coreidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_setcoreidx(sih, coreidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, coreidx); + else { + ASSERT(0); + return NULL; + } +} + +/** Turn off interrupt as required by sb_setcore, before switch core */ +volatile void * +si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) +{ + volatile void *cc; + si_info_t *sii = SI_INFO(sih); + + if (SI_FAST(sii)) { + /* Overloading the origidx variable to remember the coreid, + * this works because the core ids cannot be confused with + * core indices. 
+ */ + *origidx = coreid; + if (coreid == CC_CORE_ID) + return (volatile void *)CCREGS_FAST(sii); + else if (coreid == BUSCORETYPE(sih->buscoretype)) + return (volatile void *)PCIEREGS(sii); + } + INTR_OFF(sii, *intr_val); + *origidx = sii->curidx; + cc = si_setcore(sih, coreid, 0); + ASSERT(cc != NULL); + + return cc; +} + +/* restore coreidx and restore interrupt */ +void +si_restore_core(si_t *sih, uint coreid, uint intr_val) +{ + si_info_t *sii = SI_INFO(sih); + + if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == BUSCORETYPE(sih->buscoretype)))) + return; + + si_setcoreidx(sih, coreid); + INTR_RESTORE(sii, intr_val); +} + +int +si_numaddrspaces(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_numaddrspaces(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_numaddrspaces(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_numaddrspaces(sih); + else { + ASSERT(0); + return 0; + } +} + +/* Return the address of the nth address space in the current core + * Arguments: + * sih : Pointer to struct si_t + * spidx : slave port index + * baidx : base address index + */ + +uint32 +si_addrspace(si_t *sih, uint spidx, uint baidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspace(sih, baidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_addrspace(sih, spidx, baidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspace(sih, baidx); + else { + ASSERT(0); + return 0; + } +} + +/* Return the size of the nth address space in the current core + * Arguments: + * sih : Pointer to struct si_t + * spidx : slave port index + * baidx : base address index + */ +uint32 +si_addrspacesize(si_t *sih, uint spidx, uint baidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspacesize(sih, baidx); + 
else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_addrspacesize(sih, spidx, baidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspacesize(sih, baidx); + else { + ASSERT(0); + return 0; + } +} + +void +si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) +{ + /* Only supported for SOCI_AI */ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_coreaddrspaceX(sih, asidx, addr, size); + else + *size = 0; +} + +uint32 +si_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_cflags(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_core_cflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_cflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_cflags_wo(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_cflags_wo(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_cflags_wo(sih, mask, val); + else + ASSERT(0); +} + +uint32 +si_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_sflags(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_core_sflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_sflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_commit(si_t 
*sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_commit(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ; + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ; + else { + ASSERT(0); + } +} + +bool +si_iscoreup(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_iscoreup(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_iscoreup(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_iscoreup(sih); + else { + ASSERT(0); + return FALSE; + } +} + +uint +si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + /* only for AI back plane chips */ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return (ai_wrap_reg(sih, offset, mask, val)); + return 0; +} +/* si_backplane_access is used to read full backplane address from host for PCIE FD + * it uses secondary bar-0 window which lies at an offset of 16K from primary bar-0 + * Provides support for read/write of 1/2/4 bytes of backplane address + * Can be used to read/write + * 1. core regs + * 2. Wrapper regs + * 3. memory + * 4. 
BT area + * For accessing any 32 bit backplane address, [31 : 12] of backplane should be given in "region" + * [11 : 0] should be the "regoff" + * for reading 4 bytes from reg 0x200 of d11 core use it like below + * : si_backplane_access(sih, 0x18001000, 0x200, 4, 0, TRUE) + */ +static int si_backplane_addr_sane(uint addr, uint size) +{ + int bcmerror = BCME_OK; + + /* For 2 byte access, address has to be 2 byte aligned */ + if (size == 2) { + if (addr & 0x1) { + bcmerror = BCME_ERROR; + } + } + /* For 4 byte access, address has to be 4 byte aligned */ + if (size == 4) { + if (addr & 0x3) { + bcmerror = BCME_ERROR; + } + } + return bcmerror; +} + +void +si_invalidate_second_bar0win(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + sii->second_bar0win = ~0x0; +} + +int +si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read) +{ + volatile uint32 *r = NULL; + uint32 region = 0; + si_info_t *sii = SI_INFO(sih); + + /* Valid only for pcie bus */ + if (BUSTYPE(sih->bustype) != PCI_BUS) { + SI_ERROR(("Valid only for pcie bus \n")); + return BCME_ERROR; + } + + /* Split adrr into region and address offset */ + region = (addr & (0xFFFFF << 12)); + addr = addr & 0xFFF; + + /* check for address and size sanity */ + if (si_backplane_addr_sane(addr, size) != BCME_OK) + return BCME_ERROR; + + /* Update window if required */ + if (sii->second_bar0win != region) { + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region); + sii->second_bar0win = region; + } + + /* Estimate effective address + * sii->curmap : bar-0 virtual address + * PCI_SECOND_BAR0_OFFSET : secondar bar-0 offset + * regoff : actual reg offset + */ + r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr); + + SI_VMSG(("si curmap %p region %x regaddr %x effective addr %p READ %d\n", + (volatile char*)sii->curmap, region, addr, r, read)); + + switch (size) { + case sizeof(uint8) : + if (read) + *val = R_REG(sii->osh, (volatile uint8*)r); + else + 
W_REG(sii->osh, (volatile uint8*)r, *val); + break; + case sizeof(uint16) : + if (read) + *val = R_REG(sii->osh, (volatile uint16*)r); + else + W_REG(sii->osh, (volatile uint16*)r, *val); + break; + case sizeof(uint32) : + if (read) + *val = R_REG(sii->osh, (volatile uint32*)r); + else + W_REG(sii->osh, (volatile uint32*)r, *val); + break; + default : + SI_ERROR(("Invalid size %d \n", size)); + return (BCME_ERROR); + break; + } + + return (BCME_OK); +} +uint +si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corereg(sih, coreidx, regoff, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corereg(sih, coreidx, regoff, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corereg(sih, coreidx, regoff, mask, val); + else { + ASSERT(0); + return 0; + } +} + +uint +si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + return ai_corereg_writeonly(sih, coreidx, regoff, mask, val); +} + +/** ILP sensitive register access needs special treatment to avoid backplane stalls */ +bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff) +{ + if (idx == SI_CC_IDX) { + if (CHIPCREGS_ILP_SENSITIVE(regoff)) + return TRUE; + } else if (PMUREGS_ILP_SENSITIVE(regoff)) { + return TRUE; + } + + return FALSE; +} + +/** 'idx' should refer either to the chipcommon core or the PMU core */ +uint +si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val) +{ + int pmustatus_offset; + + /* prevent backplane stall on double write to 'ILP domain' registers in the PMU */ + if (mask != 0 && PMUREV(sih->pmurev) >= 22 && + si_pmu_is_ilp_sensitive(idx, regoff)) { + pmustatus_offset = AOB_ENAB(sih) ? 
OFFSETOF(pmuregs_t, pmustatus) : + OFFSETOF(chipcregs_t, pmustatus); + + while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING) + {}; + } + + return si_corereg(sih, idx, regoff, mask, val); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. + */ +volatile uint32 * +si_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corereg_addr(sih, coreidx, regoff); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corereg_addr(sih, coreidx, regoff); + else { + return 0; + } +} + +void +si_core_disable(si_t *sih, uint32 bits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_disable(sih, bits); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_disable(sih, bits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_disable(sih, bits); +} + +void +si_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_reset(sih, bits, resetbits); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_reset(sih, bits, resetbits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_reset(sih, bits, resetbits); +} + +/** Run bist on current core. 
Caller needs to take care of core-specific bist hazards */ +int +si_corebist(si_t *sih) +{ + uint32 cflags; + int result = 0; + + /* Read core control flags */ + cflags = si_core_cflags(sih, 0, 0); + + /* Set bist & fgc */ + si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC)); + + /* Wait for bist done */ + SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000); + + if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR) + result = BCME_ERROR; + + /* Reset core control flags */ + si_core_cflags(sih, 0xffff, cflags); + + return result; +} + +uint +si_num_slaveports(si_t *sih, uint coreid) +{ + uint idx = si_findcoreidx(sih, coreid, 0); + uint num = 0; + + if (idx != BADIDX) { + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + num = ai_num_slaveports(sih, idx); + } + } + return num; +} + +uint32 +si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint core_id, uint coreunit) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx = sii->curidx; + uint32 addr = 0x0; + + if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI))) + goto done; + + si_setcore(sih, core_id, coreunit); + + addr = ai_addrspace(sih, spidx, baidx); + + si_setcoreidx(sih, origidx); + +done: + return addr; +} + +uint32 +si_get_d11_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint coreunit) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx = sii->curidx; + uint32 addr = 0x0; + + if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) || + (CHIPTYPE(sih->socitype) == SOCI_NAI))) + goto done; + + si_setcore(sih, D11_CORE_ID, coreunit); + + addr = ai_addrspace(sih, spidx, baidx); + + si_setcoreidx(sih, origidx); + +done: + return addr; +} + +static uint32 +factor6(uint32 x) +{ + switch (x) { + case CC_F6_2: return 2; + case CC_F6_3: return 3; + case CC_F6_4: return 4; + case CC_F6_5: return 5; + case CC_F6_6: return 6; + case CC_F6_7: return 7; + default: return 0; + } +} + +/* + * 
Divide the clock by the divisor with protection for + * a zero divisor. + */ +static uint32 +divide_clock(uint32 clock, uint32 div) +{ + return div ? clock / div : 0; +} + +/** calculate the speed the SI would run at given a set of clockcontrol values */ +uint32 +si_clock_rate(uint32 pll_type, uint32 n, uint32 m) +{ + uint32 n1, n2, clock, m1, m2, m3, mc; + + n1 = n & CN_N1_MASK; + n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT; + + if (pll_type == PLL_TYPE6) { + if (m & CC_T6_MMASK) + return CC_T6_M1; + else + return CC_T6_M0; + } else if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + n1 = factor6(n1); + n2 += CC_F5_BIAS; + } else if (pll_type == PLL_TYPE2) { + n1 += CC_T2_BIAS; + n2 += CC_T2_BIAS; + ASSERT((n1 >= 2) && (n1 <= 7)); + ASSERT((n2 >= 5) && (n2 <= 23)); + } else if (pll_type == PLL_TYPE5) { + return (100000000); + } else + ASSERT(0); + /* PLL types 3 and 7 use BASE2 (25Mhz) */ + if ((pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE7)) { + clock = CC_CLOCK_BASE2 * n1 * n2; + } else + clock = CC_CLOCK_BASE1 * n1 * n2; + + if (clock == 0) + return 0; + + m1 = m & CC_M1_MASK; + m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT; + m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT; + mc = (m & CC_MC_MASK) >> CC_MC_SHIFT; + + if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + m1 = factor6(m1); + if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3)) + m2 += CC_F5_BIAS; + else + m2 = factor6(m2); + m3 = factor6(m3); + + switch (mc) { + case CC_MC_BYPASS: return (clock); + case CC_MC_M1: return divide_clock(clock, m1); + case CC_MC_M1M2: return divide_clock(clock, m1 * m2); + case CC_MC_M1M2M3: return divide_clock(clock, m1 * m2 * m3); + case CC_MC_M1M3: return divide_clock(clock, m1 * m3); + default: return (0); + } + } else { + ASSERT(pll_type == PLL_TYPE2); + + m1 += CC_T2_BIAS; + m2 += CC_T2M2_BIAS; + m3 += CC_T2_BIAS; + ASSERT((m1 >= 2) && (m1 <= 7)); 
+ ASSERT((m2 >= 3) && (m2 <= 10)); + ASSERT((m3 >= 2) && (m3 <= 7)); + + if ((mc & CC_T2MC_M1BYP) == 0) + clock /= m1; + if ((mc & CC_T2MC_M2BYP) == 0) + clock /= m2; + if ((mc & CC_T2MC_M3BYP) == 0) + clock /= m3; + + return (clock); + } +} + +/** + * Some chips could have multiple host interfaces, however only one will be active. + * For a given chip. Depending pkgopt and cc_chipst return the active host interface. + */ +uint +si_chip_hostif(si_t *sih) +{ + uint hosti = 0; + + switch (CHIPID(sih->chip)) { + case BCM43018_CHIP_ID: + case BCM43430_CHIP_ID: + hosti = CHIP_HOSTIF_SDIOMODE; + break; + case BCM43012_CHIP_ID: + hosti = CHIP_HOSTIF_SDIOMODE; + break; + CASE_BCM43602_CHIP: + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4360_CHIP_ID: + /* chippkg bit-0 == 0 is PCIE only pkgs + * chippkg bit-0 == 1 has both PCIE and USB cores enabled + */ + if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB)) + hosti = CHIP_HOSTIF_USBMODE; + else + hosti = CHIP_HOSTIF_PCIEMODE; + + break; + + case BCM4335_CHIP_ID: + /* TBD: like in 4360, do we need to check pkg? 
*/ + if (CST4335_CHIPMODE_USB20D(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4335_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + CASE_BCM4345_CHIP: + if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4345_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4345_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4349_CHIP_GRPID: + case BCM53573_CHIP_GRPID: + if (CST4349_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4349_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + case BCM4364_CHIP_ID: + if (CST4364_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4364_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + case BCM4373_CHIP_ID: + if (CST4373_CHIPMODE_USB20D(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4373_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4373_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4347_CHIP_GRPID: + if (CST4347_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4347_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + case BCM4369_CHIP_GRPID: + if (CST4369_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4369_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + if (CST4350_CHIPMODE_USB20D(sih->chipst) || + CST4350_CHIPMODE_HSIC20D(sih->chipst) || + CST4350_CHIPMODE_USB30D(sih->chipst) || + CST4350_CHIPMODE_USB30D_WL(sih->chipst) || + 
CST4350_CHIPMODE_HSIC30D(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4350_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4350_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + default: + break; + } + + return hosti; +} + +/** set chip watchdog reset timer to fire in 'ticks' */ +void +si_watchdog(si_t *sih, uint ticks) +{ + uint nb, maxt; + uint pmu_wdt = 1; + + if (PMUCTL_ENAB(sih) && pmu_wdt) { + nb = (CCREV(sih->ccrev) < 26) ? 16 : ((CCREV(sih->ccrev) >= 37) ? 32 : 24); + /* The mips compiler uses the sllv instruction, + * so we specially handle the 32-bit case. + */ + if (nb == 32) + maxt = 0xffffffff; + else + maxt = ((1 << nb) - 1); + + if (ticks == 1) + ticks = 2; + else if (ticks > maxt) + ticks = maxt; + if (CHIPID(sih->chip) == BCM43012_CHIP_ID) { + PMU_REG_NEW(sih, min_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK); + PMU_REG_NEW(sih, watchdog_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK); + PMU_REG_NEW(sih, pmustatus, PST_WDRESET, PST_WDRESET); + PMU_REG_NEW(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_SWENAB, 0); + SPINWAIT((PMU_REG(sih, pmustatus, 0, 0) & PST_ILPFASTLPO), + PMU_MAX_TRANSITION_DLY); + } + pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks); + } else { + maxt = (1 << 28) - 1; + if (ticks > maxt) + ticks = maxt; + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks); + } +} + +/** trigger watchdog reset after ms milliseconds */ +void +si_watchdog_ms(si_t *sih, uint32 ms) +{ + si_watchdog(sih, wd_msticks * ms); +} + +uint32 si_watchdog_msticks(void) +{ + return wd_msticks; +} + +bool +si_taclear(si_t *sih, bool details) +{ + return FALSE; +} + +/** return the slow clock source - LPO, XTAL, or PCI */ +static uint +si_slowclk_src(si_info_t *sii) +{ + chipcregs_t *cc; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + if (CCREV(sii->pub.ccrev) < 6) { + if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) && + (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, 
sizeof(uint32)) & + PCI_CFG_GPIO_SCS)) + return (SCC_SS_PCI); + else + return (SCC_SS_XTAL); + } else if (CCREV(sii->pub.ccrev) < 10) { + cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx); + ASSERT(cc); + return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK); + } else /* Insta-clock */ + return (SCC_SS_XTAL); +} + +/** return the ILP (slowclock) min or max frequency */ +static uint +si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc) +{ + uint32 slowclk; + uint div; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + /* shouldn't be here unless we've established the chip has dynamic clk control */ + ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL); + + slowclk = si_slowclk_src(sii); + if (CCREV(sii->pub.ccrev) < 6) { + if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64)); + else + return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32)); + } else if (CCREV(sii->pub.ccrev) < 10) { + div = 4 * + (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); + if (slowclk == SCC_SS_LPO) + return (max_freq ? LPOMAXFREQ : LPOMINFREQ); + else if (slowclk == SCC_SS_XTAL) + return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div)); + else if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div)); + else + ASSERT(0); + } else { + /* Chipc rev 10 is InstaClock */ + div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT; + div = 4 * (div + 1); + return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div)); + } + return (0); +} + +static void +si_clkctl_setdelay(si_info_t *sii, void *chipcregs) +{ + chipcregs_t *cc = (chipcregs_t *)chipcregs; + uint slowmaxfreq, pll_delay, slowclk; + uint pll_on_delay, fref_sel_delay; + + pll_delay = PLL_DELAY; + + /* If the slow clock is not sourced by the xtal then add the xtal_on_delay + * since the xtal will also be powered down by dynamic clk control logic. 
+ */ + + slowclk = si_slowclk_src(sii); + if (slowclk != SCC_SS_XTAL) + pll_delay += XTAL_ON_DELAY; + + /* Starting with 4318 it is ILP that is used for the delays */ + slowmaxfreq = si_slowclk_freq(sii, (CCREV(sii->pub.ccrev) >= 10) ? FALSE : TRUE, cc); + + pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; + fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; + + W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay); + W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay); +} + +/** initialize power control delay registers */ +void +si_clkctl_init(si_t *sih) +{ + si_info_t *sii; + uint origidx = 0; + chipcregs_t *cc; + bool fast; + + if (!CCCTL_ENAB(sih)) + return; + + sii = SI_INFO(sih); + fast = SI_FAST(sii); + if (!fast) { + origidx = sii->curidx; + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) + return; + } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) + return; + ASSERT(cc != NULL); + + /* set all Instaclk chip ILP to 1 MHz */ + if (CCREV(sih->ccrev) >= 10) + SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK, + (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); + + si_clkctl_setdelay(sii, (void *)(uintptr)cc); + + OSL_DELAY(20000); + + if (!fast) + si_setcoreidx(sih, origidx); +} + +/** change logical "focus" to the gpio core for optimized access */ +volatile void * +si_gpiosetcore(si_t *sih) +{ + return (si_setcoreidx(sih, SI_CC_IDX)); +} + +/** + * mask & set gpiocontrol bits. + * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin. + * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated + * to some chip-specific purpose. 
+ */ +uint32 +si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiocontrol); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** mask&set gpio output enable bits */ +uint32 +si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioouten); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** mask&set gpio output bits */ +uint32 +si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioout); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** reserve one gpio */ +uint32 +si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + /* only cores on SI_BUS share GPIO's and only applcation users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return 0xffffffff; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return 0xffffffff; + } + + /* already reserved */ + if (si_gpioreservation & gpio_bitmask) + return 0xffffffff; + /* set reservation */ + si_gpioreservation |= gpio_bitmask; + + return si_gpioreservation; +} + +/** + * release one gpio. + * + * releasing the gpio doesn't change the current value on the GPIO last write value + * persists till someone overwrites it. 
+ */ +uint32 +si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + /* only cores on SI_BUS share GPIO's and only applcation users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return 0xffffffff; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return 0xffffffff; + } + + /* already released */ + if (!(si_gpioreservation & gpio_bitmask)) + return 0xffffffff; + + /* clear reservation */ + si_gpioreservation &= ~gpio_bitmask; + + return si_gpioreservation; +} + +/* return the current gpioin register value */ +uint32 +si_gpioin(si_t *sih) +{ + uint regoff; + + regoff = OFFSETOF(chipcregs_t, gpioin); + return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0)); +} + +/* mask&set gpio interrupt polarity bits */ +uint32 +si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointpolarity); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio interrupt mask bits */ +uint32 +si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointmask); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +uint32 +si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + regoff = OFFSETOF(chipcregs_t, gpioeventintmask); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* assign the gpio to an led */ +uint32 +si_gpioled(si_t *sih, uint32 mask, uint32 val) +{ + if (CCREV(sih->ccrev) < 16) + return 0xffffffff; + + /* gpio led powersave reg */ + return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val)); +} + +/* mask&set gpio timer val */ +uint32 +si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval) +{ + if (CCREV(sih->ccrev) < 16) + return 0xffffffff; + + return (si_corereg(sih, SI_CC_IDX, + OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval)); +} + +uint32 +si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val) +{ + uint offs; + + if (CCREV(sih->ccrev) < 20) + return 0xffffffff; + + offs = (updown ? 
OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup)); + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +uint32 +si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val) +{ + uint offs; + + if (CCREV(sih->ccrev) < 11) + return 0xffffffff; + + if (regtype == GPIO_REGEVT) + offs = OFFSETOF(chipcregs_t, gpioevent); + else if (regtype == GPIO_REGEVT_INTMSK) + offs = OFFSETOF(chipcregs_t, gpioeventintmask); + else if (regtype == GPIO_REGEVT_INTPOL) + offs = OFFSETOF(chipcregs_t, gpioeventintpolarity); + else + return 0xffffffff; + + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +uint32 +si_gpio_int_enable(si_t *sih, bool enable) +{ + uint offs; + + if (CCREV(sih->ccrev) < 11) + return 0xffffffff; + + offs = OFFSETOF(chipcregs_t, intmask); + return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0))); +} + +/** Return the size of the specified SYSMEM bank */ +static uint +sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx) +{ + uint banksize, bankinfo; + uint bankidx = idx; + + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1); + return banksize; +} + +/** Return the RAM size of the SYSMEM core */ +uint32 +si_sysmem_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + + sysmemregs_t *regs; + bool wasup; + uint32 coreinfo; + uint memsize = 0; + uint8 i; + uint nb, nrb; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SYSMEM core */ + if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + coreinfo = R_REG(sii->osh, ®s->coreinfo); + + /* Number of ROM banks, SW need to skip the ROM banks. 
*/ + nrb = (coreinfo & SYSMEM_SRCI_ROMNB_MASK) >> SYSMEM_SRCI_ROMNB_SHIFT; + + nb = (coreinfo & SYSMEM_SRCI_SRNB_MASK) >> SYSMEM_SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) + memsize += sysmem_banksize(sii, regs, i + nrb); + + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +/** Return the size of the specified SOCRAM bank */ +static uint +socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type) +{ + uint banksize, bankinfo; + uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + + ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM); + + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1); + return banksize; +} + +void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 16) { + W_REG(sii->osh, ®s->bankidx, bankidx); + W_REG(sii->osh, ®s->bankpda, bankpda); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); +} + +void +si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + if (!set) + *enable = *protect = *remap = 0; + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, 
SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 10) { + uint32 extcinfo; + uint8 nb; + uint8 i; + uint32 bankidx, bankinfo; + + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT); + for (i = 0; i < nb; i++) { + bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + if (set) { + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK; + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK; + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK; + if (*enable) { + bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT); + if (*protect) + bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT); + if ((corerev >= 16) && *remap) + bankinfo |= + (1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT); + } + W_REG(sii->osh, ®s->bankinfo, bankinfo); + } else if (i == 0) { + if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) { + *enable = 1; + if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK) + *protect = 1; + if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) + *remap = 1; + } + } + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); +} + +bool +si_socdevram_remap_isenb(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + sbsocramregs_t *regs; + bool wasup, remap = FALSE; + uint corerev; + uint32 extcinfo; + uint8 nb; + uint8 i; + uint32 bankidx, bankinfo; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if 
(corerev >= 16) { + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT); + for (i = 0; i < nb; i++) { + bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) { + remap = TRUE; + break; + } + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + return remap; +} + +bool +si_socdevram_pkg(si_t *sih) +{ + if (si_socdevram_size(sih) > 0) + return TRUE; + else + return FALSE; +} + +uint32 +si_socdevram_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + uint32 memsize = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 10) { + uint32 extcinfo; + uint8 nb; + uint8 i; + + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT)); + for (i = 0; i < nb; i++) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +uint32 +si_socdevram_remap_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + uint32 memsize = 0, banksz; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 extcinfo; + uint8 nb; + uint8 i; + uint32 bankidx, bankinfo; + + /* Block ints and save 
current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 16) { + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT)); + + /* + * FIX: A0 Issue: Max addressable is 512KB, instead 640KB + * Only four banks are accessible to ARM + */ + if ((corerev == 16) && (nb == 5)) + nb = 4; + + for (i = 0; i < nb; i++) { + bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) { + banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM); + memsize += banksz; + } else { + /* Account only consecutive banks for now */ + break; + } + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +/** Return the RAM size of the SOCRAM core */ +uint32 +si_socram_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 coreinfo; + uint memsize = 0; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + corerev = si_corerev(sih); + coreinfo = R_REG(sii->osh, ®s->coreinfo); + + /* Calculate size from coreinfo based on rev */ + if (corerev == 0) + memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK)); + else if (corerev < 3) { + memsize = 1 << (SR_BSZ_BASE 
+ (coreinfo & SRCI_SRBSZ_MASK)); + memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + } else if ((corerev <= 7) || (corerev == 12)) { + uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + uint bsz = (coreinfo & SRCI_SRBSZ_MASK); + uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT; + if (lss != 0) + nb --; + memsize = nb * (1 << (bsz + SR_BSZ_BASE)); + if (lss != 0) + memsize += (1 << ((lss - 1) + SR_BSZ_BASE)); + } else { + uint8 i; + uint nb; + /* length of SRAM Banks increased for corerev greater than 23 */ + if (corerev >= 23) { + nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT)) >> SRCI_SRNB_SHIFT; + } else { + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + } + for (i = 0; i < nb; i++) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +/** Return the TCM-RAM size of the ARMCR4 core. */ +uint32 +si_tcm_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + volatile uint8 *regs; + bool wasup; + uint32 corecap; + uint memsize = 0; + uint32 nab = 0; + uint32 nbb = 0; + uint32 totb = 0; + uint32 bxinfo = 0; + uint32 idx = 0; + volatile uint32 *arm_cap_reg; + volatile uint32 *arm_bidx; + volatile uint32 *arm_binfo; + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to CR4 core */ + if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0))) + goto done; + + /* Get info for determining size. 
If in reset, come out of reset, + * but remain in halt + */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT); + + arm_cap_reg = (volatile uint32 *)(regs + SI_CR4_CAP); + corecap = R_REG(sii->osh, arm_cap_reg); + + nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT; + nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT; + totb = nab + nbb; + + arm_bidx = (volatile uint32 *)(regs + SI_CR4_BANKIDX); + arm_binfo = (volatile uint32 *)(regs + SI_CR4_BANKINFO); + for (idx = 0; idx < totb; idx++) { + W_REG(sii->osh, arm_bidx, idx); + + bxinfo = R_REG(sii->osh, arm_binfo); + memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT; + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +bool +si_has_flops(si_t *sih) +{ + uint origidx, cr4_rev; + + /* Find out CR4 core revision */ + origidx = si_coreidx(sih); + if (si_setcore(sih, ARMCR4_CORE_ID, 0)) { + cr4_rev = si_corerev(sih); + si_setcoreidx(sih, origidx); + + if (cr4_rev == 1 || cr4_rev >= 3) + return TRUE; + } + return FALSE; +} + +uint32 +si_socram_srmem_size(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 coreinfo; + uint memsize = 0; + + if (CHIPID(sih->chip) == BCM43430_CHIP_ID || + CHIPID(sih->chip) == BCM43018_CHIP_ID) { + return (64 * 1024); + } + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + corerev = si_corerev(sih); + coreinfo = R_REG(sii->osh, ®s->coreinfo); + + /* Calculate size from coreinfo based on rev */ + if (corerev >= 16) { + uint8 i; + uint nb = (coreinfo & 
SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) { + W_REG(sii->osh, ®s->bankidx, i); + if (R_REG(sii->osh, ®s->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +#if !defined(_CFEZ_) || defined(CFG_WL) +void +si_btcgpiowar(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx; + uint intr_val = 0; + chipcregs_t *cc; + + /* Make sure that there is ChipCommon core present && + * UART_TX is strapped to 1 + */ + if (!(sih->cccaps & CC_CAP_UARTGPIO)) + return; + + /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */ + INTR_OFF(sii, intr_val); + + origidx = si_coreidx(sih); + + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(cc != NULL); + + W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04); + + /* restore the original index */ + si_setcoreidx(sih, origidx); + + INTR_RESTORE(sii, intr_val); +} + +void +si_chipcontrl_restore(si_t *sih, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + W_REG(sii->osh, &cc->chipcontrol, val); + si_setcoreidx(sih, origidx); +} + +uint32 +si_chipcontrl_read(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return -1; + } + val = R_REG(sii->osh, &cc->chipcontrol); + si_setcoreidx(sih, origidx); + return val; +} + +/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. 
*/ +void +si_chipcontrl_srom4360(si_t *sih, bool on) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->chipcontrol); + + if (on) { + val &= ~(CCTRL4360_SECI_MODE | + CCTRL4360_BTSWCTRL_MODE | + CCTRL4360_EXTRA_FEMCTRL_MODE | + CCTRL4360_BT_LGCY_MODE | + CCTRL4360_CORE2FEMCTRL4_ON); + + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + } + + si_setcoreidx(sih, origidx); +} + +/** + * The SROM clock is derived from the backplane clock. 4365 (200Mhz) and 43684 (240Mhz) have a fast + * backplane clock that requires a higher-than-POR-default clock divisor ratio for the SROM clock. + */ +void +si_srom_clk_set(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + uint32 divisor = 1; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + val = R_REG(sii->osh, &cc->clkdiv2); + if (BCM4365_CHIP(sih->chip)) { + divisor = CLKD2_SROMDIV_192; /* divide 200 by 192 -> SPROM clock ~ 1.04Mhz */ + } else { + ASSERT(0); + } + + W_REG(sii->osh, &cc->clkdiv2, ((val & ~CLKD2_SROM) | divisor)); + si_setcoreidx(sih, origidx); +} +#endif // endif + +void +si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag) +{ +} + +void +si_btc_enable_chipcontrol(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + /* BT fix */ + W_REG(sii->osh, &cc->chipcontrol, + R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK); + + si_setcoreidx(sih, origidx); +} + +/** cache device removed state */ +void si_set_device_removed(si_t 
*sih, bool status) +{ + si_info_t *sii = SI_INFO(sih); + + sii->device_removed = status; +} + +/** check if the device is removed */ +bool +si_deviceremoved(si_t *sih) +{ + uint32 w; + si_info_t *sii = SI_INFO(sih); + + if (sii->device_removed) { + return TRUE; + } + + switch (BUSTYPE(sih->bustype)) { + case PCI_BUS: + ASSERT(SI_INFO(sih)->osh != NULL); + w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32)); + if ((w & 0xFFFF) != VENDOR_BROADCOM) + return TRUE; + break; + } + return FALSE; +} + +bool +si_is_warmboot(void) +{ + +#ifdef BCMULP + return (boot_type == WARM_BOOT); +#else + return FALSE; +#endif // endif +} + +bool +si_is_sprom_available(si_t *sih) +{ + if (CCREV(sih->ccrev) >= 31) { + si_info_t *sii; + uint origidx; + chipcregs_t *cc; + uint32 sromctrl; + + if ((sih->cccaps & CC_CAP_SROM) == 0) + return FALSE; + + sii = SI_INFO(sih); + origidx = sii->curidx; + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT(cc); + sromctrl = R_REG(sii->osh, &cc->sromcontrol); + si_setcoreidx(sih, origidx); + return (sromctrl & SRC_PRESENT); + } + + switch (CHIPID(sih->chip)) { + case BCM43018_CHIP_ID: + case BCM43430_CHIP_ID: + return FALSE; + case BCM4335_CHIP_ID: + CASE_BCM4345_CHIP: + return ((sih->chipst & CST4335_SPROM_MASK) && + !(sih->chipst & CST4335_SFLASH_MASK)); + case BCM4349_CHIP_GRPID: + return (sih->chipst & CST4349_SPROM_PRESENT) != 0; + case BCM53573_CHIP_GRPID: + return FALSE; /* SPROM PRESENT is not defined for 53573 as of now */ + case BCM4364_CHIP_ID: + return (sih->chipst & CST4364_SPROM_PRESENT) != 0; + case BCM4369_CHIP_GRPID: + if (CHIPREV(sih->chiprev) == 0) { + /* WAR for 4369a0: HW4369-1729. no sprom, default to otp always. 
*/ + return 0; + } else { + return (sih->chipst & CST4369_SPROM_PRESENT) != 0; + } + case BCM4347_CHIP_GRPID: + return (sih->chipst & CST4347_SPROM_PRESENT) != 0; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + return (sih->chipst & CST4350_SPROM_PRESENT) != 0; + CASE_BCM43602_CHIP: + return (sih->chipst & CST43602_SPROM_PRESENT) != 0; + case BCM43131_CHIP_ID: + case BCM43217_CHIP_ID: + case BCM43428_CHIP_ID: + return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT; + case BCM4373_CHIP_ID: + case BCM43012_CHIP_ID: + return FALSE; + default: + return TRUE; + } +} + +uint32 si_get_sromctl(si_t *sih) +{ + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 sromctl; + osl_t *osh = si_osh(sih); + + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT((uintptr)cc); + + sromctl = R_REG(osh, &cc->sromcontrol); + + /* return to the original core */ + si_setcoreidx(sih, origidx); + return sromctl; +} + +int si_set_sromctl(si_t *sih, uint32 value) +{ + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + osl_t *osh = si_osh(sih); + int ret = BCME_OK; + + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT((uintptr)cc); + + /* get chipcommon rev */ + if (si_corerev(sih) >= 32) { + /* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and + * SpromPresent is 1. 
+ */ + if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 && + (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) { + W_REG(osh, &cc->sromcontrol, value); + } else { + ret = BCME_NODEVICE; + } + } else { + ret = BCME_UNSUPPORTED; + } + + /* return to the original core */ + si_setcoreidx(sih, origidx); + + return ret; +} + +uint +si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val) +{ + uint origidx, intr_val = 0; + uint ret_val; + si_info_t *sii = SI_INFO(sih); + + origidx = si_coreidx(sih); + + INTR_OFF(sii, intr_val); + si_setcoreidx(sih, coreidx); + + ret_val = si_wrapperreg(sih, offset, mask, val); + + /* return to the original core */ + si_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + return ret_val; +} + +/* cleanup the timer from the host when ARM is been halted + * without a chance for ARM cleanup its resources + * If left not cleanup, Intr from a software timer can still + * request HT clk when ARM is halted. + */ +uint32 +si_pmu_res_req_timer_clr(si_t *sih) +{ + uint32 mask; + + mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ; + mask <<= 14; + /* clear mask bits */ + pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0); + /* readback to ensure write completes */ + return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0); +} + +/** turn on/off rfldo */ +void +si_pmu_rfldo(si_t *sih, bool on) +{ +} + +/* Caller of this function should make sure is on PCIE core + * Used in pciedev.c. 
+ */ +void +si_pcie_disable_oobselltr(si_t *sih) +{ + ASSERT(si_coreid(sih) == PCIE2_CORE_ID); + if (PCIECOREREV(sih->buscorerev) >= 23) + si_wrapperreg(sih, AI_OOBSELIND74, ~0, 0); + else + si_wrapperreg(sih, AI_OOBSELIND30, ~0, 0); +} + +void +si_pcie_ltr_war(si_t *sih) +{ +} + +void +si_pcie_hw_LTR_war(si_t *sih) +{ +} + +void +si_pciedev_reg_pm_clk_period(si_t *sih) +{ +} + +void +si_pciedev_crwlpciegen2(si_t *sih) +{ +} + +void +si_pcie_prep_D3(si_t *sih, bool enter_D3) +{ +} + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) +uint32 +si_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap) +{ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) { + return ai_clear_backplane_to_per_core(sih, coreid, coreunit, wrap); + } + + return AXI_WRAP_STS_NONE; +} +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + +uint32 +si_clear_backplane_to(si_t *sih) +{ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || + (CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) { + return ai_clear_backplane_to(sih); + } + + return 0; +} + +void +si_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp, uint32 cid) +{ +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + /* Enable only for AXI */ + if (CHIPTYPE(sih->socitype) != SOCI_AI) { + return; + } + + ai_update_backplane_timeouts(sih, enable, timeout_exp, cid); +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ +} + +/* + * This routine adds the AXI timeouts for + * chipcommon, pcie and ARM slave wrappers + */ +void +si_slave_wrapper_add(si_t *sih) +{ +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + uint32 axi_to = 0; + + /* Enable only for AXI */ + if ((CHIPTYPE(sih->socitype) != SOCI_AI) && + (CHIPTYPE(sih->socitype) != SOCI_DVTBUS)) { + return; + } + + if (CHIPID(sih->chip) == BCM4345_CHIP_ID && CHIPREV(sih->chiprev) >= 6) { + si_info_t *sii = SI_INFO(sih); + + int wrapper_idx = (int)sii->axi_num_wrappers - 1; + + 
ASSERT(wrapper_idx >= 0); /* axi_wrapper[] not initialised */ + do { + if (sii->axi_wrapper[wrapper_idx].wrapper_type == AI_SLAVE_WRAPPER && + sii->axi_wrapper[wrapper_idx].cid == 0xfff) { + sii->axi_wrapper[wrapper_idx].wrapper_addr = 0x1810b000; + break; + } + } while (wrapper_idx-- > 0); + ASSERT(wrapper_idx >= 0); /* all addresses valid for the chiprev under test */ + } + + if (BCM4347_CHIP(sih->chip)) { + axi_to = AXI_TO_VAL_4347; + } + else { + axi_to = AXI_TO_VAL; + } + + /* All required slave wrappers are added in ai_scan */ + ai_update_backplane_timeouts(sih, TRUE, axi_to, 0); + +#ifdef DISABLE_PCIE2_AXI_TIMEOUT + ai_update_backplane_timeouts(sih, FALSE, 0, PCIE_CORE_ID); + ai_update_backplane_timeouts(sih, FALSE, 0, PCIE2_CORE_ID); +#endif // endif + +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + +} + +void +si_pll_sr_reinit(si_t *sih) +{ +} + +/* Programming d11 core oob settings for 4364 + * WARs for HW4364-237 and HW4364-166 +*/ +void +si_config_4364_d11_oob(si_t *sih, uint coreid) +{ + uint save_idx; + + save_idx = si_coreidx(sih); + si_setcore(sih, coreid, 0); + si_wrapperreg(sih, AI_OOBSELINC30, ~0, 0x81828180); + si_wrapperreg(sih, AI_OOBSELINC74, ~0, 0x87868183); + si_wrapperreg(sih, AI_OOBSELOUTB74, ~0, 0x84858484); + si_setcore(sih, coreid, 1); + si_wrapperreg(sih, AI_OOBSELINC30, ~0, 0x81828180); + si_wrapperreg(sih, AI_OOBSELINC74, ~0, 0x87868184); + si_wrapperreg(sih, AI_OOBSELOUTB74, ~0, 0x84868484); + si_setcoreidx(sih, save_idx); +} + +void +si_pll_closeloop(si_t *sih) +{ +#if defined(SAVERESTORE) + uint32 data; + + /* disable PLL open loop operation */ + switch (CHIPID(sih->chip)) { +#ifdef SAVERESTORE + case BCM43018_CHIP_ID: + case BCM43430_CHIP_ID: + if (SR_ENAB() && sr_isenab(sih)) { + /* read back the pll openloop state */ + data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0); + /* current mode is openloop (possible POR) */ + if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) { + si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, + 
PMU1_PLLCTL8_OPENLOOP_MASK, 0); + si_pmu_pllupd(sih); + } + } + break; +#endif /* SAVERESTORE */ + case BCM4347_CHIP_GRPID: + case BCM4369_CHIP_GRPID: + si_pmu_chipcontrol(sih, PMU_CHIPCTL1, + PMU_CC1_ENABLE_CLOSED_LOOP_MASK, PMU_CC1_ENABLE_CLOSED_LOOP); + break; + default: + /* any unsupported chip bail */ + return; + } +#endif // endif +} + +#if defined(BCMSRPWR) && !defined(BCMSRPWR_DISABLED) +bool _bcmsrpwr = TRUE; +#else +bool _bcmsrpwr = FALSE; +#endif // endif + +#ifndef BCMSDIO +#define PWRREQ_OFFSET(sih) DAR_PCIE_PWR_CTRL((sih)->buscorerev) +#else +#define PWRREQ_OFFSET(sih) OFFSETOF(chipcregs_t, powerctl) +#endif // endif + +static void +si_corereg_pciefast_write(si_t *sih, uint regoff, uint val) +{ + volatile uint32 *r = NULL; + si_info_t *sii = SI_INFO(sih); + + ASSERT((BUSTYPE(sih->bustype) == PCI_BUS)); + + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + + W_REG(sii->osh, r, val); +} + +static uint +si_corereg_pciefast_read(si_t *sih, uint regoff) +{ + volatile uint32 *r = NULL; + si_info_t *sii = SI_INFO(sih); + + ASSERT((BUSTYPE(sih->bustype) == PCI_BUS)); + +#ifndef BCMSDIO + if (PCIECOREREV(sih->buscorerev) == 66) { + si_corereg_pciefast_write(sih, OFFSETOF(sbpcieregs_t, u1.dar_64.dar_ctrl), 0); + } +#endif // endif + + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + + return R_REG(sii->osh, r); +} + +uint32 +si_srpwr_request(si_t *sih, uint32 mask, uint32 val) +{ + uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ? 
+ OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih); + uint32 mask2 = mask; + uint32 val2 = val; + volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih) + + (uintptr)offset); + + if (mask || val) { + mask <<= SRPWR_REQON_SHIFT; + val <<= SRPWR_REQON_SHIFT; + + /* Return if requested power request is already set */ + if (BUSTYPE(sih->bustype) == SI_BUS) { + r = R_REG(OSH_NULL, fast_srpwr_addr); + } else { + r = si_corereg_pciefast_read(sih, offset); + } + + if ((r & mask) == val) { + return r; + } + + r = (r & ~mask) | val; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + W_REG(OSH_NULL, fast_srpwr_addr, r); + r = R_REG(OSH_NULL, fast_srpwr_addr); + } else { + si_corereg_pciefast_write(sih, offset, r); + r = si_corereg_pciefast_read(sih, offset); + } + + if (val2) { + if ((r & (mask2 << SRPWR_STATUS_SHIFT)) == + (val2 << SRPWR_STATUS_SHIFT)) { + return r; + } + si_srpwr_stat_spinwait(sih, mask2, val2); + } + } else { + if (BUSTYPE(sih->bustype) == SI_BUS) { + r = R_REG(OSH_NULL, fast_srpwr_addr); + } else { + r = si_corereg_pciefast_read(sih, offset); + } + } + + return r; +} + +uint32 +si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val) +{ + uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ? 
+ OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih); + volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih) + + (uintptr)offset); + + ASSERT(mask); + ASSERT(val); + + /* spinwait on pwrstatus */ + mask <<= SRPWR_STATUS_SHIFT; + val <<= SRPWR_STATUS_SHIFT; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + SPINWAIT(((R_REG(OSH_NULL, fast_srpwr_addr) & mask) != val), + PMU_MAX_TRANSITION_DLY); + r = R_REG(OSH_NULL, fast_srpwr_addr) & mask; + ASSERT(r == val); + } else { + SPINWAIT(((si_corereg_pciefast_read(sih, offset) & mask) != val), + PMU_MAX_TRANSITION_DLY); + r = si_corereg_pciefast_read(sih, offset) & mask; + ASSERT(r == val); + } + + r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK; + + return r; +} + +uint32 +si_srpwr_stat(si_t *sih) +{ + uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ? + OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih); + uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + r = si_corereg(sih, cidx, offset, 0, 0); + } else { + r = si_corereg_pciefast_read(sih, offset); + } + + r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK; + + return r; +} + +uint32 +si_srpwr_domain(si_t *sih) +{ + uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ? + OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih); + uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + r = si_corereg(sih, cidx, offset, 0, 0); + } else { + r = si_corereg_pciefast_read(sih, offset); + } + + r = (r >> SRPWR_DMN_SHIFT) & SRPWR_DMN_ALL_MASK; + + return r; +} + +/* Utility API to read/write the raw registers with absolute address. + * This function can be invoked from either FW or host driver. 
+ */ +uint32 +si_raw_reg(si_t *sih, uint32 reg, uint32 val, uint32 wrire_req) +{ + si_info_t *sii = SI_INFO(sih); + uint32 address_space = reg & ~0xFFF; + volatile uint32 * addr = (void*)(uintptr)(reg); + uint32 prev_value = 0; + uint32 cfg_reg = 0; + + if (sii == NULL) { + return 0; + } + + /* No need to translate the absolute address on SI bus */ + if (BUSTYPE(sih->bustype) == SI_BUS) { + goto skip_cfg; + } + + /* This API supports only the PCI host interface */ + if (BUSTYPE(sih->bustype) != PCI_BUS) { + return ID32_INVALID; + } + + if (PCIE_GEN2(sii)) { + /* Use BAR0 Secondary window is PCIe Gen2. + * Set the secondary BAR0 Window to current register of interest + */ + addr = (volatile uint32*)(((volatile uint8*)sii->curmap) + + PCI_SEC_BAR0_WIN_OFFSET + (reg & 0xfff)); + cfg_reg = PCIE2_BAR0_CORE2_WIN; + + } else { + /* PCIe Gen1 do not have secondary BAR0 window. + * reuse the BAR0 WIN2 + */ + addr = (volatile uint32*)(((volatile uint8*)sii->curmap) + + PCI_BAR0_WIN2_OFFSET + (reg & 0xfff)); + cfg_reg = PCI_BAR0_WIN2; + } + + prev_value = OSL_PCI_READ_CONFIG(sii->osh, cfg_reg, 4); + + if (prev_value != address_space) { + OSL_PCI_WRITE_CONFIG(sii->osh, cfg_reg, + sizeof(uint32), address_space); + } else { + prev_value = 0; + } + +skip_cfg: + if (wrire_req) { + W_REG(sii->osh, addr, val); + } else { + val = R_REG(sii->osh, addr); + } + + if (prev_value) { + /* Restore BAR0 WIN2 for PCIE GEN1 devices */ + OSL_PCI_WRITE_CONFIG(sii->osh, + cfg_reg, sizeof(uint32), prev_value); + } + + return val; +} + +uint8 +si_lhl_ps_mode(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + return sii->lhl_ps_mode; +} diff --git a/bcmdhd.100.10.315.x/siutils_priv.h b/bcmdhd.100.10.315.x/siutils_priv.h new file mode 100644 index 0000000..e17abe8 --- /dev/null +++ b/bcmdhd.100.10.315.x/siutils_priv.h @@ -0,0 +1,354 @@ +/* + * Include file private to the SOC Interconnect support files. + * + * Copyright (C) 1999-2018, Broadcom. 
+ * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: siutils_priv.h 769534 2018-06-26 21:19:11Z $ + */ + +#ifndef _siutils_priv_h_ +#define _siutils_priv_h_ + +#if defined(SI_ERROR_ENFORCE) +#define SI_ERROR(args) printf args +#else +#define SI_ERROR(args) printf args +#endif // endif + +#if defined(ENABLE_CORECAPTURE) + +#define SI_PRINT(args) osl_wificc_logDebug args + +#else + +#define SI_PRINT(args) printf args + +#endif /* ENABLE_CORECAPTURE */ + +#define SI_MSG(args) + +#ifdef BCMDBG_SI +#define SI_VMSG(args) printf args +#else +#define SI_VMSG(args) +#endif // endif + +#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) + +typedef uint32 (*si_intrsoff_t)(void *intr_arg); +typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg); +typedef bool (*si_intrsenabled_t)(void *intr_arg); + +#define SI_GPIO_MAX 16 + +typedef struct gci_gpio_item { + void *arg; + uint8 gci_gpio; + uint8 status; + gci_gpio_handler_t handler; + struct gci_gpio_item *next; +} gci_gpio_item_t; + +#define AI_SLAVE_WRAPPER 0 +#define AI_MASTER_WRAPPER 1 + +typedef struct axi_wrapper { + uint32 mfg; + uint32 cid; + uint32 rev; + uint32 wrapper_type; + uint32 wrapper_addr; + uint32 wrapper_size; +} axi_wrapper_t; + +#define SI_MAX_AXI_WRAPPERS 32 +#define AI_REG_READ_TIMEOUT 300 /* in msec */ + +/* for some combo chips, BT side accesses chipcommon->0x190, as a 16 byte addr */ +/* register at 0x19C doesn't exist, so error is logged at the slave wrapper */ +#define BT_CC_SPROM_BADREG_LO 0x18000190 +#define BT_CC_SPROM_BADREG_SIZE 4 +#define BT_CC_SPROM_BADREG_HI 0 +#define BCM4350_BT_AXI_ID 6 +#define BCM4345_BT_AXI_ID 6 +#define BCM4349_BT_AXI_ID 5 +#define BCM4364_BT_AXI_ID 5 + +/* for BT logging and memory dump, ignore failed access to BT memory */ +#define BCM4347_BT_ADDR_HI 0 +#define BCM4347_BT_ADDR_LO 0x19000000 /* BT address space */ +#define BCM4347_BT_SIZE 0x01000000 /* BT address space size */ +#define BCM4347_UNUSED_AXI_ID 0xffffffff +#define BCM4347_CC_AXI_ID 0 +#define 
BCM4347_PCIE_AXI_ID 1 + +typedef struct si_cores_info { + volatile void *regs[SI_MAXCORES]; /* other regs va */ + + uint coreid[SI_MAXCORES]; /**< id of each core */ + uint32 coresba[SI_MAXCORES]; /**< backplane address of each core */ + void *regs2[SI_MAXCORES]; /**< va of each core second register set (usbh20) */ + uint32 coresba2[SI_MAXCORES]; /**< address of each core second register set (usbh20) */ + uint32 coresba_size[SI_MAXCORES]; /**< backplane address space size */ + uint32 coresba2_size[SI_MAXCORES]; /**< second address space size */ + + void *wrappers[SI_MAXCORES]; /**< other cores wrapper va */ + uint32 wrapba[SI_MAXCORES]; /**< address of controlling wrapper */ + + void *wrappers2[SI_MAXCORES]; /**< other cores wrapper va */ + uint32 wrapba2[SI_MAXCORES]; /**< address of controlling wrapper */ + + void *wrappers3[SI_MAXCORES]; /**< other cores wrapper va */ + uint32 wrapba3[SI_MAXCORES]; /**< address of controlling wrapper */ + + uint32 cia[SI_MAXCORES]; /**< erom cia entry for each core */ + uint32 cib[SI_MAXCORES]; /**< erom cia entry for each core */ + + uint32 csp2ba[SI_MAXCORES]; /**< Second slave port base addr 0 */ + uint32 csp2ba_size[SI_MAXCORES]; /**< Second slave port addr space size */ +} si_cores_info_t; + +/** misc si info needed by some of the routines */ +typedef struct si_info { + struct si_pub pub; /**< back plane public state (must be first field) */ + + void *osh; /**< osl os handle */ + void *sdh; /**< bcmsdh handle */ + + uint dev_coreid; /**< the core provides driver functions */ + void *intr_arg; /**< interrupt callback function arg */ + si_intrsoff_t intrsoff_fn; /**< turns chip interrupts off */ + si_intrsrestore_t intrsrestore_fn; /**< restore chip interrupts */ + si_intrsenabled_t intrsenabled_fn; /**< check if interrupts are enabled */ + + void *pch; /**< PCI/E core handle */ + + bool memseg; /**< flag to toggle MEM_SEG register */ + + char *vars; + uint varsz; + + volatile void *curmap; /* current regs va */ + + uint 
curidx; /**< current core index */ + uint numcores; /**< # discovered cores */ + + void *curwrap; /**< current wrapper va */ + + uint32 oob_router; /**< oob router registers for axi */ + + si_cores_info_t *cores_info; + gci_gpio_item_t *gci_gpio_head; /**< gci gpio interrupts head */ + uint chipnew; /**< new chip number */ + uint second_bar0win; /**< Backplane region */ + uint num_br; /**< # discovered bridges */ + uint32 br_wrapba[SI_MAXBR]; /**< address of bridge controlling wrapper */ + uint32 xtalfreq; + uint32 openloop_dco_code; /**< OPEN loop calibration dco code */ + uint8 spurmode; + bool device_removed; + uint axi_num_wrappers; + axi_wrapper_t * axi_wrapper; + uint8 device_wake_opt; /* device_wake GPIO number */ + uint8 lhl_ps_mode; +} si_info_t; + +#define SI_INFO(sih) ((si_info_t *)(uintptr)sih) + +#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ + ISALIGNED((x), SI_CORE_SIZE)) +#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE)) +#define BADCOREADDR 0 +#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES) +#define NOREV -1 /**< Invalid rev */ + +#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCI_CORE_ID)) + +#define PCIE_GEN1(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCIE_CORE_ID)) + +#define PCIE_GEN2(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCIE2_CORE_ID)) + +#define PCIE(si) (PCIE_GEN1(si) || PCIE_GEN2(si)) + +#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE)) + +/** Newer chips can access PCI/PCIE and CC core without requiring to change PCI BAR0 WIN */ +#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13))) + +#define CCREGS_FAST(si) \ + (((si)->curmap == NULL) ? 
NULL : \ + ((volatile char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET)) +#define PCIEREGS(si) (((volatile char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET)) + +/* + * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/ + * after core switching to avoid invalid register accesss inside ISR. + */ +#define INTR_OFF(si, intr_val) \ + if ((si)->intrsoff_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \ + intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); } +#define INTR_RESTORE(si, intr_val) \ + if ((si)->intrsrestore_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \ + (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); } + +/* dynamic clock control defines */ +#define LPOMINFREQ 25000 /**< low power oscillator min */ +#define LPOMAXFREQ 43000 /**< low power oscillator max */ +#define XTALMINFREQ 19800000 /**< 20 MHz - 1% */ +#define XTALMAXFREQ 20200000 /**< 20 MHz + 1% */ +#define PCIMINFREQ 25000000 /**< 25 MHz */ +#define PCIMAXFREQ 34000000 /**< 33 MHz + fudge */ + +#define ILP_DIV_5MHZ 0 /**< ILP = 5 MHz */ +#define ILP_DIV_1MHZ 4 /**< ILP = 1 MHz */ + +/* GPIO Based LED powersave defines */ +#define DEFAULT_GPIO_ONTIME 10 /**< Default: 10% on */ +#define DEFAULT_GPIO_OFFTIME 90 /**< Default: 10% on */ + +#ifndef DEFAULT_GPIOTIMERVAL +#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME) +#endif // endif + +/* Silicon Backplane externs */ +extern void sb_scan(si_t *sih, volatile void *regs, uint devid); +extern uint sb_coreid(si_t *sih); +extern uint sb_intflag(si_t *sih); +extern uint sb_flag(si_t *sih); +extern void sb_setint(si_t *sih, int siflag); +extern uint sb_corevendor(si_t *sih); +extern uint sb_corerev(si_t *sih); +extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern volatile uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern bool sb_iscoreup(si_t *sih); +extern volatile void 
*sb_setcoreidx(si_t *sih, uint coreidx); +extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_commit(si_t *sih); +extern uint32 sb_base(uint32 admatch); +extern uint32 sb_size(uint32 admatch); +extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void sb_core_disable(si_t *sih, uint32 bits); +extern uint32 sb_addrspace(si_t *sih, uint asidx); +extern uint32 sb_addrspacesize(si_t *sih, uint asidx); +extern int sb_numaddrspaces(si_t *sih); + +extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx); + +extern bool sb_taclear(si_t *sih, bool details); + +#if defined(BCMDBG_PHYDUMP) +extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif // endif + +/* Wake-on-wireless-LAN (WOWL) */ +extern bool sb_pci_pmecap(si_t *sih); +struct osl_info; +extern bool sb_pci_fastpmecap(struct osl_info *osh); +extern bool sb_pci_pmeclr(si_t *sih); +extern void sb_pci_pmeen(si_t *sih); +extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset); + +/* AMBA Interconnect exported externs */ +extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *ai_kattach(osl_t *osh); +extern void ai_scan(si_t *sih, void *regs, uint devid); + +extern uint ai_flag(si_t *sih); +extern uint ai_flag_alt(si_t *sih); +extern void ai_setint(si_t *sih, int siflag); +extern uint ai_coreidx(si_t *sih); +extern uint ai_corevendor(si_t *sih); +extern uint ai_corerev(si_t *sih); +extern uint ai_corerev_minor(si_t *sih); +extern volatile uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern bool ai_iscoreup(si_t *sih); +extern volatile void *ai_setcoreidx(si_t *sih, uint coreidx); +extern volatile void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx); +extern volatile void *ai_setcoreidx_3rdwrap(si_t *sih, uint 
coreidx); +extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern uint ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits, + uint32 resetbits, void *p, volatile void *s); +extern void ai_core_disable(si_t *sih, uint32 bits); +extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits, + aidmp_t *pmacai, aidmp_t *smacai); +extern int ai_numaddrspaces(si_t *sih); +extern uint32 ai_addrspace(si_t *sih, uint spidx, uint baidx); +extern uint32 ai_addrspacesize(si_t *sih, uint spidx, uint baidx); +extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); +extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern void ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout, uint32 cid); +extern uint32 ai_clear_backplane_to(si_t *sih); +void ai_force_clocks(si_t *sih, uint clock_state); +extern uint ai_num_slaveports(si_t *sih, uint coreidx); + +#ifdef BCM_BACKPLANE_TIMEOUT +uint32 ai_clear_backplane_to_fast(si_t *sih, void * addr); +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) +extern uint32 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap); +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + +#if defined(BCMDBG_PHYDUMP) +extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif // endif + +extern uint32 ai_wrapper_dump_buf_size(si_t *sih); +extern uint32 ai_wrapper_dump_binary(si_t *sih, uchar *p); +extern bool ai_check_enable_backplane_log(si_t *sih); +extern uint32 ai_wrapper_dump_last_timeout(si_t *sih, uint32 
*error, uint32 *core, uint32 *ba, + uchar *p); + +#define ub_scan(a, b, c) do {} while (0) +#define ub_flag(a) (0) +#define ub_setint(a, b) do {} while (0) +#define ub_coreidx(a) (0) +#define ub_corevendor(a) (0) +#define ub_corerev(a) (0) +#define ub_iscoreup(a) (0) +#define ub_setcoreidx(a, b) (0) +#define ub_core_cflags(a, b, c) (0) +#define ub_core_cflags_wo(a, b, c) do {} while (0) +#define ub_core_sflags(a, b, c) (0) +#define ub_corereg(a, b, c, d, e) (0) +#define ub_core_reset(a, b, c) do {} while (0) +#define ub_core_disable(a, b) do {} while (0) +#define ub_numaddrspaces(a) (0) +#define ub_addrspace(a, b) (0) +#define ub_addrspacesize(a, b) (0) +#define ub_view(a, b) do {} while (0) +#define ub_dumpregs(a, b) do {} while (0) + +#endif /* _siutils_priv_h_ */ diff --git a/bcmdhd.100.10.315.x/wl_android.c b/bcmdhd.100.10.315.x/wl_android.c new file mode 100644 index 0000000..4a1991c --- /dev/null +++ b/bcmdhd.100.10.315.x/wl_android.c @@ -0,0 +1,5724 @@ +/* + * Linux cfg80211 driver - Android related functions + * + * Copyright (C) 1999-2018, Broadcom. + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_android.c 771907 2018-07-12 11:19:34Z $ + */ + +#include +#include +#include +#ifdef CONFIG_COMPAT +#include +#endif // endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif // endif +#ifdef BCMSDIO +#include +#endif // endif +#ifdef WL_CFG80211 +#include +#endif // endif +#ifdef WL_NAN +#include +#endif /* WL_NAN */ +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ +#include +#include +#include +#ifdef WL_MBO +#include +#endif /* WL_MBO */ +#ifdef WL_BCNRECV +#include +#endif /* WL_BCNRECV */ + +#ifdef WL_STATIC_IF +#define WL_BSSIDX_MAX 16 +#endif /* WL_STATIC_IF */ + +#ifndef WL_CFG80211 +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i +#endif + +uint android_msg_level = ANDROID_ERROR_LEVEL; + +/* + * Android private command strings, PLEASE define new private commands here + * so they can be updated easily in the future (if needed) + */ + +#define CMD_START "START" +#define CMD_STOP "STOP" +#define CMD_SCAN_ACTIVE "SCAN-ACTIVE" +#define CMD_SCAN_PASSIVE "SCAN-PASSIVE" +#define CMD_RSSI "RSSI" +#define CMD_LINKSPEED "LINKSPEED" +#define CMD_RXFILTER_START "RXFILTER-START" +#define CMD_RXFILTER_STOP "RXFILTER-STOP" +#define CMD_RXFILTER_ADD "RXFILTER-ADD" +#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE" +#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START" +#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP" +#define CMD_BTCOEXMODE "BTCOEXMODE" +#define CMD_SETSUSPENDOPT "SETSUSPENDOPT" +#define CMD_SETSUSPENDMODE "SETSUSPENDMODE" +#define CMD_MAXDTIM_IN_SUSPEND "MAX_DTIM_IN_SUSPEND" +#define 
CMD_P2P_DEV_ADDR "P2P_DEV_ADDR" +#define CMD_SETFWPATH "SETFWPATH" +#define CMD_SETBAND "SETBAND" +#define CMD_GETBAND "GETBAND" +#define CMD_COUNTRY "COUNTRY" +#ifdef WLMESH +#define CMD_SAE_SET_PASSWORD "SAE_SET_PASSWORD" +#define CMD_SET_RSDB_MODE "RSDB_MODE" +#endif +#define CMD_P2P_SET_NOA "P2P_SET_NOA" +#define CMD_P2P_GET_NOA "P2P_GET_NOA" +#define CMD_P2P_SD_OFFLOAD "P2P_SD_" +#define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_" +#define CMD_P2P_SET_PS "P2P_SET_PS" +#define CMD_P2P_ECSA "P2P_ECSA" +#define CMD_P2P_INC_BW "P2P_INCREASE_BW" +#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" +#define CMD_SETROAMMODE "SETROAMMODE" +#define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA" +#define CMD_MIRACAST "MIRACAST" +#ifdef WL_NAN +#define CMD_NAN "NAN_" +#endif /* WL_NAN */ +#define CMD_COUNTRY_DELIMITER "/" + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS" +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */ +#define CMD_CHANSPEC "CHANSPEC" +#define CMD_DATARATE "DATARATE" +#define CMD_ASSOC_CLIENTS "ASSOCLIST" +#define CMD_SET_CSA "SETCSA" +#ifdef WL_SUPPORT_AUTO_CHANNEL +#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL" +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#define CMD_KEEP_ALIVE "KEEPALIVE" + +#ifdef BCMCCX +/* CCX Private Commands */ +#define CMD_GETCCKM_RN "get cckm_rn" +#define CMD_SETCCKM_KRK "set cckm_krk" +#define CMD_GET_ASSOC_RES_IES "get assoc_res_ies" + +#define CCKM_KRK_LEN 16 +#define CCKM_BTK_LEN 32 +#endif // endif + +#ifdef PNO_SUPPORT +#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR" +#define CMD_PNOSETUP_SET "PNOSETUP " +#define CMD_PNOENABLE_SET "PNOFORCE" +#define CMD_PNODEBUG_SET "PNODEBUG" +#define CMD_WLS_BATCHING "WLS_BATCHING" +#endif /* PNO_SUPPORT */ + +#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER" + +#ifdef WLFBT +#define CMD_GET_FTKEY "GET_FTKEY" +#endif // endif + +#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD" +#define CMD_INTERFACE_CREATE "INTERFACE_CREATE" 
+#define CMD_INTERFACE_DELETE "INTERFACE_DELETE" +#define CMD_GET_LINK_STATUS "GETLINKSTATUS" + +#define CMD_GET_STA_INFO "GETSTAINFO" + +/* related with CMD_GET_LINK_STATUS */ +#define WL_ANDROID_LINK_VHT 0x01 +#define WL_ANDROID_LINK_MIMO 0x02 +#define WL_ANDROID_LINK_AP_VHT_SUPPORT 0x04 +#define WL_ANDROID_LINK_AP_MIMO_SUPPORT 0x08 + +#ifdef P2PRESP_WFDIE_SRC +#define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP" +#define CMD_P2P_GET_WFDIE_RESP "P2P_GET_WFDIE_RESP" +#endif /* P2PRESP_WFDIE_SRC */ + +#define CMD_DFS_AP_MOVE "DFS_AP_MOVE" +#define CMD_WBTEXT_ENABLE "WBTEXT_ENABLE" +#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG" +#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG" +#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG" +#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG" +#define CMD_WBTEXT_BTM_TIMER_THRESHOLD "WBTEXT_BTM_TIMER_THRESHOLD" +#define CMD_WBTEXT_BTM_DELTA "WBTEXT_BTM_DELTA" +#define CMD_WBTEXT_ESTM_ENABLE "WBTEXT_ESTM_ENABLE" + +#ifdef WLWFDS +#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH" +#define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH" +#endif /* WLWFDS */ + +#ifdef SET_RPS_CPUS +#define CMD_RPSMODE "RPSMODE" +#endif /* SET_RPS_CPUS */ + +#ifdef BT_WIFI_HANDOVER +#define CMD_TBOW_TEARDOWN "TBOW_TEARDOWN" +#endif /* BT_WIFI_HANDOVER */ + +#define CMD_MURX_BFE_CAP "MURX_BFE_CAP" + +#ifdef SUPPORT_RSSI_SUM_REPORT +#define CMD_SET_RSSI_LOGGING "SET_RSSI_LOGGING" +#define CMD_GET_RSSI_LOGGING "GET_RSSI_LOGGING" +#define CMD_GET_RSSI_PER_ANT "GET_RSSI_PER_ANT" +#endif /* SUPPORT_RSSI_SUM_REPORT */ + +#define CMD_GET_SNR "GET_SNR" + +#ifdef SUPPORT_AP_HIGHER_BEACONRATE +#define CMD_SET_AP_BEACONRATE "SET_AP_BEACONRATE" +#define CMD_GET_AP_BASICRATE "GET_AP_BASICRATE" +#endif /* SUPPORT_AP_HIGHER_BEACONRATE */ + +#ifdef SUPPORT_AP_RADIO_PWRSAVE +#define CMD_SET_AP_RPS "SET_AP_RPS" +#define CMD_GET_AP_RPS "GET_AP_RPS" +#define CMD_SET_AP_RPS_PARAMS "SET_AP_RPS_PARAMS" +#endif /* SUPPORT_AP_RADIO_PWRSAVE */ + +/* miracast related definition 
*/ +#define MIRACAST_MODE_OFF 0 +#define MIRACAST_MODE_SOURCE 1 +#define MIRACAST_MODE_SINK 2 + +#ifdef CONNECTION_STATISTICS +#define CMD_GET_CONNECTION_STATS "GET_CONNECTION_STATS" + +struct connection_stats { + u32 txframe; + u32 txbyte; + u32 txerror; + u32 rxframe; + u32 rxbyte; + u32 txfail; + u32 txretry; + u32 txretrie; + u32 txrts; + u32 txnocts; + u32 txexptime; + u32 txrate; + u8 chan_idle; +}; +#endif /* CONNECTION_STATISTICS */ + +#ifdef SUPPORT_LQCM +#define CMD_SET_LQCM_ENABLE "SET_LQCM_ENABLE" +#define CMD_GET_LQCM_REPORT "GET_LQCM_REPORT" +#endif // endif + +static LIST_HEAD(miracast_resume_list); +#ifdef WL_CFG80211 +static u8 miracast_cur_mode; +#endif + +#ifdef DHD_LOG_DUMP +#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP" +#define SUBCMD_UNWANTED "UNWANTED" +#define SUBCMD_DISCONNECTED "DISCONNECTED" +void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd); +#endif /* DHD_LOG_DUMP */ + +#ifdef DHD_DEBUG_UART +extern bool dhd_debug_uart_is_running(struct net_device *dev); +#endif /* DHD_DEBUG_UART */ + +struct io_cfg { + s8 *iovar; + s32 param; + u32 ioctl; + void *arg; + u32 len; + struct list_head list; +}; + +#if defined(BCMFW_ROAM_ENABLE) +#define CMD_SET_ROAMPREF "SET_ROAMPREF" + +#define MAX_NUM_SUITES 10 +#define WIDTH_AKM_SUITE 8 +#define JOIN_PREF_RSSI_LEN 0x02 +#define JOIN_PREF_RSSI_SIZE 4 /* RSSI pref header size in bytes */ +#define JOIN_PREF_WPA_HDR_SIZE 4 /* WPA pref header size in bytes */ +#define JOIN_PREF_WPA_TUPLE_SIZE 12 /* Tuple size in bytes */ +#define JOIN_PREF_MAX_WPA_TUPLES 16 +#define MAX_BUF_SIZE (JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + \ + (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES)) +#endif /* BCMFW_ROAM_ENABLE */ + +#define CMD_DEBUG_VERBOSE "DEBUG_VERBOSE" +#ifdef WL_NATOE + +#define CMD_NATOE "NATOE" + +#define NATOE_MAX_PORT_NUM 65535 + +/* natoe command info structure */ +typedef struct wl_natoe_cmd_info { + uint8 *command; /* pointer to the actual command */ + uint16 tot_len; /* total length of 
the command */ + uint16 bytes_written; /* Bytes written for get response */ +} wl_natoe_cmd_info_t; + +typedef struct wl_natoe_sub_cmd wl_natoe_sub_cmd_t; +typedef int (natoe_cmd_handler_t)(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); + +struct wl_natoe_sub_cmd { + char *name; + uint8 version; /* cmd version */ + uint16 id; /* id for the dongle f/w switch/case */ + uint16 type; /* base type of argument */ + natoe_cmd_handler_t *handler; /* cmd handler */ +}; + +#define WL_ANDROID_NATOE_FUNC(suffix) wl_android_natoe_subcmd_ ##suffix +static int wl_android_process_natoe_cmd(struct net_device *dev, + char *command, int total_len); +static int wl_android_natoe_subcmd_enable(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_config_ips(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_config_ports(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_dbg_stats(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); + +static const wl_natoe_sub_cmd_t natoe_cmd_list[] = { + /* wl natoe enable [0/1] or new: "wl natoe [0/1]" */ + {"enable", 0x01, WL_NATOE_CMD_ENABLE, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(enable) + }, + {"config_ips", 0x01, WL_NATOE_CMD_CONFIG_IPS, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ips) + }, + {"config_ports", 0x01, WL_NATOE_CMD_CONFIG_PORTS, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ports) + }, + {"stats", 0x01, WL_NATOE_CMD_DBG_STATS, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(dbg_stats) + }, + {"tbl_cnt", 0x01, WL_NATOE_CMD_TBL_CNT, + 
IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(tbl_cnt) + }, + {NULL, 0, 0, 0, NULL} +}; + +#endif /* WL_NATOE */ + +#ifdef SET_PCIE_IRQ_CPU_CORE +#define CMD_PCIE_IRQ_CORE "PCIE_IRQ_CORE" +#endif /* SET_PCIE_IRQ_CPU_CORE */ + +#ifdef WL_BCNRECV +#define CMD_BEACON_RECV "BEACON_RECV" +#endif /* WL_BCNRECV */ + +/* drv command info structure */ +typedef struct wl_drv_cmd_info { + uint8 *command; /* pointer to the actual command */ + uint16 tot_len; /* total length of the command */ + uint16 bytes_written; /* Bytes written for get response */ +} wl_drv_cmd_info_t; + +typedef struct wl_drv_sub_cmd wl_drv_sub_cmd_t; +typedef int (drv_cmd_handler_t)(struct net_device *dev, + const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info); + +struct wl_drv_sub_cmd { + char *name; + uint8 version; /* cmd version */ + uint16 id; /* id for the dongle f/w switch/case */ + uint16 type; /* base type of argument */ + drv_cmd_handler_t *handler; /* cmd handler */ +}; + +#ifdef WL_MBO + +#define CMD_MBO "MBO" +enum { + WL_MBO_CMD_NON_CHAN_PREF = 1, + WL_MBO_CMD_CELL_DATA_CAP = 2 +}; +#define WL_ANDROID_MBO_FUNC(suffix) wl_android_mbo_subcmd_ ##suffix + +static int wl_android_process_mbo_cmd(struct net_device *dev, + char *command, int total_len); +static int wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev, + const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info); +static int wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev, + const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info); + +static const wl_drv_sub_cmd_t mbo_cmd_list[] = { + {"non_pref_chan", 0x01, WL_MBO_CMD_NON_CHAN_PREF, + IOVT_BUFFER, WL_ANDROID_MBO_FUNC(non_pref_chan) + }, + {"cell_data_cap", 0x01, WL_MBO_CMD_CELL_DATA_CAP, + IOVT_BUFFER, WL_ANDROID_MBO_FUNC(cell_data_cap) + }, + {NULL, 0, 0, 0, NULL} +}; + +#endif /* WL_MBO */ + +#ifdef WL_GENL +static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info); +static int wl_genl_init(void); +static 
int wl_genl_deinit(void); + +extern struct net init_net; +/* attribute policy: defines which attribute has which type (e.g int, char * etc) + * possible values defined in net/netlink.h + */ +static struct nla_policy wl_genl_policy[BCM_GENL_ATTR_MAX + 1] = { + [BCM_GENL_ATTR_STRING] = { .type = NLA_NUL_STRING }, + [BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY }, +}; + +#define WL_GENL_VER 1 +/* family definition */ +static struct genl_family wl_genl_family = { + .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */ + .hdrsize = 0, + .name = "bcm-genl", /* Netlink I/F for Android */ + .version = WL_GENL_VER, /* Version Number */ + .maxattr = BCM_GENL_ATTR_MAX, +}; + +/* commands: mapping between the command enumeration and the actual function */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) +struct genl_ops wl_genl_ops[] = { + { + .cmd = BCM_GENL_CMD_MSG, + .flags = 0, + .policy = wl_genl_policy, + .doit = wl_genl_handle_msg, + .dumpit = NULL, + }, +}; +#else +struct genl_ops wl_genl_ops = { + .cmd = BCM_GENL_CMD_MSG, + .flags = 0, + .policy = wl_genl_policy, + .doit = wl_genl_handle_msg, + .dumpit = NULL, + +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) +static struct genl_multicast_group wl_genl_mcast[] = { + { .name = "bcm-genl-mcast", }, +}; +#else +static struct genl_multicast_group wl_genl_mcast = { + .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */ + .name = "bcm-genl-mcast", +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */ +#endif /* WL_GENL */ + +#ifdef SUPPORT_LQCM +#define LQCM_ENAB_MASK 0x000000FF /* LQCM enable flag mask */ +#define LQCM_TX_INDEX_MASK 0x0000FF00 /* LQCM tx index mask */ +#define LQCM_RX_INDEX_MASK 0x00FF0000 /* LQCM rx index mask */ + +#define LQCM_TX_INDEX_SHIFT 8 /* LQCM tx index shift */ +#define LQCM_RX_INDEX_SHIFT 16 /* LQCM rx index shift */ +#endif /* SUPPORT_LQCM */ + +/** + * Extern function declarations (TODO: 
move them to dhd_linux.h) + */ +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +int dhd_dev_init_ioctl(struct net_device *dev); +#ifdef WL_CFG80211 +int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command); +#else +int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) +{ return 0; } +int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len) +{ return 0; } +#endif /* WL_CFG80211 */ +#ifdef ROAM_CHANNEL_CACHE +extern void wl_update_roamscan_cache_by_band(struct net_device *dev, int band); +#endif /* ROAM_CHANNEL_CACHE */ + +#ifdef ENABLE_4335BT_WAR +extern int bcm_bt_lock(int cookie); +extern void bcm_bt_unlock(int cookie); +static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */ +#endif /* ENABLE_4335BT_WAR */ + +extern bool ap_fw_loaded; +extern char iface_name[IFNAMSIZ]; +#ifdef DHD_PM_CONTROL_FROM_FILE +extern bool g_pm_control; +#endif /* DHD_PM_CONTROL_FROM_FILE */ + +/** + * Local (static) functions and variables + */ + +/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first + * time (only) in dhd_open, subsequential wifi on will be handled by + * wl_android_wifi_on + */ +int g_wifi_on = TRUE; + +/** + * Local (static) function definitions + */ + +#ifdef WLWFDS +static int wl_android_set_wfds_hash( + struct net_device *dev, char *command, bool enable) +{ + int error = 0; + wl_p2p_wfds_hash_t *wfds_hash = NULL; + char *smbuf = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + 
+ smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN); + if (smbuf == NULL) { + ANDROID_ERROR(("%s: failed to allocated memory %d bytes\n", + __FUNCTION__, WLC_IOCTL_MAXLEN)); + return -ENOMEM; + } + + if (enable) { + wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_ADD_WFDS_HASH) + 1); + error = wldev_iovar_setbuf(dev, "p2p_add_wfds_hash", wfds_hash, + sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL); + } + else { + wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_DEL_WFDS_HASH) + 1); + error = wldev_iovar_setbuf(dev, "p2p_del_wfds_hash", wfds_hash, + sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL); + } + + if (error) { + ANDROID_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error)); + } + + if (smbuf) { + MFREE(cfg->osh, smbuf, WLC_IOCTL_MAXLEN); + } + return error; +} +#endif /* WLWFDS */ + +static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len) +{ + int link_speed; + int bytes_written; + int error; + + error = wldev_get_link_speed(net, &link_speed); + if (error) { + ANDROID_ERROR(("Get linkspeed failed \n")); + return -1; + } + + /* Convert Kbps to Android Mbps */ + link_speed = link_speed / 1000; + bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed); + ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command)); + return bytes_written; +} + +static int wl_android_get_rssi(struct net_device *net, char *command, int total_len) +{ + wlc_ssid_t ssid = {0, {0}}; + int bytes_written = 0; + int error = 0; + scb_val_t scbval; + char *delim = NULL; + struct net_device *target_ndev = net; +#ifdef WL_VIRTUAL_APSTA + char *pos = NULL; + struct bcm_cfg80211 *cfg; +#endif /* WL_VIRTUAL_APSTA */ + + delim = strchr(command, ' '); + /* For Ap mode rssi command would be + * driver rssi + * for STA/GC mode + * driver rssi + */ + if (delim) { + /* Ap/GO mode + * driver rssi + */ + ANDROID_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim)); + /* skip space from delim after 
finding char */ + delim++; + if (!(bcm_ether_atoe((delim), &scbval.ea))) { + ANDROID_ERROR(("%s:address err\n", __FUNCTION__)); + return -1; + } + scbval.val = htod32(0); + ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet))); +#ifdef WL_VIRTUAL_APSTA + /* RSDB AP may have another virtual interface + * In this case, format of private command is as following, + * DRIVER rssi + */ + + /* Current position is start of MAC address string */ + pos = delim; + delim = strchr(pos, ' '); + if (delim) { + /* skip space from delim after finding char */ + delim++; + if (strnlen(delim, IFNAMSIZ)) { + cfg = wl_get_cfg(net); + target_ndev = wl_get_ap_netdev(cfg, delim); + if (target_ndev == NULL) + target_ndev = net; + } + } +#endif /* WL_VIRTUAL_APSTA */ + } + else { + /* STA/GC mode */ + memset(&scbval, 0, sizeof(scb_val_t)); + } + + error = wldev_get_rssi(target_ndev, &scbval); + if (error) + return -1; +#if defined(RSSIOFFSET) + scbval.val = wl_update_rssi_offset(net, scbval.val); +#endif + + error = wldev_get_ssid(target_ndev, &ssid); + if (error) + return -1; + if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) { + ANDROID_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__)); + } else if (total_len <= ssid.SSID_len) { + return -ENOMEM; + } else { + memcpy(command, ssid.SSID, ssid.SSID_len); + bytes_written = ssid.SSID_len; + } + if ((total_len - bytes_written) < (strlen(" rssi -XXX") + 1)) + return -ENOMEM; + + bytes_written += scnprintf(&command[bytes_written], total_len - bytes_written, + " rssi %d", scbval.val); + command[bytes_written] = '\0'; + + ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written)); + return bytes_written; +} + +static int wl_android_set_suspendopt(struct net_device *dev, char *command) +{ + int suspend_flag; + int ret_now; + int ret = 0; + + suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0'; + + if (suspend_flag != 0) { + suspend_flag = 1; + } + ret_now = 
net_os_set_suspend_disable(dev, suspend_flag); + + if (ret_now != suspend_flag) { + if (!(ret = net_os_set_suspend(dev, ret_now, 1))) { + ANDROID_INFO(("%s: Suspend Flag %d -> %d\n", + __FUNCTION__, ret_now, suspend_flag)); + } else { + ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } + } + + return ret; +} + +static int wl_android_set_suspendmode(struct net_device *dev, char *command) +{ + int ret = 0; + +#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND) + int suspend_flag; + + suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0'; + if (suspend_flag != 0) + suspend_flag = 1; + + if (!(ret = net_os_set_suspend(dev, suspend_flag, 0))) + ANDROID_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag)); + else + ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); +#endif // endif + + return ret; +} + +#ifdef WL_CFG80211 +int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len) +{ + uint8 mode[5]; + int error = 0; + int bytes_written = 0; + + error = wldev_get_mode(dev, mode, sizeof(mode)); + if (error) + return -1; + + ANDROID_INFO(("%s: mode:%s\n", __FUNCTION__, mode)); + bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode); + ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command)); + return bytes_written; + +} + +extern chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec); +int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int chsp = {0}; + uint16 band = 0; + uint16 bw = 0; + uint16 channel = 0; + u32 sb = 0; + chanspec_t chanspec; + + /* command is + * driver chanspec + */ + error = wldev_iovar_getint(dev, "chanspec", &chsp); + if (error) + return -1; + + chanspec = wl_chspec_driver_to_host(chsp); + ANDROID_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec)); + + channel = chanspec & WL_CHANSPEC_CHAN_MASK; + band = chanspec & WL_CHANSPEC_BAND_MASK; + bw = 
chanspec & WL_CHANSPEC_BW_MASK; + + ANDROID_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw)); + + if (bw == WL_CHANSPEC_BW_80) + bw = WL_CH_BANDWIDTH_80MHZ; + else if (bw == WL_CHANSPEC_BW_40) + bw = WL_CH_BANDWIDTH_40MHZ; + else if (bw == WL_CHANSPEC_BW_20) + bw = WL_CH_BANDWIDTH_20MHZ; + else + bw = WL_CH_BANDWIDTH_20MHZ; + + if (bw == WL_CH_BANDWIDTH_40MHZ) { + if (CHSPEC_SB_UPPER(chanspec)) { + channel += CH_10MHZ_APART; + } else { + channel -= CH_10MHZ_APART; + } + } + else if (bw == WL_CH_BANDWIDTH_80MHZ) { + sb = chanspec & WL_CHANSPEC_CTL_SB_MASK; + if (sb == WL_CHANSPEC_CTL_SB_LL) { + channel -= (CH_10MHZ_APART + CH_20MHZ_APART); + } else if (sb == WL_CHANSPEC_CTL_SB_LU) { + channel -= CH_10MHZ_APART; + } else if (sb == WL_CHANSPEC_CTL_SB_UL) { + channel += CH_10MHZ_APART; + } else { + /* WL_CHANSPEC_CTL_SB_UU */ + channel += (CH_10MHZ_APART + CH_20MHZ_APART); + } + } + bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC, + channel, band == WL_CHANSPEC_BAND_5G ? 
"5G":"2G", bw); + + ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command)); + return bytes_written; + +} +#endif + +/* returns current datarate datarate returned from firmware are in 500kbps */ +int wl_android_get_datarate(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int datarate = 0; + int bytes_written = 0; + + error = wldev_get_datarate(dev, &datarate); + if (error) + return -1; + + ANDROID_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate)); + + bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2)); + return bytes_written; +} +int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + uint i; + int len = 0; + char mac_buf[MAX_NUM_OF_ASSOCLIST * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + + ANDROID_TRACE(("%s: ENTER\n", __FUNCTION__)); + + assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST); + + error = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf)); + if (error) + return -1; + + assoc_maclist->count = dtoh32(assoc_maclist->count); + bytes_written = snprintf(command, total_len, "%s listcount: %d Stations:", + CMD_ASSOC_CLIENTS, assoc_maclist->count); + + for (i = 0; i < assoc_maclist->count; i++) { + len = snprintf(command + bytes_written, total_len - bytes_written, " " MACDBG, + MAC2STRDBG(assoc_maclist->ea[i].octet)); + /* A return value of '(total_len - bytes_written)' or more means that the + * output was truncated + */ + if ((len > 0) && (len < (total_len - bytes_written))) { + bytes_written += len; + } else { + ANDROID_ERROR(("%s: Insufficient buffer %d, bytes_written %d\n", + __FUNCTION__, total_len, bytes_written)); + bytes_written = -1; + break; + } + } + return bytes_written; +} + +#ifdef WL_CFG80211 +extern chanspec_t +wl_chspec_host_to_driver(chanspec_t chanspec); +static int wl_android_set_csa(struct net_device *dev, char 
*command) +{ + int error = 0; + char smbuf[WLC_IOCTL_SMLEN]; + wl_chan_switch_t csa_arg; + u32 chnsp = 0; + int err = 0; + + ANDROID_INFO(("%s: command:%s\n", __FUNCTION__, command)); + + command = (command + strlen(CMD_SET_CSA)); + /* Order is mode, count channel */ + if (!*++command) { + ANDROID_ERROR(("%s:error missing arguments\n", __FUNCTION__)); + return -1; + } + csa_arg.mode = bcm_atoi(command); + + if (csa_arg.mode != 0 && csa_arg.mode != 1) { + ANDROID_ERROR(("Invalid mode\n")); + return -1; + } + + if (!*++command) { + ANDROID_ERROR(("%s:error missing count\n", __FUNCTION__)); + return -1; + } + command++; + csa_arg.count = bcm_atoi(command); + + csa_arg.reg = 0; + csa_arg.chspec = 0; + command += 2; + if (!*command) { + ANDROID_ERROR(("%s:error missing channel\n", __FUNCTION__)); + return -1; + } + + chnsp = wf_chspec_aton(command); + if (chnsp == 0) { + ANDROID_ERROR(("%s:chsp is not correct\n", __FUNCTION__)); + return -1; + } + chnsp = wl_chspec_host_to_driver(chnsp); + csa_arg.chspec = chnsp; + + if (chnsp & WL_CHANSPEC_BAND_5G) { + u32 chanspec = chnsp; + err = wldev_iovar_getint(dev, "per_chan_info", &chanspec); + if (!err) { + if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) { + ANDROID_ERROR(("Channel is radar sensitive\n")); + return -1; + } + if (chanspec == 0) { + ANDROID_ERROR(("Invalid hw channel\n")); + return -1; + } + } else { + ANDROID_ERROR(("does not support per_chan_info\n")); + return -1; + } + ANDROID_INFO(("non radar sensitivity\n")); + } + error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg), + smbuf, sizeof(smbuf), NULL); + if (error) { + ANDROID_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error)); + return -1; + } + return 0; +} +#endif + +static int +wl_android_set_max_dtim(struct net_device *dev, char *command) +{ + int ret = 0; + int dtim_flag; + + dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0'; + + if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) { + 
ANDROID_TRACE(("%s: use Max bcn_li_dtim in suspend %s\n", + __FUNCTION__, (dtim_flag ? "Enable" : "Disable"))); + } else { + ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } + + return ret; +} + +static int wl_android_get_band(struct net_device *dev, char *command, int total_len) +{ + uint band; + int bytes_written; + int error; + + error = wldev_get_band(dev, &band); + if (error) + return -1; + bytes_written = snprintf(command, total_len, "Band %d", band); + return bytes_written; +} + +#ifdef PNO_SUPPORT +#define PNO_PARAM_SIZE 50 +#define VALUE_SIZE 50 +#define LIMIT_STR_FMT ("%50s %50s") + +static int +wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) +{ + int err = BCME_OK; + uint i, tokens, len_remain; + char *pos, *pos2, *token, *token2, *delim; + char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1]; + struct dhd_pno_batch_params batch_params; + + ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + len_remain = total_len; + if (len_remain > (strlen(CMD_WLS_BATCHING) + 1)) { + pos = command + strlen(CMD_WLS_BATCHING) + 1; + len_remain -= strlen(CMD_WLS_BATCHING) + 1; + } else { + ANDROID_ERROR(("%s: No arguments, total_len %d\n", __FUNCTION__, total_len)); + err = BCME_ERROR; + goto exit; + } + memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params)); + if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) { + if (len_remain > (strlen(PNO_BATCHING_SET) + 1)) { + pos += strlen(PNO_BATCHING_SET) + 1; + } else { + ANDROID_ERROR(("%s: %s missing arguments, total_len %d\n", + __FUNCTION__, PNO_BATCHING_SET, total_len)); + err = BCME_ERROR; + goto exit; + } + while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) { + memset(param, 0, sizeof(param)); + memset(value, 0, sizeof(value)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER); + if (delim != NULL) + *delim = ' '; + + tokens = sscanf(token, 
LIMIT_STR_FMT, param, value); + if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) { + batch_params.scan_fr = simple_strtol(value, NULL, 0); + ANDROID_INFO(("scan_freq : %d\n", batch_params.scan_fr)); + } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) { + batch_params.bestn = simple_strtol(value, NULL, 0); + ANDROID_INFO(("bestn : %d\n", batch_params.bestn)); + } else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) { + batch_params.mscan = simple_strtol(value, NULL, 0); + ANDROID_INFO(("mscan : %d\n", batch_params.mscan)); + } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) { + i = 0; + pos2 = value; + tokens = sscanf(value, "<%s>", value); + if (tokens != 1) { + err = BCME_ERROR; + ANDROID_ERROR(("%s : invalid format for channel" + " <> params\n", __FUNCTION__)); + goto exit; + } + while ((token2 = strsep(&pos2, + PNO_PARAM_CHANNEL_DELIMETER)) != NULL) { + if (token2 == NULL || !*token2) + break; + if (*token2 == '\0') + continue; + if (*token2 == 'A' || *token2 == 'B') { + batch_params.band = (*token2 == 'A')? + WLC_BAND_5G : WLC_BAND_2G; + ANDROID_INFO(("band : %s\n", + (*token2 == 'A')? 
"A" : "B")); + } else { + if ((batch_params.nchan >= WL_NUMCHANNELS) || + (i >= WL_NUMCHANNELS)) { + ANDROID_ERROR(("Too many nchan %d\n", + batch_params.nchan)); + err = BCME_BUFTOOSHORT; + goto exit; + } + batch_params.chan_list[i++] = + simple_strtol(token2, NULL, 0); + batch_params.nchan++; + ANDROID_INFO(("channel :%d\n", + batch_params.chan_list[i-1])); + } + } + } else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) { + batch_params.rtt = simple_strtol(value, NULL, 0); + ANDROID_INFO(("rtt : %d\n", batch_params.rtt)); + } else { + ANDROID_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param)); + err = BCME_ERROR; + goto exit; + } + } + err = dhd_dev_pno_set_for_batch(dev, &batch_params); + if (err < 0) { + ANDROID_ERROR(("failed to configure batch scan\n")); + } else { + memset(command, 0, total_len); + err = snprintf(command, total_len, "%d", err); + } + } else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) { + err = dhd_dev_pno_get_for_batch(dev, command, total_len); + if (err < 0) { + ANDROID_ERROR(("failed to getting batching results\n")); + } else { + err = strlen(command); + } + } else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) { + err = dhd_dev_pno_stop_for_batch(dev); + if (err < 0) { + ANDROID_ERROR(("failed to stop batching scan\n")); + } else { + memset(command, 0, total_len); + err = snprintf(command, total_len, "OK"); + } + } else { + ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__)); + err = BCME_ERROR; + goto exit; + } +exit: + return err; +} + +#ifndef WL_SCHED_SCAN +static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len) +{ + wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT]; + int res = -1; + int nssid = 0; + cmd_tlv_t *cmd_tlv_temp; + char *str_ptr; + int tlv_size_left; + int pno_time = 0; + int pno_repeat = 0; + int pno_freq_expo_max = 0; + +#ifdef PNO_SET_DEBUG + int i; + char pno_in_example[] = { + 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', + 'S', 
'1', '2', '0', + 'S', + 0x05, + 'd', 'l', 'i', 'n', 'k', + 'S', + 0x04, + 'G', 'O', 'O', 'G', + 'T', + '0', 'B', + 'R', + '2', + 'M', + '2', + 0x00 + }; +#endif /* PNO_SET_DEBUG */ + ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + + if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) { + ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + goto exit_proc; + } +#ifdef PNO_SET_DEBUG + memcpy(command, pno_in_example, sizeof(pno_in_example)); + total_len = sizeof(pno_in_example); +#endif // endif + str_ptr = command + strlen(CMD_PNOSETUP_SET); + tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET); + + cmd_tlv_temp = (cmd_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + + if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && + (cmd_tlv_temp->version == PNO_TLV_VERSION) && + (cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) { + + str_ptr += sizeof(cmd_tlv_t); + tlv_size_left -= sizeof(cmd_tlv_t); + + if ((nssid = wl_parse_ssid_list_tlv(&str_ptr, ssids_local, + MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) { + ANDROID_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } else { + if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) { + ANDROID_ERROR(("%s scan duration corrupted field size %d\n", + __FUNCTION__, tlv_size_left)); + goto exit_proc; + } + str_ptr++; + pno_time = simple_strtoul(str_ptr, &str_ptr, 16); + ANDROID_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + + if (str_ptr[0] != 0) { + if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { + ANDROID_ERROR(("%s pno repeat : corrupted field\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); + ANDROID_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat)); + if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { + ANDROID_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_freq_expo_max = 
simple_strtoul(str_ptr, &str_ptr, 16); + ANDROID_INFO(("%s: pno_freq_expo_max=%d\n", + __FUNCTION__, pno_freq_expo_max)); + } + } + } else { + ANDROID_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + + res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat, + pno_freq_expo_max, NULL, 0); +exit_proc: + return res; +} +#endif /* !WL_SCHED_SCAN */ +#endif /* PNO_SUPPORT */ + +static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len) +{ + int ret; + struct ether_addr p2pdev_addr; + +#define MAC_ADDR_STR_LEN 18 + if (total_len < MAC_ADDR_STR_LEN) { + ANDROID_ERROR(("%s: buflen %d is less than p2p dev addr\n", + __FUNCTION__, total_len)); + return -1; + } + + ret = wl_cfg80211_get_p2p_dev_addr(ndev, &p2pdev_addr); + if (ret) { + ANDROID_ERROR(("%s Failed to get p2p dev addr\n", __FUNCTION__)); + return -1; + } + return (snprintf(command, total_len, MACF, ETHERP_TO_MACF(&p2pdev_addr))); +} + +#ifdef BCMCCX +static int wl_android_get_cckm_rn(struct net_device *dev, char *command) +{ + int error, rn; + + WL_TRACE(("%s:wl_android_get_cckm_rn\n", dev->name)); + + error = wldev_iovar_getint(dev, "cckm_rn", &rn); + if (unlikely(error)) { + ANDROID_ERROR(("wl_android_get_cckm_rn error (%d)\n", error)); + return -1; + } + memcpy(command, &rn, sizeof(int)); + + return sizeof(int); +} + +static int +wl_android_set_cckm_krk(struct net_device *dev, char *command, int total_len) +{ + int error, key_len, skip_len; + unsigned char key[CCKM_KRK_LEN + CCKM_BTK_LEN]; + char iovar_buf[WLC_IOCTL_SMLEN]; + + WL_TRACE(("%s: wl_iw_set_cckm_krk\n", dev->name)); + + skip_len = strlen("set cckm_krk")+1; + + if (total_len < (skip_len + CCKM_KRK_LEN)) { + return BCME_BADLEN; + } + + if (total_len >= skip_len + CCKM_KRK_LEN + CCKM_BTK_LEN) { + key_len = CCKM_KRK_LEN + CCKM_BTK_LEN; + } else { + key_len = CCKM_KRK_LEN; + } + + memset(iovar_buf, 0, sizeof(iovar_buf)); + memcpy(key, command+skip_len, key_len); + + 
ANDROID_INFO(("CCKM KRK-BTK (%d/%d) :\n", key_len, total_len)); + if (wl_dbg_level & WL_DBG_DBG) { + prhex(NULL, key, key_len); + } + + error = wldev_iovar_setbuf(dev, "cckm_krk", key, key_len, + iovar_buf, WLC_IOCTL_SMLEN, NULL); + if (unlikely(error)) { + ANDROID_ERROR((" cckm_krk set error (%d)\n", error)); + return -1; + } + return 0; +} + +static int wl_android_get_assoc_res_ies(struct net_device *dev, char *command, int total_len) +{ + int error; + u8 buf[WL_ASSOC_INFO_MAX]; + wl_assoc_info_t assoc_info; + u32 resp_ies_len = 0; + int bytes_written = 0; + + WL_TRACE(("%s: wl_iw_get_assoc_res_ies\n", dev->name)); + + error = wldev_iovar_getbuf(dev, "assoc_info", NULL, 0, buf, WL_ASSOC_INFO_MAX, NULL); + if (unlikely(error)) { + ANDROID_ERROR(("could not get assoc info (%d)\n", error)); + return -1; + } + + memcpy(&assoc_info, buf, sizeof(wl_assoc_info_t)); + assoc_info.req_len = htod32(assoc_info.req_len); + assoc_info.resp_len = htod32(assoc_info.resp_len); + assoc_info.flags = htod32(assoc_info.flags); + + if (assoc_info.resp_len) { + resp_ies_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp); + } + + if (total_len < (sizeof(u32) + resp_ies_len)) { + ANDROID_ERROR(("%s: Insufficient memory, %d bytes\n", + __FUNCTION__, total_len)); + return -1; + } + /* first 4 bytes are ie len */ + memcpy(command, &resp_ies_len, sizeof(u32)); + bytes_written = sizeof(u32); + + /* get the association resp IE's if there are any */ + if (resp_ies_len) { + error = wldev_iovar_getbuf(dev, "assoc_resp_ies", NULL, 0, + buf, WL_ASSOC_INFO_MAX, NULL); + if (unlikely(error)) { + ANDROID_ERROR(("could not get assoc resp_ies (%d)\n", error)); + return -1; + } + + memcpy(command+sizeof(u32), buf, resp_ies_len); + bytes_written += resp_ies_len; + } + return bytes_written; +} + +#endif /* BCMCCX */ + +int +wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist) +{ + int i, j, match; + int ret = 0; + char mac_buf[MAX_NUM_OF_ASSOCLIST * + 
sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + + /* set filtering mode */ + if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode)) != 0)) { + ANDROID_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret)); + return ret; + } + if (macmode != MACLIST_MODE_DISABLED) { + /* set the MAC filter list */ + if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist, + sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) { + ANDROID_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret)); + return ret; + } + /* get the current list of associated STAs */ + assoc_maclist->count = MAX_NUM_OF_ASSOCLIST; + if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist, + sizeof(mac_buf))) != 0) { + ANDROID_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret)); + return ret; + } + /* do we have any STA associated? */ + if (assoc_maclist->count) { + /* iterate each associated STA */ + for (i = 0; i < assoc_maclist->count; i++) { + match = 0; + /* compare with each entry */ + for (j = 0; j < maclist->count; j++) { + ANDROID_INFO(("%s : associated="MACDBG " list="MACDBG "\n", + __FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet), + MAC2STRDBG(maclist->ea[j].octet))); + if (memcmp(assoc_maclist->ea[i].octet, + maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) { + match = 1; + break; + } + } + /* do conditional deauth */ + /* "if not in the allow list" or "if in the deny list" */ + if ((macmode == MACLIST_MODE_ALLOW && !match) || + (macmode == MACLIST_MODE_DENY && match)) { + scb_val_t scbval; + + scbval.val = htod32(1); + memcpy(&scbval.ea, &assoc_maclist->ea[i], + ETHER_ADDR_LEN); + if ((ret = wldev_ioctl_set(dev, + WLC_SCB_DEAUTHENTICATE_FOR_REASON, + &scbval, sizeof(scb_val_t))) != 0) + ANDROID_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n", + __FUNCTION__, ret)); + } + } + } + } + return ret; +} + +/* + * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2 + * + */ 
+static int +wl_android_set_mac_address_filter(struct net_device *dev, char* str) +{ + int i; + int ret = 0; + int macnum = 0; + int macmode = MACLIST_MODE_DISABLED; + struct maclist *list; + char eabuf[ETHER_ADDR_STR_LEN]; + const char *token; + dhd_pub_t *dhd = dhd_get_pub(dev); + + /* string should look like below (macmode/macnum/maclist) */ + /* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */ + + /* get the MAC filter mode */ + token = strsep((char**)&str, " "); + if (!token) { + return -1; + } + macmode = bcm_atoi(token); + + if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) { + ANDROID_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode)); + return -1; + } + + token = strsep((char**)&str, " "); + if (!token) { + return -1; + } + macnum = bcm_atoi(token); + if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { + ANDROID_ERROR(("%s : invalid number of MAC address entries %d\n", + __FUNCTION__, macnum)); + return -1; + } + /* allocate memory for the MAC list */ + list = (struct maclist*) MALLOCZ(dhd->osh, sizeof(int) + + sizeof(struct ether_addr) * macnum); + if (!list) { + ANDROID_ERROR(("%s : failed to allocate memory\n", __FUNCTION__)); + return -1; + } + /* prepare the MAC list */ + list->count = htod32(macnum); + bzero((char *)eabuf, ETHER_ADDR_STR_LEN); + for (i = 0; i < list->count; i++) { + token = strsep((char**)&str, " "); + if (token == NULL) { + ANDROID_ERROR(("%s : No mac address present\n", __FUNCTION__)); + ret = -EINVAL; + goto exit; + } + strncpy(eabuf, token, ETHER_ADDR_STR_LEN - 1); + if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) { + ANDROID_ERROR(("%s : mac parsing err index=%d, addr=%s\n", + __FUNCTION__, i, eabuf)); + list->count = i; + break; + } + ANDROID_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf)); + } + if (i == 0) + goto exit; + + /* set the list */ + if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0) + ANDROID_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); 
+ +exit: + MFREE(dhd->osh, list, sizeof(int) + sizeof(struct ether_addr) * macnum); + + return ret; +} + +/** + * Global function definitions (declared in wl_android.h) + */ + +int wl_android_wifi_on(struct net_device *dev) +{ + int ret = 0; + int retry = POWERUP_MAX_RETRY; + + if (!dev) { + ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -EINVAL; + } + + printf("%s in 1\n", __FUNCTION__); + dhd_net_if_lock(dev); + printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on); + if (!g_wifi_on) { + do { + dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY); +#ifdef BCMSDIO + ret = dhd_net_bus_resume(dev, 0); +#endif /* BCMSDIO */ +#ifdef BCMPCIE + ret = dhd_net_bus_devreset(dev, FALSE); +#endif /* BCMPCIE */ + if (ret == 0) { + break; + } + ANDROID_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n", + retry)); +#ifdef BCMPCIE + dhd_net_bus_devreset(dev, TRUE); +#endif /* BCMPCIE */ + dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY); + } while (retry-- > 0); + if (ret != 0) { + ANDROID_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n")); + goto exit; + } +#if defined(BCMSDIO) || defined(BCMDBUS) + ret = dhd_net_bus_devreset(dev, FALSE); + if (ret) + goto err; +#ifdef BCMSDIO + dhd_net_bus_resume(dev, 1); +#endif /* BCMSDIO */ +#endif /* BCMSDIO || BCMDBUS */ +#if defined(BCMSDIO) || defined(BCMDBUS) + if (!ret) { + if (dhd_dev_init_ioctl(dev) < 0) { + ret = -EFAULT; + goto err; + } + } +#endif /* BCMSDIO || BCMDBUS */ + g_wifi_on = TRUE; + } + +exit: + printf("%s: Success\n", __FUNCTION__); + dhd_net_if_unlock(dev); + return ret; + +#if defined(BCMSDIO) || defined(BCMDBUS) +err: + dhd_net_bus_devreset(dev, TRUE); +#ifdef BCMSDIO + dhd_net_bus_suspend(dev); +#endif /* BCMSDIO */ + dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY); + printf("%s: Failed\n", __FUNCTION__); + dhd_net_if_unlock(dev); + return ret; +#endif /* BCMSDIO || BCMDBUS */ +} + +int wl_android_wifi_off(struct 
net_device *dev, bool on_failure) +{ + int ret = 0; + + if (!dev) { + ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -EINVAL; + } + + printf("%s in 1\n", __FUNCTION__); +#if defined(BCMPCIE) && defined(DHD_DEBUG_UART) + ret = dhd_debug_uart_is_running(dev); + if (ret) { + ANDROID_ERROR(("%s - Debug UART App is running\n", __FUNCTION__)); + return -EBUSY; + } +#endif /* BCMPCIE && DHD_DEBUG_UART */ + dhd_net_if_lock(dev); + printf("%s in 2: g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure); + if (g_wifi_on || on_failure) { +#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS) + ret = dhd_net_bus_devreset(dev, TRUE); +#if defined(BCMSDIO) + dhd_net_bus_suspend(dev); +#endif /* BCMSDIO */ +#endif /* BCMSDIO || BCMPCIE || BCMDBUS */ + dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY); + g_wifi_on = FALSE; + } + printf("%s out\n", __FUNCTION__); + dhd_net_if_unlock(dev); + + return ret; +} + +static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len) +{ + if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN) + return -1; + return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1); +} + +#ifdef CONNECTION_STATISTICS +static int +wl_chanim_stats(struct net_device *dev, u8 *chan_idle) +{ + int err; + wl_chanim_stats_t *list; + /* Parameter _and_ returned buffer of chanim_stats. 
*/ + wl_chanim_stats_t param; + u8 result[WLC_IOCTL_SMLEN]; + chanim_stats_t *stats; + + memset(¶m, 0, sizeof(param)); + + param.buflen = htod32(sizeof(wl_chanim_stats_t)); + param.count = htod32(WL_CHANIM_COUNT_ONE); + + if ((err = wldev_iovar_getbuf(dev, "chanim_stats", (char*)¶m, sizeof(wl_chanim_stats_t), + (char*)result, sizeof(result), 0)) < 0) { + ANDROID_ERROR(("Failed to get chanim results %d \n", err)); + return err; + } + + list = (wl_chanim_stats_t*)result; + + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + + if (list->buflen == 0) { + list->version = 0; + list->count = 0; + } else if (list->version != WL_CHANIM_STATS_VERSION) { + ANDROID_ERROR(("Sorry, firmware has wl_chanim_stats version %d " + "but driver supports only version %d.\n", + list->version, WL_CHANIM_STATS_VERSION)); + list->buflen = 0; + list->count = 0; + } + + stats = list->stats; + stats->glitchcnt = dtoh32(stats->glitchcnt); + stats->badplcp = dtoh32(stats->badplcp); + stats->chanspec = dtoh16(stats->chanspec); + stats->timestamp = dtoh32(stats->timestamp); + stats->chan_idle = dtoh32(stats->chan_idle); + + ANDROID_INFO(("chanspec: 0x%4x glitch: %d badplcp: %d idle: %d timestamp: %d\n", + stats->chanspec, stats->glitchcnt, stats->badplcp, stats->chan_idle, + stats->timestamp)); + + *chan_idle = stats->chan_idle; + + return (err); +} + +static int +wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len) +{ + static char iovar_buf[WLC_IOCTL_MAXLEN]; + const wl_cnt_wlc_t* wlc_cnt = NULL; +#ifndef DISABLE_IF_COUNTERS + wl_if_stats_t* if_stats = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif /* DISABLE_IF_COUNTERS */ + + int link_speed = 0; + struct connection_stats *output; + unsigned int bufsize = 0; + int bytes_written = -1; + int ret = 0; + + ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__)); + + if (total_len <= 0) { + ANDROID_ERROR(("%s: invalid buffer size 
%d\n", __FUNCTION__, total_len)); + goto error; + } + + bufsize = total_len; + if (bufsize < sizeof(struct connection_stats)) { + ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%zu\n", + __FUNCTION__, bufsize, + sizeof(struct connection_stats))); + goto error; + } + + output = (struct connection_stats *)command; + +#ifndef DISABLE_IF_COUNTERS + if_stats = (wl_if_stats_t *)MALLOCZ(cfg->osh, sizeof(*if_stats)); + if (if_stats == NULL) { + ANDROID_ERROR(("%s(%d): MALLOCZ failed\n", __FUNCTION__, __LINE__)); + goto error; + } + memset(if_stats, 0, sizeof(*if_stats)); + + if (FW_SUPPORTED(dhdp, ifst)) { + ret = wl_cfg80211_ifstats_counters(dev, if_stats); + } else { + ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0, + (char *)if_stats, sizeof(*if_stats), NULL); + } + + if (ret) { + ANDROID_ERROR(("%s: if_counters not supported ret=%d\n", + __FUNCTION__, ret)); + + /* In case if_stats IOVAR is not supported, get information from counters. */ +#endif /* DISABLE_IF_COUNTERS */ + ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(ret)) { + ANDROID_ERROR(("counters error (%d) - size = %zu\n", ret, sizeof(wl_cnt_wlc_t))); + goto error; + } + ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0); + if (ret != BCME_OK) { + ANDROID_ERROR(("%s wl_cntbuf_to_xtlv_format ERR %d\n", + __FUNCTION__, ret)); + goto error; + } + + if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) { + ANDROID_ERROR(("%s wlc_cnt NULL!\n", __FUNCTION__)); + goto error; + } + + output->txframe = dtoh32(wlc_cnt->txframe); + output->txbyte = dtoh32(wlc_cnt->txbyte); + output->txerror = dtoh32(wlc_cnt->txerror); + output->rxframe = dtoh32(wlc_cnt->rxframe); + output->rxbyte = dtoh32(wlc_cnt->rxbyte); + output->txfail = dtoh32(wlc_cnt->txfail); + output->txretry = dtoh32(wlc_cnt->txretry); + output->txretrie = dtoh32(wlc_cnt->txretrie); + output->txrts = dtoh32(wlc_cnt->txrts); + output->txnocts = 
dtoh32(wlc_cnt->txnocts); + output->txexptime = dtoh32(wlc_cnt->txexptime); +#ifndef DISABLE_IF_COUNTERS + } else { + /* Populate from if_stats. */ + if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) { + ANDROID_ERROR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n", + __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version)); + goto error; + } + + output->txframe = (uint32)dtoh64(if_stats->txframe); + output->txbyte = (uint32)dtoh64(if_stats->txbyte); + output->txerror = (uint32)dtoh64(if_stats->txerror); + output->rxframe = (uint32)dtoh64(if_stats->rxframe); + output->rxbyte = (uint32)dtoh64(if_stats->rxbyte); + output->txfail = (uint32)dtoh64(if_stats->txfail); + output->txretry = (uint32)dtoh64(if_stats->txretry); + output->txretrie = (uint32)dtoh64(if_stats->txretrie); + if (dtoh16(if_stats->length) > OFFSETOF(wl_if_stats_t, txexptime)) { + output->txexptime = (uint32)dtoh64(if_stats->txexptime); + output->txrts = (uint32)dtoh64(if_stats->txrts); + output->txnocts = (uint32)dtoh64(if_stats->txnocts); + } else { + output->txexptime = 0; + output->txrts = 0; + output->txnocts = 0; + } + } +#endif /* DISABLE_IF_COUNTERS */ + + /* link_speed is in kbps */ + ret = wldev_get_link_speed(dev, &link_speed); + if (ret || link_speed < 0) { + ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n", + __FUNCTION__, ret, link_speed)); + goto error; + } + + output->txrate = link_speed; + + /* Channel idle ratio. 
*/ + if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) { + output->chan_idle = 0; + }; + + bytes_written = sizeof(struct connection_stats); + +error: +#ifndef DISABLE_IF_COUNTERS + if (if_stats) { + MFREE(cfg->osh, if_stats, sizeof(*if_stats)); + } +#endif /* DISABLE_IF_COUNTERS */ + + return bytes_written; +} +#endif /* CONNECTION_STATISTICS */ + +#ifdef WL_NATOE +static int +wl_android_process_natoe_cmd(struct net_device *dev, char *command, int total_len) +{ + int ret = BCME_ERROR; + char *pcmd = command; + char *str = NULL; + wl_natoe_cmd_info_t cmd_info; + const wl_natoe_sub_cmd_t *natoe_cmd = &natoe_cmd_list[0]; + + /* skip to cmd name after "natoe" */ + str = bcmstrtok(&pcmd, " ", NULL); + + /* If natoe subcmd name is not provided, return error */ + if (*pcmd == '\0') { + ANDROID_ERROR(("natoe subcmd not provided %s\n", __FUNCTION__)); + ret = -EINVAL; + return ret; + } + + /* get the natoe command name to str */ + str = bcmstrtok(&pcmd, " ", NULL); + + while (natoe_cmd->name != NULL) { + if (strcmp(natoe_cmd->name, str) == 0) { + /* dispacth cmd to appropriate handler */ + if (natoe_cmd->handler) { + cmd_info.command = command; + cmd_info.tot_len = total_len; + ret = natoe_cmd->handler(dev, natoe_cmd, pcmd, &cmd_info); + } + return ret; + } + natoe_cmd++; + } + return ret; +} + +static int +wlu_natoe_set_vars_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len) +{ + int res = BCME_OK; + wl_natoe_cmd_info_t *cmd_info = (wl_natoe_cmd_info_t *)ctx; + uint8 *command = cmd_info->command; + uint16 total_len = cmd_info->tot_len; + uint16 bytes_written = 0; + + UNUSED_PARAMETER(len); + + switch (type) { + + case WL_NATOE_XTLV_ENABLE: + { + bytes_written = snprintf(command, total_len, "natoe: %s\n", + *data?"enabled":"disabled"); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_CONFIG_IPS: + { + wl_natoe_config_ips_t *config_ips; + uint8 buf[16]; + + config_ips = (wl_natoe_config_ips_t *)data; + bcm_ip_ntoa((struct ipv4_addr 
*)&config_ips->sta_ip, buf); + bytes_written = snprintf(command, total_len, "sta ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_netmask, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "sta netmask: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_router_ip, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "sta router ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_dnsip, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "sta dns ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_ip, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "ap ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_netmask, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "ap netmask: %s\n", buf); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_CONFIG_PORTS: + { + wl_natoe_ports_config_t *ports_config; + + ports_config = (wl_natoe_ports_config_t *)data; + bytes_written = snprintf(command, total_len, "starting port num: %d\n", + dtoh16(ports_config->start_port_num)); + bytes_written += snprintf(command + bytes_written, total_len, + "number of ports: %d\n", dtoh16(ports_config->no_of_ports)); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_DBG_STATS: + { + char *stats_dump = (char *)data; + + bytes_written = snprintf(command, total_len, "%s\n", stats_dump); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_TBL_CNT: + { + bytes_written = snprintf(command, total_len, "natoe max tbl entries: %d\n", + dtoh32(*(uint32 *)data)); + cmd_info->bytes_written = bytes_written; + break; + } + + default: + /* ignore */ + break; + } + + return res; +} + +/* + * --- common for all natoe get commands ---- + */ +static int +wl_natoe_get_ioctl(struct net_device *dev, wl_natoe_ioc_t *natoe_ioc, + uint16 iocsz, uint8 *buf, 
uint16 buflen, wl_natoe_cmd_info_t *cmd_info) +{ + /* for gets we only need to pass ioc header */ + wl_natoe_ioc_t *iocresp = (wl_natoe_ioc_t *)buf; + int res; + + /* send getbuf natoe iovar */ + res = wldev_iovar_getbuf(dev, "natoe", natoe_ioc, iocsz, buf, + buflen, NULL); + + /* check the response buff */ + if ((res == BCME_OK)) { + /* scans ioctl tlvbuf f& invokes the cbfn for processing */ + res = bcm_unpack_xtlv_buf(cmd_info, iocresp->data, iocresp->len, + BCM_XTLV_OPTION_ALIGN32, wlu_natoe_set_vars_cbfn); + + if (res == BCME_OK) { + res = cmd_info->bytes_written; + } + } + else + { + ANDROID_ERROR(("%s: get command failed code %d\n", __FUNCTION__, res)); + res = BCME_ERROR; + } + + return res; +} + +static int +wl_android_natoe_subcmd_enable(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd, + char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + + ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get 
iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + uint8 val = bcm_atoi(pcmd); + + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + /* we'll adjust final ioc size at the end */ + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE, + sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + MFREE(cfg->osh, natoe_ioc, iocsz); + + return ret; +} + +static int +wl_android_natoe_subcmd_config_ips(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_config_ips_t config_ips; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + char *str; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + + ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + 
pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + memset(&config_ips, 0, sizeof(config_ips)); + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) { + ANDROID_ERROR(("Invalid STA IP addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_netmask)) { + ANDROID_ERROR(("Invalid STA netmask %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_router_ip)) { + ANDROID_ERROR(("Invalid STA router IP addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_dnsip)) { + ANDROID_ERROR(("Invalid STA DNS IP addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_ip)) { + ANDROID_ERROR(("Invalid AP IP addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_netmask)) { + ANDROID_ERROR(("Invalid AP netmask %s\n", str)); + ret = -EINVAL; + goto exit; + } + + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, + &buflen, WL_NATOE_XTLV_CONFIG_IPS, sizeof(config_ips), + &config_ips, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to 
the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ); + + return ret; +} + +static int +wl_android_natoe_subcmd_config_ports(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ports_config_t ports_config; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + char *str; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + + ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + 
uint16 buflen_at_start = buflen; + + memset(&ports_config, 0, sizeof(ports_config)); + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str) { + ANDROID_ERROR(("Invalid port string %s\n", str)); + ret = -EINVAL; + goto exit; + } + ports_config.start_port_num = htod16(bcm_atoi(str)); + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str) { + ANDROID_ERROR(("Invalid port string %s\n", str)); + ret = -EINVAL; + goto exit; + } + ports_config.no_of_ports = htod16(bcm_atoi(str)); + + if ((uint32)(ports_config.start_port_num + ports_config.no_of_ports) > + NATOE_MAX_PORT_NUM) { + ANDROID_ERROR(("Invalid port configuration\n")); + ret = -EINVAL; + goto exit; + } + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, + &buflen, WL_NATOE_XTLV_CONFIG_PORTS, sizeof(ports_config), + &ports_config, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ); + + return ret; +} + +static int +wl_android_natoe_subcmd_dbg_stats(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd, + char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + uint16 kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ; + uint16 buflen = WL_NATOE_DBG_STATS_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + + ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MAXLEN); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_DBG_STATS_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MAXLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + uint8 val = bcm_atoi(pcmd); + + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + /* we'll adjust final ioc size at the end */ + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE, + sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MAXLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MAXLEN); + 
MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ); + + return ret; +} + +static int +wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd, + char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + + ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + uint32 val = bcm_atoi(pcmd); + + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + /* we'll adjust final ioc size at the end */ + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_TBL_CNT, + sizeof(uint32), &val, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = 
(buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ); + + return ret; +} + +#endif /* WL_NATOE */ + +#ifdef WL_MBO +static int +wl_android_process_mbo_cmd(struct net_device *dev, char *command, int total_len) +{ + int ret = BCME_ERROR; + char *pcmd = command; + char *str = NULL; + wl_drv_cmd_info_t cmd_info; + const wl_drv_sub_cmd_t *mbo_cmd = &mbo_cmd_list[0]; + + /* skip to cmd name after "mbo" */ + str = bcmstrtok(&pcmd, " ", NULL); + + /* If mbo subcmd name is not provided, return error */ + if (*pcmd == '\0') { + ANDROID_ERROR(("mbo subcmd not provided %s\n", __FUNCTION__)); + ret = -EINVAL; + return ret; + } + + /* get the mbo command name to str */ + str = bcmstrtok(&pcmd, " ", NULL); + + while (mbo_cmd->name != NULL) { + if (strnicmp(mbo_cmd->name, str, strlen(mbo_cmd->name)) == 0) { + /* dispatch cmd to appropriate handler */ + if (mbo_cmd->handler) { + cmd_info.command = command; + cmd_info.tot_len = total_len; + ret = mbo_cmd->handler(dev, mbo_cmd, pcmd, &cmd_info); + } + return ret; + } + mbo_cmd++; + } + return ret; +} + +static int +wl_android_send_wnm_notif(struct net_device *dev, bcm_iov_buf_t *iov_buf, + uint16 iov_buf_len, uint8 *iov_resp, uint16 iov_resp_len, uint8 sub_elem_type) +{ + int ret = BCME_OK; + uint8 *pxtlv = NULL; + uint16 iovlen = 0; + uint16 buflen = 0, buflen_start = 0; + + memset_s(iov_buf, iov_buf_len, 0, iov_buf_len); + iov_buf->version = WL_MBO_IOV_VERSION; + iov_buf->id = WL_MBO_CMD_SEND_NOTIF; + buflen = buflen_start = iov_buf_len - sizeof(bcm_iov_buf_t); + pxtlv = (uint8 *)&iov_buf->data[0]; + ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_SUB_ELEM_TYPE, + 
sizeof(sub_elem_type), &sub_elem_type, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + return ret; + } + iov_buf->len = buflen_start - buflen; + iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len; + ret = wldev_iovar_setbuf(dev, "mbo", + iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to sent wnm notif %d\n", ret)); + } + return ret; +} + +static int +wl_android_mbo_resp_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx; + uint8 *command = cmd_info->command; + uint16 total_len = cmd_info->tot_len; + uint16 bytes_written = 0; + + UNUSED_PARAMETER(len); + /* TODO: validate data value */ + if (data == NULL) { + ANDROID_ERROR(("%s: Bad argument !!\n", __FUNCTION__)); + return -EINVAL; + } + switch (type) { + case WL_MBO_XTLV_CELL_DATA_CAP: + { + bytes_written = snprintf(command, total_len, "cell_data_cap: %u\n", *data); + cmd_info->bytes_written = bytes_written; + } + break; + default: + ANDROID_ERROR(("%s: Unknown tlv %u\n", __FUNCTION__, type)); + } + return BCME_OK; +} + +static int +wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev, const wl_drv_sub_cmd_t *cmd, + char *command, wl_drv_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + uint8 *pxtlv = NULL; + uint16 buflen = 0, buflen_start = 0; + uint16 iovlen = 0; + char *pcmd = command; + bcm_iov_buf_t *iov_buf = NULL; + bcm_iov_buf_t *p_resp = NULL; + uint8 *iov_resp = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + uint16 version; + + /* first get the configured value */ + iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN); + if (iov_buf == NULL) { + ret = -ENOMEM; + ANDROID_ERROR(("iov buf memory alloc exited\n")); + goto exit; + } + iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN); + if (iov_resp == NULL) { + ret = -ENOMEM; + ANDROID_ERROR(("iov resp memory alloc exited\n")); + goto exit; + } + + /* fill header */ + iov_buf->version = WL_MBO_IOV_VERSION; + 
iov_buf->id = WL_MBO_CMD_CELLULAR_DATA_CAP; + + ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp, + WLC_IOCTL_MAXLEN, + NULL); + if (ret != BCME_OK) { + goto exit; + } + p_resp = (bcm_iov_buf_t *)iov_resp; + + /* get */ + if (*pcmd == WL_IOCTL_ACTION_GET) { + /* Check for version */ + version = dtoh16(*(uint16 *)iov_resp); + if (version != WL_MBO_IOV_VERSION) { + ret = -EINVAL; + } + if (p_resp->id == WL_MBO_CMD_CELLULAR_DATA_CAP) { + ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data, + p_resp->len, BCM_XTLV_OPTION_ALIGN32, + wl_android_mbo_resp_parse_cbfn); + if (ret == BCME_OK) { + ret = cmd_info->bytes_written; + } + } else { + ret = -EINVAL; + ANDROID_ERROR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id)); + goto exit; + } + } else { + uint8 cell_cap = bcm_atoi(pcmd); + const uint8* old_cell_cap = NULL; + uint16 len = 0; + + old_cell_cap = bcm_get_data_from_xtlv_buf((uint8 *)p_resp->data, p_resp->len, + WL_MBO_XTLV_CELL_DATA_CAP, &len, BCM_XTLV_OPTION_ALIGN32); + if (old_cell_cap && *old_cell_cap == cell_cap) { + ANDROID_ERROR(("No change is cellular data capability\n")); + /* No change in value */ + goto exit; + } + + buflen = buflen_start = WLC_IOCTL_MEDLEN - sizeof(bcm_iov_buf_t); + + if (cell_cap < MBO_CELL_DATA_CONN_AVAILABLE || + cell_cap > MBO_CELL_DATA_CONN_NOT_CAPABLE) { + ANDROID_ERROR(("wrong value %u\n", cell_cap)); + ret = -EINVAL; + goto exit; + } + pxtlv = (uint8 *)&iov_buf->data[0]; + ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CELL_DATA_CAP, + sizeof(cell_cap), &cell_cap, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + goto exit; + } + iov_buf->len = buflen_start - buflen; + iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len; + ret = wldev_iovar_setbuf(dev, "mbo", + iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + goto exit; + } + /* send a WNM notification request to associated AP */ + if 
(wl_get_drv_status(cfg, CONNECTED, dev)) { + ANDROID_INFO(("Sending WNM Notif\n")); + ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN, + iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_CELL_DATA_CAP); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to send WNM notification %d\n", ret)); + ret = -EINVAL; + } + } + } +exit: + if (iov_buf) { + MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN); + } + if (iov_resp) { + MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN); + } + return ret; +} + +static int +wl_android_mbo_non_pref_chan_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len) +{ + wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx; + uint8 *command = cmd_info->command + cmd_info->bytes_written; + uint16 total_len = cmd_info->tot_len; + uint16 bytes_written = 0; + + ANDROID_INFO(("Total bytes written at begining %u\n", cmd_info->bytes_written)); + UNUSED_PARAMETER(len); + if (data == NULL) { + ANDROID_ERROR(("%s: Bad argument !!\n", __FUNCTION__)); + return -EINVAL; + } + switch (type) { + case WL_MBO_XTLV_OPCLASS: + { + bytes_written = snprintf(command, total_len, "%u:", *data); + ANDROID_ERROR(("wr %u %u\n", bytes_written, *data)); + command += bytes_written; + cmd_info->bytes_written += bytes_written; + } + break; + case WL_MBO_XTLV_CHAN: + { + bytes_written = snprintf(command, total_len, "%u:", *data); + ANDROID_ERROR(("wr %u\n", bytes_written)); + command += bytes_written; + cmd_info->bytes_written += bytes_written; + } + break; + case WL_MBO_XTLV_PREFERENCE: + { + bytes_written = snprintf(command, total_len, "%u:", *data); + ANDROID_ERROR(("wr %u\n", bytes_written)); + command += bytes_written; + cmd_info->bytes_written += bytes_written; + } + break; + case WL_MBO_XTLV_REASON_CODE: + { + bytes_written = snprintf(command, total_len, "%u ", *data); + ANDROID_ERROR(("wr %u\n", bytes_written)); + command += bytes_written; + cmd_info->bytes_written += bytes_written; + } + break; + default: + ANDROID_ERROR(("%s: Unknown tlv %u\n", __FUNCTION__, type)); + 
} + ANDROID_INFO(("Total bytes written %u\n", cmd_info->bytes_written)); + return BCME_OK; +} + +static int +wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev, + const wl_drv_sub_cmd_t *cmd, char *command, + wl_drv_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + uint8 *pxtlv = NULL; + uint16 buflen = 0, buflen_start = 0; + uint16 iovlen = 0; + char *pcmd = command; + bcm_iov_buf_t *iov_buf = NULL; + bcm_iov_buf_t *p_resp = NULL; + uint8 *iov_resp = NULL; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + uint16 version; + + ANDROID_ERROR(("%s:%d\n", __FUNCTION__, __LINE__)); + iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN); + if (iov_buf == NULL) { + ret = -ENOMEM; + ANDROID_ERROR(("iov buf memory alloc exited\n")); + goto exit; + } + iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN); + if (iov_resp == NULL) { + ret = -ENOMEM; + ANDROID_ERROR(("iov resp memory alloc exited\n")); + goto exit; + } + /* get */ + if (*pcmd == WL_IOCTL_ACTION_GET) { + /* fill header */ + iov_buf->version = WL_MBO_IOV_VERSION; + iov_buf->id = WL_MBO_CMD_LIST_CHAN_PREF; + + ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp, + WLC_IOCTL_MAXLEN, NULL); + if (ret != BCME_OK) { + goto exit; + } + p_resp = (bcm_iov_buf_t *)iov_resp; + /* Check for version */ + version = dtoh16(*(uint16 *)iov_resp); + if (version != WL_MBO_IOV_VERSION) { + ANDROID_ERROR(("Version mismatch. 
returned ver %u expected %u\n", + version, WL_MBO_IOV_VERSION)); + ret = -EINVAL; + } + if (p_resp->id == WL_MBO_CMD_LIST_CHAN_PREF) { + ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data, + p_resp->len, BCM_XTLV_OPTION_ALIGN32, + wl_android_mbo_non_pref_chan_parse_cbfn); + if (ret == BCME_OK) { + ret = cmd_info->bytes_written; + } + } else { + ret = -EINVAL; + ANDROID_ERROR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id)); + goto exit; + } + } else { + char *str = pcmd; + uint opcl = 0, ch = 0, pref = 0, rc = 0; + + str = bcmstrtok(&pcmd, " ", NULL); + if (!(strnicmp(str, "set", 3)) || (!strnicmp(str, "clear", 5))) { + /* delete all configurations */ + iov_buf->version = WL_MBO_IOV_VERSION; + iov_buf->id = WL_MBO_CMD_DEL_CHAN_PREF; + iov_buf->len = 0; + iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len; + ret = wldev_iovar_setbuf(dev, "mbo", + iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + goto exit; + } + } else { + ANDROID_ERROR(("Unknown command %s\n", str)); + goto exit; + } + /* parse non pref channel list */ + if (strnicmp(str, "set", 3) == 0) { + uint8 cnt = 0; + str = bcmstrtok(&pcmd, " ", NULL); + while (str != NULL) { + ret = sscanf(str, "%u:%u:%u:%u", &opcl, &ch, &pref, &rc); + ANDROID_ERROR(("buflen %u op %u, ch %u, pref %u rc %u\n", + buflen, opcl, ch, pref, rc)); + if (ret != 4) { + ANDROID_ERROR(("Not all parameter presents\n")); + ret = -EINVAL; + } + /* TODO: add a validation check here */ + memset_s(iov_buf, WLC_IOCTL_MEDLEN, 0, WLC_IOCTL_MEDLEN); + buflen = buflen_start = WLC_IOCTL_MEDLEN; + pxtlv = (uint8 *)&iov_buf->data[0]; + /* opclass */ + ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_OPCLASS, + sizeof(uint8), (uint8 *)&opcl, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + goto exit; + } + /* channel */ + ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CHAN, + sizeof(uint8), (uint8 *)&ch, 
BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + goto exit; + } + /* preference */ + ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_PREFERENCE, + sizeof(uint8), (uint8 *)&pref, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + goto exit; + } + /* reason */ + ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_REASON_CODE, + sizeof(uint8), (uint8 *)&rc, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + goto exit; + } + ANDROID_ERROR(("len %u\n", (buflen_start - buflen))); + /* Now set the new non pref channels */ + iov_buf->version = WL_MBO_IOV_VERSION; + iov_buf->id = WL_MBO_CMD_ADD_CHAN_PREF; + iov_buf->len = buflen_start - buflen; + iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len; + ret = wldev_iovar_setbuf(dev, "mbo", + iov_buf, iovlen, iov_resp, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + goto exit; + } + cnt++; + if (cnt >= MBO_MAX_CHAN_PREF_ENTRIES) { + break; + } + ANDROID_ERROR(("%d cnt %u\n", __LINE__, cnt)); + str = bcmstrtok(&pcmd, " ", NULL); + } + } + /* send a WNM notification request to associated AP */ + if (wl_get_drv_status(cfg, CONNECTED, dev)) { + ANDROID_INFO(("Sending WNM Notif\n")); + ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN, + iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_NON_PREF_CHAN_REPORT); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to send WNM notification %d\n", ret)); + ret = -EINVAL; + } + } + } +exit: + if (iov_buf) { + MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN); + } + if (iov_resp) { + MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN); + } + return ret; +} +#endif /* WL_MBO */ + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +/* SoftAP feature */ +#define APCS_BAND_2G_LEGACY1 20 +#define APCS_BAND_2G_LEGACY2 0 +#define APCS_BAND_AUTO "band=auto" +#define APCS_BAND_2G "band=2g" +#define APCS_BAND_5G "band=5g" +#define APCS_MAX_2G_CHANNELS 11 +#define APCS_MAX_RETRY 10 +#define APCS_DEFAULT_2G_CH 1 +#define APCS_DEFAULT_5G_CH 149 +static int 
+wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, + char* command, int total_len) +{ + int channel = 0; + int chosen = 0; + int retry = 0; + int ret = 0; + int spect = 0; + u8 *reqbuf = NULL; + uint32 band = WLC_BAND_2G; + uint32 buf_size; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + char *pos = command; + int band_new, band_cur; + + if (cmd_str) { + ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str))); + if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) { + band = WLC_BAND_AUTO; + } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) { + band = WLC_BAND_5G; + } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) { + band = WLC_BAND_2G; + } else { + /* + * For backward compatibility: Some platforms used to issue argument 20 or 0 + * to enforce the 2G channel selection + */ + channel = bcm_atoi(cmd_str); + if ((channel == APCS_BAND_2G_LEGACY1) || + (channel == APCS_BAND_2G_LEGACY2)) { + band = WLC_BAND_2G; + } else { + ANDROID_ERROR(("%s: Invalid argument\n", __FUNCTION__)); + return -EINVAL; + } + } + } else { + /* If no argument is provided, default to 2G */ + ANDROID_ERROR(("%s: No argument given default to 2.4G scan\n", __FUNCTION__)); + band = WLC_BAND_2G; + } + ANDROID_INFO(("%s : HAPD_AUTO_CHANNEL = %d, band=%d \n", __FUNCTION__, channel, band)); + + ret = wldev_ioctl_set(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur)); + + /* If STA is connected, return is STA channel, else ACS can be issued, + * set spect to 0 and proceed with ACS + */ + channel = wl_cfg80211_get_sta_channel(cfg); + if (channel) { + channel = (channel <= CH_MAX_2G_CHANNEL) ? 
+ channel : APCS_DEFAULT_2G_CH; + goto done2; + } + + ret = wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect)); + if (ret) { + ANDROID_ERROR(("%s: ACS: error getting the spect, ret=%d\n", __FUNCTION__, ret)); + goto done; + } + + if (spect > 0) { + ret = wl_cfg80211_set_spect(dev, 0); + if (ret < 0) { + ANDROID_ERROR(("%s: ACS: error while setting spect, ret=%d\n", __FUNCTION__, ret)); + goto done; + } + } + + reqbuf = (u8 *)MALLOCZ(cfg->osh, CHANSPEC_BUF_SIZE); + if (reqbuf == NULL) { + ANDROID_ERROR(("%s: failed to allocate chanspec buffer\n", __FUNCTION__)); + return -ENOMEM; + } + + if (band == WLC_BAND_AUTO) { + ANDROID_INFO(("%s: ACS full channel scan \n", __FUNCTION__)); + reqbuf[0] = htod32(0); + } else if (band == WLC_BAND_5G) { + band_new = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G; + ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_new, sizeof(band_new)); + if (ret < 0) + WL_ERR(("WLC_SET_BAND error %d\n", ret)); + ANDROID_INFO(("%s: ACS 5G band scan \n", __FUNCTION__)); + if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { + ANDROID_ERROR(("ACS 5g chanspec retreival failed! \n")); + goto done; + } + } else if (band == WLC_BAND_2G) { + /* + * If channel argument is not provided/ argument 20 is provided, + * Restrict channel to 2GHz, 20MHz BW, No SB + */ + ANDROID_INFO(("%s: ACS 2G band scan \n", __FUNCTION__)); + if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) { + ANDROID_ERROR(("ACS 2g chanspec retreival failed! 
\n")); + goto done; + } + } else { + ANDROID_ERROR(("ACS: No band chosen\n")); + goto done2; + } + + buf_size = CHANSPEC_BUF_SIZE; + ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf, + buf_size); + if (ret < 0) { + ANDROID_ERROR(("%s: can't start auto channel scan, err = %d\n", + __FUNCTION__, ret)); + channel = 0; + goto done; + } + + /* Wait for auto channel selection, max 3000 ms */ + if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) { + OSL_SLEEP(500); + } else { + /* + * Full channel scan at the minimum takes 1.2secs + * even with parallel scan. max wait time: 3500ms + */ + OSL_SLEEP(1000); + } + + retry = APCS_MAX_RETRY; + while (retry--) { + ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &chosen, + sizeof(chosen)); + if (ret < 0) { + chosen = 0; + } else { + chosen = dtoh32(chosen); + } + + if ((ret == 0) && (dtoh32(chosen) != 0)) { + uint chip; + chip = dhd_conf_get_chip(dhd_get_pub(dev)); + if (chip != BCM43143_CHIP_ID) { + u32 chanspec = 0; + chanspec = wl_chspec_driver_to_host(chosen); + ANDROID_INFO(("%s: selected chanspec = 0x%x\n", __FUNCTION__, chanspec)); + chosen = wf_chspec_ctlchan(chanspec); + ANDROID_INFO(("%s: selected chosen = 0x%x\n", __FUNCTION__, chosen)); + } + } + + if (chosen) { + int chosen_band; + int apcs_band; +#ifdef D11AC_IOTYPES + if (wl_cfg80211_get_ioctl_version() == 1) { + channel = LCHSPEC_CHANNEL((chanspec_t)chosen); + } else { + channel = CHSPEC_CHANNEL((chanspec_t)chosen); + } +#else + channel = CHSPEC_CHANNEL((chanspec_t)chosen); +#endif /* D11AC_IOTYPES */ + apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band; + chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? 
WLC_BAND_2G : WLC_BAND_5G; + if (band == WLC_BAND_AUTO) { + printf("%s: selected channel = %d\n", __FUNCTION__, channel); + break; + } else if (apcs_band == chosen_band) { + printf("%s: selected channel = %d\n", __FUNCTION__, channel); + break; + } + } + ANDROID_INFO(("%s: %d tried, ret = %d, chosen = 0x%x\n", __FUNCTION__, + (APCS_MAX_RETRY - retry), ret, chosen)); + OSL_SLEEP(250); + } + +done: + if ((retry == 0) || (ret < 0)) { + /* On failure, fallback to a default channel */ + if (band == WLC_BAND_5G) { + channel = APCS_DEFAULT_5G_CH; + } else { + channel = APCS_DEFAULT_2G_CH; + } + ANDROID_ERROR(("%s: ACS failed." + " Fall back to default channel (%d) \n", __FUNCTION__, channel)); + } +done2: + ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur)); + if (ret < 0) + WL_ERR(("WLC_SET_BAND error %d\n", ret)); + if (spect > 0) { + if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) { + ANDROID_ERROR(("%s: ACS: error while setting spect\n", __FUNCTION__)); + } + } + + if (reqbuf) { + MFREE(cfg->osh, reqbuf, CHANSPEC_BUF_SIZE); + } + + if (channel) { + if (channel < 15) + pos += snprintf(pos, total_len, "2g="); + else + pos += snprintf(pos, total_len, "5g="); + pos += snprintf(pos, total_len, "%d", channel); + ANDROID_INFO(("%s: command result is %s \n", __FUNCTION__, command)); + return strlen(command); + } else { + return ret; + } +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +int wl_android_set_roam_mode(struct net_device *dev, char *command) +{ + int error = 0; + int mode = 0; + + if (sscanf(command, "%*s %d", &mode) != 1) { + ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + error = wldev_iovar_setint(dev, "roam_off", mode); + if (error) { + ANDROID_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return -1; + } + else + ANDROID_ERROR(("%s: succeeded to set roaming Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return 0; +} + +#ifdef WL_CFG80211 +int 
wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len) +{ + char ie_buf[VNDR_IE_MAX_LEN]; + char *ioctl_buf = NULL; + char hex[] = "XX"; + char *pcmd = NULL; + int ielen = 0, datalen = 0, idx = 0, tot_len = 0; + vndr_ie_setbuf_t *vndr_ie = NULL; + s32 iecount; + uint32 pktflag; + s32 err = BCME_OK, bssidx; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + + /* Check the VSIE (Vendor Specific IE) which was added. + * If exist then send IOVAR to delete it + */ + if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) { + return -EINVAL; + } + + if (total_len < (strlen(CMD_SETIBSSBEACONOUIDATA) + 1)) { + ANDROID_ERROR(("error. total_len:%d\n", total_len)); + return -EINVAL; + } + + pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1; + for (idx = 0; idx < DOT11_OUI_LEN; idx++) { + if (*pcmd == '\0') { + ANDROID_ERROR(("error while parsing OUI.\n")); + return -EINVAL; + } + hex[0] = *pcmd++; + hex[1] = *pcmd++; + ie_buf[idx] = (uint8)simple_strtoul(hex, NULL, 16); + } + pcmd++; + while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + ie_buf[idx++] = (uint8)simple_strtoul(hex, NULL, 16); + datalen++; + } + + if (datalen <= 0) { + ANDROID_ERROR(("error. 
vndr ie len:%d\n", datalen)); + return -EINVAL; + } + + tot_len = (int)(sizeof(vndr_ie_setbuf_t) + (datalen - 1)); + vndr_ie = (vndr_ie_setbuf_t *)MALLOCZ(cfg->osh, tot_len); + if (!vndr_ie) { + ANDROID_ERROR(("IE memory alloc failed\n")); + return -ENOMEM; + } + /* Copy the vndr_ie SET command ("add"/"del") to the buffer */ + strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1); + vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + /* Set the IE count - the buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32)); + + /* Set packet flag to indicate that BEACON's will contain this IE */ + pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG); + memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag, + sizeof(u32)); + /* Set the IE ID */ + vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID; + + memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf, + DOT11_OUI_LEN); + memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, + &ie_buf[DOT11_OUI_LEN], datalen); + + ielen = DOT11_OUI_LEN + datalen; + vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen; + + ioctl_buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MEDLEN); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + if (vndr_ie) { + MFREE(cfg->osh, vndr_ie, tot_len); + } + return -ENOMEM; + } + memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */ + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + ANDROID_ERROR(("Find index failed\n")); + err = BCME_ERROR; + goto end; + } + err = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie", vndr_ie, tot_len, ioctl_buf, + WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync); +end: + if (err != BCME_OK) { + err = -EINVAL; + if (vndr_ie) { + MFREE(cfg->osh, vndr_ie, tot_len); + } + } + else { + /* do NOT free 'vndr_ie' for the next process */ + wl_cfg80211_ibss_vsie_set_buffer(dev, 
vndr_ie, tot_len); + } + + if (ioctl_buf) { + MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN); + } + + return err; +} +#endif + +#if defined(BCMFW_ROAM_ENABLE) +static int +wl_android_set_roampref(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + char smbuf[WLC_IOCTL_SMLEN]; + uint8 buf[MAX_BUF_SIZE]; + uint8 *pref = buf; + char *pcmd; + int num_ucipher_suites = 0; + int num_akm_suites = 0; + wpa_suite_t ucipher_suites[MAX_NUM_SUITES]; + wpa_suite_t akm_suites[MAX_NUM_SUITES]; + int num_tuples = 0; + int total_bytes = 0; + int total_len_left; + int i, j; + char hex[] = "XX"; + + pcmd = command + strlen(CMD_SET_ROAMPREF) + 1; + total_len_left = total_len - strlen(CMD_SET_ROAMPREF) + 1; + + num_akm_suites = simple_strtoul(pcmd, NULL, 16); + if (num_akm_suites > MAX_NUM_SUITES) { + ANDROID_ERROR(("too many AKM suites = %d\n", num_akm_suites)); + return -1; + } + + /* Increment for number of AKM suites field + space */ + pcmd += 3; + total_len_left -= 3; + + /* check to make sure pcmd does not overrun */ + if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE)) + return -1; + + memset(buf, 0, sizeof(buf)); + memset(akm_suites, 0, sizeof(akm_suites)); + memset(ucipher_suites, 0, sizeof(ucipher_suites)); + + /* Save the AKM suites passed in the command */ + for (i = 0; i < num_akm_suites; i++) { + /* Store the MSB first, as required by join_pref */ + for (j = 0; j < 4; j++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + buf[j] = (uint8)simple_strtoul(hex, NULL, 16); + } + memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32)); + } + + total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE); + num_ucipher_suites = simple_strtoul(pcmd, NULL, 16); + /* Increment for number of cipher suites field + space */ + pcmd += 3; + total_len_left -= 3; + + if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE)) + return -1; + + /* Save the cipher suites passed in the command */ + for (i = 0; i < num_ucipher_suites; i++) { + /* Store the MSB first, as required by 
join_pref */ + for (j = 0; j < 4; j++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + buf[j] = (uint8)simple_strtoul(hex, NULL, 16); + } + memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32)); + } + + /* Join preference for RSSI + * Type : 1 byte (0x01) + * Length : 1 byte (0x02) + * Value : 2 bytes (reserved) + */ + *pref++ = WL_JOIN_PREF_RSSI; + *pref++ = JOIN_PREF_RSSI_LEN; + *pref++ = 0; + *pref++ = 0; + + /* Join preference for WPA + * Type : 1 byte (0x02) + * Length : 1 byte (not used) + * Value : (variable length) + * reserved: 1 byte + * count : 1 byte (no of tuples) + * Tuple1 : 12 bytes + * akm[4] + * ucipher[4] + * mcipher[4] + * Tuple2 : 12 bytes + * Tuplen : 12 bytes + */ + num_tuples = num_akm_suites * num_ucipher_suites; + if (num_tuples != 0) { + if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) { + *pref++ = WL_JOIN_PREF_WPA; + *pref++ = 0; + *pref++ = 0; + *pref++ = (uint8)num_tuples; + total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + + (JOIN_PREF_WPA_TUPLE_SIZE * num_tuples); + } else { + ANDROID_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__)); + return -1; + } + } else { + /* No WPA config, configure only RSSI preference */ + total_bytes = JOIN_PREF_RSSI_SIZE; + } + + /* akm-ucipher-mcipher tuples in the format required for join_pref */ + for (i = 0; i < num_ucipher_suites; i++) { + for (j = 0; j < num_akm_suites; j++) { + memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + /* Set to 0 to match any available multicast cipher */ + memset(pref, 0, WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + } + } + + prhex("join pref", (uint8 *)buf, total_bytes); + error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, sizeof(smbuf), NULL); + if (error) { + ANDROID_ERROR(("Failed to set join_pref, error = %d\n", error)); + } + return error; +} +#endif /* defined(BCMFW_ROAM_ENABLE */ + +#ifdef WL_CFG80211 +static 
int +wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config) +{ + struct io_cfg *resume_cfg; + s32 ret; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + + resume_cfg = (struct io_cfg *)MALLOCZ(cfg->osh, sizeof(struct io_cfg)); + if (!resume_cfg) + return -ENOMEM; + + if (config->iovar) { + ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param); + if (ret) { + ANDROID_ERROR(("%s: Failed to get current %s value\n", + __FUNCTION__, config->iovar)); + goto error; + } + + ret = wldev_iovar_setint(dev, config->iovar, config->param); + if (ret) { + ANDROID_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__, + config->iovar, config->param)); + goto error; + } + + resume_cfg->iovar = config->iovar; + } else { + resume_cfg->arg = MALLOCZ(cfg->osh, config->len); + if (!resume_cfg->arg) { + ret = -ENOMEM; + goto error; + } + ret = wldev_ioctl_get(dev, config->ioctl, resume_cfg->arg, config->len); + if (ret) { + ANDROID_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__, + config->ioctl)); + goto error; + } + ret = wldev_ioctl_set(dev, config->ioctl + 1, config->arg, config->len); + if (ret) { + ANDROID_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__, + config->iovar, config->param)); + goto error; + } + if (config->ioctl + 1 == WLC_SET_PM) + wl_cfg80211_update_power_mode(dev); + resume_cfg->ioctl = config->ioctl; + resume_cfg->len = config->len; + } + + list_add(&resume_cfg->list, head); + + return 0; +error: + MFREE(cfg->osh, resume_cfg->arg, config->len); + MFREE(cfg->osh, resume_cfg, sizeof(struct io_cfg)); + return ret; +} + +static void +wl_android_iolist_resume(struct net_device *dev, struct list_head *head) +{ + struct io_cfg *config; + struct list_head *cur, *q; + s32 ret = 0; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif // endif + list_for_each_safe(cur, q, head) { + config = 
list_entry(cur, struct io_cfg, list); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif // endif + if (config->iovar) { + if (!ret) + ret = wldev_iovar_setint(dev, config->iovar, + config->param); + } else { + if (!ret) + ret = wldev_ioctl_set(dev, config->ioctl + 1, + config->arg, config->len); + if (config->ioctl + 1 == WLC_SET_PM) + wl_cfg80211_update_power_mode(dev); + MFREE(cfg->osh, config->arg, config->len); + } + list_del(cur); + MFREE(cfg->osh, config, sizeof(struct io_cfg)); + } +} + +static int +wl_android_set_miracast(struct net_device *dev, char *command) +{ + int mode, val = 0; + int ret = 0; + struct io_cfg config; + + if (sscanf(command, "%*s %d", &mode) != 1) { + ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + ANDROID_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode)); + + if (miracast_cur_mode == mode) { + return 0; + } + + wl_android_iolist_resume(dev, &miracast_resume_list); + miracast_cur_mode = MIRACAST_MODE_OFF; + memset((void *)&config, 0, sizeof(config)); + switch (mode) { + case MIRACAST_MODE_SOURCE: +#ifdef MIRACAST_MCHAN_ALGO + /* setting mchan_algo to platform specific value */ + config.iovar = "mchan_algo"; + + ret = wldev_ioctl_get(dev, WLC_GET_BCNPRD, &val, sizeof(int)); + if (!ret && val > 100) { + config.param = 0; + ANDROID_ERROR(("%s: Connected station's beacon interval: " + "%d and set mchan_algo to %d \n", + __FUNCTION__, val, config.param)); + } else { + config.param = MIRACAST_MCHAN_ALGO; + } + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } +#endif /* MIRACAST_MCHAN_ALGO */ + +#ifdef MIRACAST_MCHAN_BW + /* setting mchan_bw to platform specific value */ + config.iovar = "mchan_bw"; + config.param = MIRACAST_MCHAN_BW; + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } +#endif /* MIRACAST_MCHAN_BW */ + +#ifdef MIRACAST_AMPDU_SIZE + /* setting 
apmdu to platform specific value */ + config.iovar = "ampdu_mpdu"; + config.param = MIRACAST_AMPDU_SIZE; + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } +#endif /* MIRACAST_AMPDU_SIZE */ + /* FALLTROUGH */ + /* Source mode shares most configurations with sink mode. + * Fall through here to avoid code duplication + */ + case MIRACAST_MODE_SINK: + /* disable internal roaming */ + config.iovar = "roam_off"; + config.param = 1; + config.arg = NULL; + config.len = 0; + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + + /* tunr off pm */ + ret = wldev_ioctl_get(dev, WLC_GET_PM, &val, sizeof(val)); + if (ret) { + goto resume; + } + + if (val != PM_OFF) { + val = PM_OFF; + config.iovar = NULL; + config.ioctl = WLC_GET_PM; + config.arg = &val; + config.len = sizeof(int); + ret = wl_android_iolist_add(dev, &miracast_resume_list, &config); + if (ret) { + goto resume; + } + } + break; + case MIRACAST_MODE_OFF: + default: + break; + } + miracast_cur_mode = mode; + + return 0; + +resume: + ANDROID_ERROR(("%s: turnoff miracast mode because of err%d\n", __FUNCTION__, ret)); + wl_android_iolist_resume(dev, &miracast_resume_list); + return ret; +} +#endif + +#define NETLINK_OXYGEN 30 +#define AIBSS_BEACON_TIMEOUT 10 + +static struct sock *nl_sk = NULL; + +static void wl_netlink_recv(struct sk_buff *skb) +{ + ANDROID_ERROR(("netlink_recv called\n")); +} + +static int wl_netlink_init(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + struct netlink_kernel_cfg cfg = { + .input = wl_netlink_recv, + }; +#endif // endif + + if (nl_sk != NULL) { + ANDROID_ERROR(("nl_sk already exist\n")); + return BCME_ERROR; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, + 0, wl_netlink_recv, NULL, THIS_MODULE); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, 
THIS_MODULE, &cfg); +#else + nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg); +#endif // endif + + if (nl_sk == NULL) { + ANDROID_ERROR(("nl_sk is not ready\n")); + return BCME_ERROR; + } + + return BCME_OK; +} + +static void wl_netlink_deinit(void) +{ + if (nl_sk) { + netlink_kernel_release(nl_sk); + nl_sk = NULL; + } +} + +s32 +wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size) +{ + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh = NULL; + int ret = -1; + + if (nl_sk == NULL) { + ANDROID_ERROR(("nl_sk was not initialized\n")); + goto nlmsg_failure; + } + + skb = alloc_skb(NLMSG_SPACE(size), GFP_ATOMIC); + if (skb == NULL) { + ANDROID_ERROR(("failed to allocate memory\n")); + goto nlmsg_failure; + } + + nlh = nlmsg_put(skb, 0, 0, 0, size, 0); + if (nlh == NULL) { + ANDROID_ERROR(("failed to build nlmsg, skb_tailroom:%d, nlmsg_total_size:%d\n", + skb_tailroom(skb), nlmsg_total_size(size))); + dev_kfree_skb(skb); + goto nlmsg_failure; + } + + memcpy(nlmsg_data(nlh), data, size); + nlh->nlmsg_seq = seq; + nlh->nlmsg_type = type; + + /* netlink_unicast() takes ownership of the skb and frees it itself. */ + ret = netlink_unicast(nl_sk, skb, pid, 0); + ANDROID_INFO(("netlink_unicast() pid=%d, ret=%d\n", pid, ret)); + +nlmsg_failure: + return ret; +} + +int wl_keep_alive_set(struct net_device *dev, char* extra) +{ + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + int ret; + uint period_msec = 0; + char *buf; + dhd_pub_t *dhd = dhd_get_pub(dev); + + if (extra == NULL) { + ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__)); + return -1; + } + if (sscanf(extra, "%d", &period_msec) != 1) { + ANDROID_ERROR(("%s: sscanf error. 
check period_msec value\n", __FUNCTION__)); + return -EINVAL; + } + ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec)); + + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + + mkeep_alive_pkt.period_msec = period_msec; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + + /* Setup keep alive zero for null packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + + buf = (char *)MALLOC(dhd->osh, WLC_IOCTL_SMLEN); + if (!buf) { + ANDROID_ERROR(("%s: buffer alloc failed\n", __FUNCTION__)); + return BCME_NOMEM; + } + ret = wldev_iovar_setbuf(dev, "mkeep_alive", (char *)&mkeep_alive_pkt, + WL_MKEEP_ALIVE_FIXED_LEN, buf, WLC_IOCTL_SMLEN, NULL); + if (ret < 0) + ANDROID_ERROR(("%s:keep_alive set failed:%d\n", __FUNCTION__, ret)); + else + ANDROID_TRACE(("%s:keep_alive set ok\n", __FUNCTION__)); + MFREE(dhd->osh, buf, WLC_IOCTL_SMLEN); + return ret; +} + +#ifdef P2PRESP_WFDIE_SRC +static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int only_resp_wfdsrc = 0; + + error = wldev_iovar_getint(dev, "p2p_only_resp_wfdsrc", &only_resp_wfdsrc); + if (error) { + ANDROID_ERROR(("%s: Failed to get the mode for only_resp_wfdsrc, error = %d\n", + __FUNCTION__, error)); + return -1; + } + + bytes_written = snprintf(command, total_len, "%s %d", + CMD_P2P_GET_WFDIE_RESP, only_resp_wfdsrc); + + return bytes_written; +} + +static int wl_android_set_wfdie_resp(struct net_device *dev, int only_resp_wfdsrc) +{ + int error = 0; + + error = wldev_iovar_setint(dev, "p2p_only_resp_wfdsrc", only_resp_wfdsrc); + if (error) { + ANDROID_ERROR(("%s: Failed to set only_resp_wfdsrc %d, error = %d\n", + __FUNCTION__, only_resp_wfdsrc, error)); + return -1; + } + + return 0; +} +#endif /* P2PRESP_WFDIE_SRC */ + +#ifdef BT_WIFI_HANDOVER +static int +wl_tbow_teardown(struct net_device *dev) 
+{ + int err = BCME_OK; + char buf[WLC_IOCTL_SMLEN]; + tbow_setup_netinfo_t netinfo; + memset(&netinfo, 0, sizeof(netinfo)); + netinfo.opmode = TBOW_HO_MODE_TEARDOWN; + + err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo, + sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL); + if (err < 0) { + ANDROID_ERROR(("tbow_doho iovar error %d\n", err)); + return err; + } + return err; +} +#endif /* BT_WIFI_HANOVER */ + +#ifdef SET_RPS_CPUS +static int +wl_android_set_rps_cpus(struct net_device *dev, char *command) +{ + int error, enable; + + enable = command[strlen(CMD_RPSMODE) + 1] - '0'; + error = dhd_rps_cpus_enable(dev, enable); + +#if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE) && defined(WL_CFG80211) + if (!error) { + void *dhdp = wl_cfg80211_get_dhdp(net); + if (enable) { + ANDROID_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD); + } else { + ANDROID_TRACE(("%s : clear ack suppress.\n", __FUNCTION__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } + } +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE && WL_CFG80211 */ + + return error; +} +#endif /* SET_RPS_CPUS */ + +static int wl_android_get_link_status(struct net_device *dev, char *command, + int total_len) +{ + int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map; + uint32 rspec; + uint encode, txexp; + wl_bss_info_t *bi; + int datalen = sizeof(uint32) + sizeof(wl_bss_info_t); + char buf[datalen]; + + memset(buf, 0, datalen); + /* get BSS information */ + *(u32 *) buf = htod32(datalen); + error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen); + if (unlikely(error)) { + ANDROID_ERROR(("Could not get bss info %d\n", error)); + return -1; + } + + bi = (wl_bss_info_t*) (buf + sizeof(uint32)); + + for (i = 0; i < ETHER_ADDR_LEN; i++) { + if (bi->BSSID.octet[i] > 0) { + break; + } + } + + if (i == ETHER_ADDR_LEN) { + ANDROID_INFO(("No BSSID\n")); + return -1; + } + + /* check VHT 
capability at beacon */ + if (bi->vht_cap) { + if (CHSPEC_IS5G(bi->chanspec)) { + result |= WL_ANDROID_LINK_AP_VHT_SUPPORT; + } + } + + /* get a rspec (radio spectrum) rate */ + error = wldev_iovar_getint(dev, "nrate", &rspec); + if (unlikely(error) || rspec == 0) { + ANDROID_ERROR(("get link status error (%d)\n", error)); + return -1; + } + + encode = (rspec & WL_RSPEC_ENCODING_MASK); + txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT; + + switch (encode) { + case WL_RSPEC_ENCODE_HT: + /* check Rx MCS Map for HT */ + for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) { + int8 bitmap = 0xFF; + if (i == MAX_STREAMS_SUPPORTED-1) { + bitmap = 0x7F; + } + if (bi->basic_mcs[i] & bitmap) { + nss++; + } + } + break; + case WL_RSPEC_ENCODE_VHT: + /* check Rx MCS Map for VHT */ + for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) { + mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap)); + if (mcs_map != VHT_CAP_MCS_MAP_NONE) { + nss++; + } + } + break; + } + + /* check MIMO capability with nss in beacon */ + if (nss > 1) { + result |= WL_ANDROID_LINK_AP_MIMO_SUPPORT; + } + + single_stream = (encode == WL_RSPEC_ENCODE_RATE) || + ((encode == WL_RSPEC_ENCODE_HT) && (rspec & WL_RSPEC_HT_MCS_MASK) < 8) || + ((encode == WL_RSPEC_ENCODE_VHT) && + ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1); + + if (txexp == 0) { + if ((rspec & WL_RSPEC_STBC) && single_stream) { + stf = OLD_NRATE_STF_STBC; + } else { + stf = (single_stream) ? 
OLD_NRATE_STF_SISO : OLD_NRATE_STF_SDM; + } + } else if (txexp == 1 && single_stream) { + stf = OLD_NRATE_STF_CDD; + } + + /* check 11ac (VHT) */ + if (encode == WL_RSPEC_ENCODE_VHT) { + if (CHSPEC_IS5G(bi->chanspec)) { + result |= WL_ANDROID_LINK_VHT; + } + } + + /* check MIMO */ + if (result & WL_ANDROID_LINK_AP_MIMO_SUPPORT) { + switch (stf) { + case OLD_NRATE_STF_SISO: + break; + case OLD_NRATE_STF_CDD: + case OLD_NRATE_STF_STBC: + result |= WL_ANDROID_LINK_MIMO; + break; + case OLD_NRATE_STF_SDM: + if (!single_stream) { + result |= WL_ANDROID_LINK_MIMO; + } + break; + } + } + + ANDROID_INFO(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n", + __FUNCTION__, result, stf, single_stream, nss)); + + bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_LINK_STATUS, result); + + return bytes_written; +} + +#ifdef P2P_LISTEN_OFFLOADING + +s32 +wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg) +{ + s32 bssidx; + int ret = 0; + int p2plo_pause = 0; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (!cfg || !cfg->p2p) { + ANDROID_ERROR(("Wl %p or cfg->p2p %p is null\n", + cfg, cfg ? cfg->p2p : 0)); + return 0; + } + if (!dhd->up) { + ANDROID_ERROR(("bus is already down\n")); + return ret; + } + + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), + "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync); + if (ret < 0) { + ANDROID_ERROR(("p2po_stop Failed :%d\n", ret)); + } + + return ret; +} +s32 +wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len) +{ + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + wl_p2plo_listen_t p2plo_listen; + int ret = -EAGAIN; + int channel = 0; + int period = 0; + int interval = 0; + int count = 0; + if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) { + ANDROID_ERROR(("Sending Action Frames. 
Try it again.\n")); + goto exit; + } + + if (wl_get_drv_status_all(cfg, SCANNING)) { + ANDROID_ERROR(("Scanning already\n")); + goto exit; + } + + if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) { + ANDROID_ERROR(("Scanning being aborted\n")); + goto exit; + } + + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + ANDROID_ERROR(("p2p listen offloading already running\n")); + goto exit; + } + + /* Just in case if it is not enabled */ + if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { + ANDROID_ERROR(("cfgp2p_enable discovery failed")); + goto exit; + } + + bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t)); + + if (len) { + sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count); + if ((channel == 0) || (period == 0) || + (interval == 0) || (count == 0)) { + ANDROID_ERROR(("Wrong argument %d/%d/%d/%d \n", + channel, period, interval, count)); + ret = -EAGAIN; + goto exit; + } + p2plo_listen.period = period; + p2plo_listen.interval = interval; + p2plo_listen.count = count; + + ANDROID_ERROR(("channel:%d period:%d, interval:%d count:%d\n", + channel, period, interval, count)); + } else { + ANDROID_ERROR(("Argument len is wrong.\n")); + ret = -EAGAIN; + goto exit; + } + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel, + sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + ANDROID_ERROR(("p2po_listen_channel Failed :%d\n", ret)); + goto exit; + } + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen, + sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + ANDROID_ERROR(("p2po_listen Failed :%d\n", ret)); + goto exit; + } + + wl_set_p2p_status(cfg, DISC_IN_PROGRESS); +exit : + return ret; +} +s32 +wl_cfg80211_p2plo_listen_stop(struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + int ret = -EAGAIN; + + if ((ret 
= wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL, + 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + ANDROID_ERROR(("p2po_stop Failed :%d\n", ret)); + goto exit; + } + +exit: + return ret; +} + +s32 +wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len) +{ + int ret = 0; + + ANDROID_ERROR(("Entry cmd:%s arg_len:%d \n", cmd, len)); + + if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) { + ret = wl_cfg80211_p2plo_listen_start(dev, buf, len); + } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) { + ret = wl_cfg80211_p2plo_listen_stop(dev); + } else { + ANDROID_ERROR(("Request for Unsupported CMD:%s \n", buf)); + ret = -EINVAL; + } + return ret; +} +#endif /* P2P_LISTEN_OFFLOADING */ + +#ifdef WL_MURX +int +wl_android_murx_bfe_cap(struct net_device *dev, int val) +{ + int err = BCME_OK; + int iface_count = wl_cfg80211_iface_count(dev); + struct ether_addr bssid; + wl_reassoc_params_t params; + + if (iface_count > 1) { + ANDROID_ERROR(("murx_bfe_cap change is not allowed when " + "there are multiple interfaces\n")); + return -EINVAL; + } + /* Now there is only single interface */ + err = wldev_iovar_setint(dev, "murx_bfe_cap", val); + if (unlikely(err)) { + ANDROID_ERROR(("Failed to set murx_bfe_cap IOVAR to %d," + "error %d\n", val, err)); + return err; + } + + /* If successful intiate a reassoc */ + memset(&bssid, 0, ETHER_ADDR_LEN); + if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) { + ANDROID_ERROR(("Failed to get bssid, error=%d\n", err)); + return err; + } + + bzero(¶ms, sizeof(wl_reassoc_params_t)); + memcpy(¶ms.bssid, &bssid, ETHER_ADDR_LEN); + + if ((err = wldev_ioctl_set(dev, WLC_REASSOC, ¶ms, + sizeof(wl_reassoc_params_t))) < 0) { + ANDROID_ERROR(("reassoc failed err:%d \n", err)); + } else { + ANDROID_INFO(("reassoc issued successfully\n")); + } + + return err; +} +#endif /* WL_MURX */ + +#ifdef SUPPORT_RSSI_SUM_REPORT +int 
+wl_android_get_rssi_per_ant(struct net_device *dev, char *command, int total_len) +{ + wl_rssi_ant_mimo_t rssi_ant_mimo; + char *ifname = NULL; + char *peer_mac = NULL; + char *mimo_cmd = "mimo"; + char *pos, *token; + int err = BCME_OK; + int bytes_written = 0; + bool mimo_rssi = FALSE; + + memset(&rssi_ant_mimo, 0, sizeof(wl_rssi_ant_mimo_t)); + /* + * STA I/F: DRIVER GET_RSSI_PER_ANT + * AP/GO I/F: DRIVER GET_RSSI_PER_ANT + */ + pos = command; + + /* drop command */ + token = bcmstrtok(&pos, " ", NULL); + + /* get the interface name */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) { + ANDROID_ERROR(("Invalid arguments\n")); + return -EINVAL; + } + ifname = token; + + /* Optional: Check the MIMO RSSI mode or peer MAC address */ + token = bcmstrtok(&pos, " ", NULL); + if (token) { + /* Check the MIMO RSSI mode */ + if (strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) { + mimo_rssi = TRUE; + } else { + peer_mac = token; + } + } + + /* Optional: Check the MIMO RSSI mode - RSSI sum across antennas */ + token = bcmstrtok(&pos, " ", NULL); + if (token && strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) { + mimo_rssi = TRUE; + } + + err = wl_get_rssi_per_ant(dev, ifname, peer_mac, &rssi_ant_mimo); + if (unlikely(err)) { + ANDROID_ERROR(("Failed to get RSSI info, err=%d\n", err)); + return err; + } + + /* Parse the results */ + ANDROID_INFO(("ifname %s, version %d, count %d, mimo rssi %d\n", + ifname, rssi_ant_mimo.version, rssi_ant_mimo.count, mimo_rssi)); + if (mimo_rssi) { + ANDROID_INFO(("MIMO RSSI: %d\n", rssi_ant_mimo.rssi_sum)); + bytes_written = snprintf(command, total_len, "%s MIMO %d", + CMD_GET_RSSI_PER_ANT, rssi_ant_mimo.rssi_sum); + } else { + int cnt; + bytes_written = snprintf(command, total_len, "%s PER_ANT ", CMD_GET_RSSI_PER_ANT); + for (cnt = 0; cnt < rssi_ant_mimo.count; cnt++) { + ANDROID_INFO(("RSSI[%d]: %d\n", cnt, rssi_ant_mimo.rssi_ant[cnt])); + bytes_written = snprintf(command, total_len, "%d ", + rssi_ant_mimo.rssi_ant[cnt]); + } + } + + 
return bytes_written; +} + +int +wl_android_set_rssi_logging(struct net_device *dev, char *command, int total_len) +{ + rssilog_set_param_t set_param; + char *pos, *token; + int err = BCME_OK; + + memset(&set_param, 0, sizeof(rssilog_set_param_t)); + /* + * DRIVER SET_RSSI_LOGGING